diff --git a/.gitignore b/.gitignore
index f254e054597..8b87e4d8582 100644
--- a/.gitignore
+++ b/.gitignore
@@ -9,6 +9,7 @@
*~
.autogenerated
.coverage
+.mypy_cache
.nova-venv
.project
.pydevproject
@@ -33,7 +34,6 @@ doc/source/api/*
doc/build/*
api-guide/build/*
api-ref/build/*
-placement-api-ref/build/*
etc/nova/nova.conf.sample
etc/nova/policy.yaml.sample
etc/nova/policy.yaml.merged
@@ -48,7 +48,6 @@ nova/vcsversion.py
tools/conf/nova.conf*
doc/source/_static/nova.conf.sample
doc/source/_static/nova.policy.yaml.sample
-doc/source/_static/placement.policy.yaml.sample
# Files created by releasenotes build
releasenotes/build
diff --git a/.gitreview b/.gitreview
index 3a2f61c4b05..665a744a715 100644
--- a/.gitreview
+++ b/.gitreview
@@ -1,4 +1,5 @@
[gerrit]
-host=review.openstack.org
+host=review.opendev.org
port=29418
project=openstack/nova.git
+defaultbranch=stable/xena
diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
new file mode 100644
index 00000000000..d02bdbdfca7
--- /dev/null
+++ b/.pre-commit-config.yaml
@@ -0,0 +1,40 @@
+---
+default_language_version:
+ # force all unspecified python hooks to run python3
+ python: python3
+repos:
+ - repo: https://github.com/pre-commit/pre-commit-hooks
+ rev: v4.1.0
+ hooks:
+ - id: trailing-whitespace
+ - id: mixed-line-ending
+ args: ['--fix', 'lf']
+ exclude: '.*\.(svg)$'
+ - id: check-byte-order-marker
+ - id: check-executables-have-shebangs
+ - id: check-merge-conflict
+ - id: debug-statements
+ # nova/cmd/manage.py imports pdb on purpose.
+ exclude: 'nova/cmd/manage.py'
+ - id: check-yaml
+ files: .*\.(yaml|yml)$
+ - repo: https://github.com/Lucas-C/pre-commit-hooks
+ rev: v1.1.13
+ hooks:
+ - id: remove-tabs
+ exclude: '.*\.(svg)$'
+ - repo: local
+ hooks:
+ - id: flake8
+ name: flake8
+ additional_dependencies:
+ - hacking>=3.1.0,<3.2.0
+ language: python
+ entry: flake8
+ files: '^.*\.py$'
+ exclude: '^(doc|releasenotes|tools)/.*$'
+ - repo: https://github.com/pre-commit/mirrors-autopep8
+ rev: v1.6.0
+ hooks:
+ - id: autopep8
+ files: '^.*\.py$'
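+# A minimal sketch of local usage, assuming pre-commit is installed
+# (e.g. via "pip install pre-commit"):
+# pre-commit install # register the hooks with git
+# pre-commit run --all-files # run all hooks against the whole tree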
diff --git a/.zuul.yaml b/.zuul.yaml
index 6cab0ca2500..b0dd326ff55 100644
--- a/.zuul.yaml
+++ b/.zuul.yaml
@@ -1,110 +1,225 @@
# See https://docs.openstack.org/infra/manual/drivers.html#naming-with-zuul-v3
# for job naming conventions.
+
- job:
- name: nova-dsvm-base
- parent: legacy-dsvm-base
+ name: nova-tox-functional-centos8-py36
+ parent: openstack-tox-functional-py36
+ nodeset: devstack-single-node-centos-8-stream
description: |
- The base job definition for nova devstack/tempest jobs.
- Contains common configuration.
- timeout: 10800
+ Run tox-based functional tests for the OpenStack Nova project
+ under cPython version 3.6 with a Nova-specific irrelevant-files list.
+ Uses tox with the ``functional-py36`` environment.
+
+ This job also provides a parent for other projects to run the nova
+ functional tests on their own changes.
required-projects:
- - openstack-infra/devstack-gate
+ # including nova here makes this job reusable by other projects
- openstack/nova
- - openstack/tempest
- irrelevant-files: &dsvm-irrelevant-files
- - ^(placement-)?api-.*$
- - ^(test-|)requirements.txt$
+ - openstack/placement
+ irrelevant-files: &functional-irrelevant-files
- ^.*\.rst$
- - ^.git.*$
- - ^doc/.*$
- - ^nova/hacking/.*$
+ - ^api-.*$
+ - ^doc/(source|test)/.*$
- ^nova/locale/.*$
- - ^nova/tests/.*$
- ^releasenotes/.*$
- - ^setup.cfg$
- - ^tests-py3.txt$
- - ^tools/.*$
- - ^tox.ini$
+ vars:
+ # explicitly stating the work dir makes this job reusable by other
+ # projects
+ zuul_work_dir: src/opendev.org/openstack/nova
+ bindep_profile: test py36
+ timeout: 3600
+ # NOTE(elod.illes): this job started to fail in stable/xena so let's
+ # set it non-voting to unblock the gate.
+ voting: false
- job:
- name: nova-dsvm-multinode-base
- parent: legacy-dsvm-base-multinode
+ name: nova-tox-functional-py38
+ parent: openstack-tox-functional-py38
description: |
- Base job for multinode nova devstack/tempest jobs.
- Will setup firewall rules on all the nodes allowing them to talk to
- each other.
- timeout: 10800
+ Run tox-based functional tests for the OpenStack Nova project
+ under cPython version 3.8 with a Nova-specific irrelevant-files list.
+ Uses tox with the ``functional-py38`` environment.
+
+ This job also provides a parent for other projects to run the nova
+ functional tests on their own changes.
required-projects:
- - openstack-infra/devstack-gate
+ # including nova here makes this job reusable by other projects
- openstack/nova
- - openstack/tempest
- irrelevant-files: *dsvm-irrelevant-files
- nodeset: legacy-ubuntu-xenial-2-node
+ - openstack/placement
+ irrelevant-files: *functional-irrelevant-files
+ vars:
+ # explicitly stating the work dir makes this job reusable by other
+ # projects
+ zuul_work_dir: src/opendev.org/openstack/nova
+ bindep_profile: test py38
+ timeout: 3600
- job:
- name: nova-tox-functional
- parent: openstack-tox
+ name: nova-tox-functional-py39
+ parent: openstack-tox-functional-py39
description: |
- Run tox-based functional tests for the OpenStack Nova project with Nova
- specific irrelevant-files list. Uses tox with the ``functional``
- environment.
- irrelevant-files: &functional-irrelevant-files
- - ^.*\.rst$
- - ^api-.*$
- - ^doc/source/.*$
- - ^nova/locale/.*$
- - ^placement-api-ref/.*$
- - ^releasenotes/.*$
+ Run tox-based functional tests for the OpenStack Nova project
+ under cPython version 3.9 with a Nova-specific irrelevant-files list.
+ Uses tox with the ``functional-py39`` environment.
+
+ This job also provides a parent for other projects to run the nova
+ functional tests on their own changes.
+ required-projects:
+ # including nova here makes this job reusable by other projects
+ - openstack/nova
+ - openstack/placement
+ irrelevant-files: *functional-irrelevant-files
vars:
- tox_envlist: functional
+ # explicitly stating the work dir makes this job reusable by other
+ # projects
+ zuul_work_dir: src/opendev.org/openstack/nova
+ bindep_profile: test py39
timeout: 3600
- job:
- name: nova-tox-functional-py35
+ name: nova-tox-validate-backport
parent: openstack-tox
description: |
- Run tox-based functional tests for the OpenStack Nova project
- under cPython version 3.5. with Nova specific irrelevant-files list.
- Uses tox with the ``functional-py35`` environment.
- irrelevant-files: *functional-irrelevant-files
+ Determine whether a backport is ready to be merged by checking whether it
+ has already been merged to master or more recent stable branches.
+
+ Uses tox with the ``validate-backport`` environment.
vars:
- tox_envlist: functional-py35
- timeout: 3600
+ tox_envlist: validate-backport
- job:
- name: nova-caching-scheduler
- parent: nova-dsvm-base
+ name: nova-live-migration
+ parent: tempest-multinode-full-py3
description: |
- Run non-slow Tempest API and scenario tests using the CachingScheduler.
- run: playbooks/legacy/nova-caching-scheduler/run.yaml
- post-run: playbooks/legacy/nova-caching-scheduler/post.yaml
+ Run tempest live migration tests against local qcow2 ephemeral storage
+ and shared LVM/iSCSI cinder volumes.
+ irrelevant-files: &nova-base-irrelevant-files
+ - ^api-.*$
+ - ^(test-|)requirements.txt$
+ - ^.*\.rst$
+ - ^.git.*$
+ - ^doc/.*$
+ - ^nova/hacking/.*$
+ - ^nova/locale/.*$
+ - ^nova/policies/.*$
+ - ^nova/tests/.*$
+ - ^nova/test.py$
+ - ^releasenotes/.*$
+ - ^setup.cfg$
+ - ^tools/.*$
+ - ^tox.ini$
+
+ vars:
+ tox_envlist: all
+ tempest_test_regex: (^tempest\.api\.compute\.admin\.(test_live_migration|test_migration))
+ devstack_services:
+ neutron-trunk: true
+ devstack_local_conf:
+ test-config:
+ $TEMPEST_CONFIG:
+ compute-feature-enabled:
+ volume_backed_live_migration: true
+ block_migration_for_live_migration: true
+ # NOTE(lyarwood): Skip until bug #1931702 is resolved.
+ block_migrate_cinder_iscsi: false
+ post-run: playbooks/nova-live-migration/post-run.yaml
+ # NOTE(lyarwood): This job is now non-voting until bug #1912310 is resolved
+ # within libvirt/QEMU.
+ voting: false
- job:
- name: nova-cells-v1
- parent: nova-dsvm-base
- run: playbooks/legacy/nova-cells-v1/run.yaml
- post-run: playbooks/legacy/nova-cells-v1/post.yaml
+ name: nova-ovs-hybrid-plug
+ parent: tempest-multinode-full-py3
+ description: |
+ Run move operations, reboot, and evacuation (via the same post-run hook
+ as the nova-live-migration job) tests with the OVS network backend and
+ the "iptables_hybrid" securitygroup firewall driver, aka "hybrid plug".
+ The external event interactions between Nova and Neutron in these
+ situations have historically been fragile. This job exercises them.
+ irrelevant-files: *nova-base-irrelevant-files
+ vars:
+ tox_envlist: all
+ tempest_test_regex: (^tempest\..*compute\..*(migration|resize|reboot).*)
+ devstack_localrc:
+ Q_AGENT: openvswitch
+ Q_ML2_TENANT_NETWORK_TYPE: vxlan
+ Q_ML2_PLUGIN_MECHANISM_DRIVERS: openvswitch,linuxbridge
+ ML2_L3_PLUGIN: router
+ devstack_services:
+ # Disable OVN services
+ br-ex-tcpdump: false
+ br-int-flows: false
+ ovn-controller: false
+ ovn-northd: false
+ q-ovn-metadata-agent: false
+ # Neutron services
+ q-agt: true
+ q-dhcp: true
+ q-l3: true
+ q-meta: true
+ devstack_local_conf:
+ post-config:
+ "/$NEUTRON_CORE_PLUGIN_CONF":
+ securitygroup:
+ firewall_driver: iptables_hybrid
+ group-vars:
+ subnode:
+ devstack_localrc:
+ Q_AGENT: openvswitch
+ Q_ML2_TENANT_NETWORK_TYPE: vxlan
+ Q_ML2_PLUGIN_MECHANISM_DRIVERS: openvswitch,linuxbridge
+ ML2_L3_PLUGIN: router
+ devstack_services:
+ # Disable OVN services
+ br-ex-tcpdump: false
+ br-int-flows: false
+ ovn-controller: false
+ ovn-northd: false
+ ovs-vswitchd: false
+ ovsdb-server: false
+ q-ovn-metadata-agent: false
+ # Neutron services
+ q-agt: true
+ devstack_local_conf:
+ post-config:
+ "/$NEUTRON_CORE_PLUGIN_CONF":
+ securitygroup:
+ firewall_driver: iptables_hybrid
+ post-run: playbooks/nova-live-migration/post-run.yaml
- job:
- name: nova-live-migration
- parent: nova-dsvm-multinode-base
- run: playbooks/legacy/nova-live-migration/run.yaml
- post-run: playbooks/legacy/nova-live-migration/post.yaml
+ name: nova-live-migration-ceph
+ parent: devstack-plugin-ceph-multinode-tempest-py3
+ description: |
+ Run tempest live migration tests against ceph ephemeral storage and
+ cinder volumes.
+ irrelevant-files: *nova-base-irrelevant-files
+ vars:
+ tox_envlist: all
+ tempest_test_regex: (^tempest\.api\.compute\.admin\.(test_live_migration|test_migration))
+ devstack_local_conf:
+ test-config:
+ $TEMPEST_CONFIG:
+ compute-feature-enabled:
+ volume_backed_live_migration: true
+ block_migration_for_live_migration: false
+ block_migrate_cinder_iscsi: false
+ post-run: playbooks/nova-live-migration/post-run.yaml
+ # NOTE(lyarwood): This job is now non-voting until bug #1912310 is resolved
+ # within libvirt/QEMU.
+ voting: false
- job:
name: nova-lvm
- parent: nova-dsvm-base
+ parent: devstack-tempest
description: |
Run tempest compute API tests using LVM image backend. This only runs
against nova/virt/libvirt/* changes.
- run: playbooks/legacy/nova-lvm/run.yaml
- post-run: playbooks/legacy/nova-lvm/post.yaml
- # Copy irrelevant-files from nova-dsvm-base and then exclude anything
- # that is not in the nova/virt/libvirt/* tree (besides the actual zuul
- # playbook and tempest rc files so this can be self-testing).
+ # Copy irrelevant-files from nova-dsvm-multinode-base and then exclude
+ # anything that is not in nova/virt/libvirt/* or nova/privsep/*.
irrelevant-files:
- - ^(?!.zuul.yaml)(?!playbooks/legacy/nova-lvm/)(?!devstack/tempest-dsvm-lvm-rc)(?!nova/virt/libvirt/).*$
- - ^(placement-)?api-.*$
+ - ^(?!.zuul.yaml)(?!nova/virt/libvirt/)(?!nova/privsep/).*$
+ - ^api-.*$
- ^(test-|)requirements.txt$
- ^.*\.rst$
- ^.git.*$
@@ -112,29 +227,49 @@
- ^nova/hacking/.*$
- ^nova/locale/.*$
- ^nova/tests/.*$
+ - ^nova/test.py$
- ^releasenotes/.*$
- ^setup.cfg$
- - ^tests-py3.txt$
- ^tools/.*$
- ^tox.ini$
- # TODO(mriedem): Make this voting and gating once bug 1771700 is fixed
- # and we've had enough runs to feel comfortable with this setup.
- voting: false
-
-- job:
- name: nova-multiattach
- parent: nova-dsvm-base
- description: |
- Run tempest integration tests with volume multiattach support enabled.
- This job will only work starting with Queens.
- It uses the default Cinder volume type in devstack (lvm) and the
- default compute driver in devstack (libvirt).
- run: playbooks/legacy/nova-multiattach/run.yaml
- post-run: playbooks/legacy/nova-multiattach/post.yaml
+ vars:
+ # We use the "all" environment for tempest_test_regex and
+ # tempest_exclude_regex.
+ tox_envlist: all
+ # Only run compute API tests.
+ tempest_test_regex: ^tempest\.api\.compute
+ # Skip slow tests.
+ tempest_exclude_regex: .*\[.*\bslow\b.*\]
+ devstack_local_conf:
+ test-config:
+ $TEMPEST_CONFIG:
+ compute-feature-enabled:
+ # NOTE(mriedem): resize of non-volume-backed lvm instances does
+ # not yet work (bug 1831657).
+ resize: false
+ cold_migration: false
+ devstack_localrc:
+ NOVA_BACKEND: LVM
+ # Do not waste time clearing volumes.
+ LVM_VOLUME_CLEAR: none
+ # Disable SSH validation in tests to save time.
+ TEMPEST_RUN_VALIDATION: false
+ # Increase the size of the swift loopback device to accommodate RAW
+ # snapshots from the LV based instance disks.
+ # See bug #1913451 for more details.
+ SWIFT_LOOPBACK_DISK_SIZE: 24G
+ # As above, increase the total image limit per tenant to 10G
+ GLANCE_LIMIT_IMAGE_SIZE_TOTAL: 10240
+ devstack_services:
+ # Disable non-essential services that we don't need for this job.
+ c-bak: false
+# TODO(lucasagomes): Move this job to ML2/OVN when QoS Minimum Bandwidth
+# support is implemented.
+# See: https://docs.openstack.org/neutron/latest/ovn/gaps.html
- job:
name: nova-next
- parent: nova-dsvm-base
+ parent: tempest-multinode-full-py3
description: |
This job was added in Newton when placement and cellsv2
were optional. Placement and cellsv2 are required starting in Ocata. In
@@ -143,44 +278,421 @@
post-test scripts to ensure those scripts are still working,
e.g. archive_deleted_rows. In Queens, this job started testing the
TLS console proxy code in the libvirt driver.
- run: playbooks/legacy/nova-next/run.yaml
- post-run: playbooks/legacy/nova-next/post.yaml
+ Starting in Stein, the job was changed to run with python 3 and enabled
+ volume multi-attach testing.
+ Starting in Train, the job enabled counting quota usage from placement.
+ Starting in Ussuri, the job was changed to multinode.
+ Starting in Wallaby, the job defaults to the q35 machine type.
+ Runs all tempest compute API and most scenario tests concurrently.
+ irrelevant-files: *nova-base-irrelevant-files
+ # Run post-tempest tests like for nova-manage commands.
+ post-run: playbooks/nova-next/post.yaml
+ vars:
+ # We use the "all" environment for tempest_test_regex and
+ # tempest_exclude_regex.
+ tox_envlist: all
+ # Run all compute API tests and most scenario tests at the default
+ # concurrency (nproc/2 which is normally 4 in the gate).
+ tempest_test_regex: ^tempest\.(scenario|api\.compute)
+ # The tempest.scenario.test_network* tests are skipped because they
+ # (1) take a long time and (2) are already covered in the
+ # tempest-slow* job. If this regex gets more complicated use
+ # tempest_test_exclude_list.
+ # FIXME(lyarwood): The tempest.api.compute.admin.test_volume_swap tests
+ # are skipped until bug #1929710 is resolved.
+ tempest_exclude_regex: ^tempest\.(scenario\.test_network|api\.compute\.admin\.test_volume_swap|api\.compute\.servers\.test_device_tagging\.TaggedAttachmentsTest\.test_tagged_attachment)
+ devstack_local_conf:
+ post-config:
+ $NOVA_CPU_CONF:
+ libvirt:
+ # Increase the number of PCIe ports per instance given the q35
+ # machine type attaches more devices by default than pc
+ num_pcie_ports: 24
+ hw_machine_type: "x86_64=q35"
+ compute:
+ # Switch off the provider association refresh, which should
+ # reduce the number of placement calls in steady state. Added in
+ # Stein.
+ resource_provider_association_refresh: 0
+ workarounds:
+ # This workaround is an improvement on hard reboot that cannot be turned
+ # on unconditionally. But we know that ml2/ovs sends plug time
+ # events, so we can enable this in this ovs job for vnic_type
+ # normal.
+ wait_for_vif_plugged_event_during_hard_reboot: normal
+ $NOVA_CONF:
+ quota:
+ # Added in Train.
+ count_usage_from_placement: True
+ scheduler:
+ # Added in Train.
+ query_placement_for_image_type_support: True
+ "/$NEUTRON_CORE_PLUGIN_CONF":
+ # Needed for QoS port heal allocation testing.
+ ovs:
+ bridge_mappings: public:br-ex
+ resource_provider_bandwidths: br-ex:1000000:1000000
+ AGENT:
+ tunnel_types: gre,vxlan
+ ml2:
+ type_drivers: flat,geneve,vlan,gre,local,vxlan
+ test-config:
+ $TEMPEST_CONFIG:
+ network-feature-enabled:
+ qos_placement_physnet: public
+ compute-feature-enabled:
+ # The q35 machine type doesn't support an IDE bus
+ ide_bus: False
+ neutron_plugin_options:
+ available_type_drivers: flat,geneve,vlan,gre,local,vxlan
+ devstack_localrc:
+ Q_AGENT: openvswitch
+ Q_ML2_TENANT_NETWORK_TYPE: vxlan
+ Q_ML2_PLUGIN_MECHANISM_DRIVERS: openvswitch,linuxbridge
+ ML2_L3_PLUGIN: router
+ # Enable TLS between the noVNC proxy & compute nodes; this requires
+ # the tls-proxy service to be enabled. Added in Queens.
+ NOVA_CONSOLE_PROXY_COMPUTE_TLS: True
+ # Added in Stein.
+ ENABLE_VOLUME_MULTIATTACH: True
+ # Added in Ussuri.
+ FORCE_CONFIG_DRIVE: True
+ devstack_services:
+ # Disable OVN services
+ br-ex-tcpdump: false
+ br-int-flows: false
+ ovn-controller: false
+ ovn-northd: false
+ q-ovn-metadata-agent: false
+ # Neutron services
+ q-agt: true
+ q-dhcp: true
+ q-l3: true
+ q-meta: true
+ q-metering: true
+ tls-proxy: true
+ # neutron-* needed for QoS port heal allocation testing.
+ neutron-placement: true
+ neutron-qos: true
+ # Disable non-essential services that we don't need for this job.
+ c-bak: false
+ devstack_plugins:
+ # Needed for QoS port heal allocation testing.
+ neutron: https://opendev.org/openstack/neutron
+ group-vars:
+ subnode:
+ devstack_localrc:
+ Q_AGENT: openvswitch
+ Q_ML2_TENANT_NETWORK_TYPE: vxlan
+ Q_ML2_PLUGIN_MECHANISM_DRIVERS: openvswitch,linuxbridge
+ ML2_L3_PLUGIN: router
+ NOVA_USE_SERVICE_TOKEN: True
+ NOVA_CONSOLE_PROXY_COMPUTE_TLS: True
+ FORCE_CONFIG_DRIVE: True
+ devstack_services:
+ # Disable OVN services
+ br-ex-tcpdump: false
+ br-int-flows: false
+ ovn-controller: false
+ ovn-northd: false
+ ovs-vswitchd: false
+ ovsdb-server: false
+ q-ovn-metadata-agent: false
+ # Neutron services
+ q-agt: true
+ tls-proxy: true
+ c-bak: false
+
+- job:
+ name: nova-tempest-v2-api
+ parent: devstack-tempest
+ branches:
+ - master
+ description: |
+ This job runs the Tempest compute tests against the v2.0 endpoint.
+ The former name for this job was:
+ * legacy-tempest-dsvm-nova-v20-api
+ vars:
+ tox_envlist: all
+ tempest_test_regex: api.*compute
+ devstack_localrc:
+ TEMPEST_COMPUTE_TYPE: compute_legacy
+
+- job:
+ name: nova-tempest-full-oslo.versionedobjects
+ parent: tempest-full-py3
+ description: |
+ Run tests with the git version of oslo.versionedobjects to check that
+ changes to nova will work with the next released version of
+ that library.
+ required-projects:
+ - openstack/oslo.versionedobjects
+
+- job:
+ name: nova-grenade-multinode
+ parent: grenade-multinode
+ description: |
+ Run a multinode grenade job and run the smoke, cold and live migration
+ tests with the controller upgraded and the compute on the older release.
+ The former names for this job were "nova-grenade-live-migration" and
+ "legacy-grenade-dsvm-neutron-multinode-live-migration".
+ irrelevant-files: *nova-base-irrelevant-files
+ vars:
+ devstack_local_conf:
+ test-config:
+ $TEMPEST_CONFIG:
+ compute-feature-enabled:
+ live_migration: true
+ volume_backed_live_migration: true
+ block_migration_for_live_migration: true
+ # NOTE(lyarwood): Skip until bug #1931702 is resolved.
+ block_migrate_cinder_iscsi: false
+ tox_envlist: all
+ tempest_test_regex: ((tempest\.(api\.compute|scenario)\..*smoke.*)|(^tempest\.api\.compute\.admin\.(test_live_migration|test_migration)))
+
+- job:
+ name: nova-multi-cell
+ parent: tempest-multinode-full-py3
+ description: |
+ Multi-node python3 job which runs with two nodes and two non-cell0
+ cells. The compute on the controller runs in cell1 and the compute
+ on the subnode runs in cell2.
+ irrelevant-files: *nova-base-irrelevant-files
+ vars:
+ # We use the "all" environment for tempest_test_regex and
+ # tempest_test_exclude_list.
+ tox_envlist: all
+ # Run compute API and scenario tests.
+ tempest_test_regex: ^tempest\.(scenario|(api\.compute))
+ tempest_test_exclude_list: '{{ ansible_user_dir }}/{{ zuul.projects["opendev.org/openstack/nova"].src_dir }}/devstack/nova-multi-cell-exclude-list.txt'
+ devstack_local_conf:
+ post-config:
+ $NOVA_CONF:
+ oslo_policy:
+ # The default policy file is policy.json but the
+ # setup-multi-cell-policy role will write to policy.yaml.
+ policy_file: policy.yaml
+ test-config:
+ $TEMPEST_CONFIG:
+ compute-feature-enabled:
+ # Enable cold migration for migrating across cells. Note that
+ # because NOVA_ALLOW_MOVE_TO_SAME_HOST=false, all cold migrations
+ # will move across cells.
+ cold_migration: true
+ devstack_services:
+ # Disable other non-essential services that we don't need for this job.
+ c-bak: false
+ devstack_localrc:
+ # Setup two non-cell0 cells (cell1 and cell2).
+ NOVA_NUM_CELLS: 2
+ # Disable resize to the same host so all resizes will move across
+ # cells.
+ NOVA_ALLOW_MOVE_TO_SAME_HOST: false
+ # We only have two computes and we don't yet support cross-cell live
+ # migration.
+ LIVE_MIGRATION_AVAILABLE: false
+ DEVSTACK_PARALLEL: True
+ group-vars:
+ peers:
+ devstack_localrc:
+ NOVA_ALLOW_MOVE_TO_SAME_HOST: true
+ LIVE_MIGRATION_AVAILABLE: false
+ subnode:
+ devstack_localrc:
+ # The subnode compute will get registered with cell2.
+ NOVA_CPU_CELL: 2
+ devstack_services:
+ # Disable other non-essential services that we don't need for this
+ # job.
+ c-bak: false
+ # Perform setup for the multi-cell environment. Note that this runs
+ # before devstack is setup on the controller host.
+ pre-run: playbooks/nova-multi-cell/pre.yaml
+
+- job:
+ name: nova-osprofiler-redis
+ parent: tempest-smoke-py3-osprofiler-redis
+ description: |
+ Runs osprofiler with the Redis collector on a subset of compute-specific
+ tempest-full-py3 smoke tests.
+ irrelevant-files: *nova-base-irrelevant-files
+ required-projects:
+ - openstack/nova
+ vars:
+ # We use the "all" environment for tempest_test_regex.
+ tox_envlist: all
+ # Run compute API and only the test_server_basic_ops scenario tests.
+ tempest_test_regex: ^tempest\.(scenario\.test_server_basic_ops|(api\.compute))
+
+- job:
+ name: nova-ceph-multistore
+ parent: devstack-plugin-ceph-tempest-py3
+ description: |
+ Just like the normal ceph job, but with glance multistore.
+ irrelevant-files: *nova-base-irrelevant-files
+ required-projects:
+ - openstack/nova
+ pre-run:
+ - playbooks/ceph/glance-copy-policy.yaml
+ vars:
+ # NOTE(danms): These tests create an empty non-raw image, which nova
+ # will refuse because we set never_download_image_if_on_rbd in this job.
+ # Just skip these tests for this case.
+ devstack_localrc:
+ GLANCE_STANDALONE: True
+ GLANCE_USE_IMPORT_WORKFLOW: True
+ DEVSTACK_PARALLEL: True
+ devstack_local_conf:
+ post-config:
+ $NOVA_CONF:
+ libvirt:
+ images_rbd_glance_store_name: robust
+ workarounds:
+ never_download_image_if_on_rbd: True
+ $GLANCE_API_CONF:
+ DEFAULT:
+ enabled_backends: "cheap:file, robust:rbd"
+ default_log_levels: "amqp=WARN, amqplib=WARN, boto=WARN, qpid=WARN, sqlalchemy=WARN, suds=INFO, oslo.messaging=INFO, oslo_messaging=INFO, iso8601=WARN, requests.packages.urllib3.connectionpool=WARN, urllib3.connectionpool=WARN, websocket=WARN, requests.packages.urllib3.util.retry=WARN, urllib3.util.retry=WARN, keystonemiddleware=WARN, routes.middleware=WARN, stevedore=WARN, taskflow=WARN, keystoneauth=WARN, oslo.cache=INFO, dogpile.core.dogpile=INFO, oslo_policy=DEBUG"
+ glance_store:
+ default_backend: cheap
+ stores: file, http, rbd
+ default_store: file
+ robust:
+ rbd_store_pool: images
+ rbd_store_user: glance
+ rbd_store_ceph_conf: /etc/ceph/ceph.conf
+ cheap:
+ filesystem_store_datadir: /opt/stack/data/glance/images/
+ os_glance_staging_store:
+ filesystem_store_datadir: /opt/stack/data/glance/os_glance_staging_store/
+ os_glance_tasks_store:
+ filesystem_store_datadir: /opt/stack/data/glance/os_glance_tasks_store/
+ $GLANCE_IMAGE_IMPORT_CONF:
+ image_import_opts:
+ image_import_plugins: "['image_conversion']"
+ image_conversion:
+ output_format: raw
- project:
# Please try to keep the list of job names sorted alphabetically.
+ templates:
+ - check-requirements
+ - integrated-gate-compute
+ - openstack-cover-jobs
+ - openstack-python3-xena-jobs
+ - openstack-python3-xena-jobs-arm64
+ - periodic-stable-jobs
+ - publish-openstack-docs-pti
+ - release-notes-jobs-python3
check:
jobs:
# We define our own irrelevant-files so we don't run the job
# on things like nova docs-only changes.
- - ironic-tempest-dsvm-ipa-wholedisk-bios-agent_ipmitool-tinyipa:
+ - ironic-tempest-ipa-wholedisk-bios-agent_ipmitool-tinyipa:
voting: false
- irrelevant-files: *dsvm-irrelevant-files
- - nova-cells-v1
+ irrelevant-files: *nova-base-irrelevant-files
+ - nova-ceph-multistore:
+ irrelevant-files: *nova-base-irrelevant-files
+ - neutron-linuxbridge-tempest:
+ irrelevant-files:
+ # NOTE(mriedem): This job has its own irrelevant-files section
+ # so that we only run it on changes to networking and libvirt/vif
+ # code; we don't need to run this on all changes.
+ - ^(?!nova/network/.*)(?!nova/virt/libvirt/vif.py).*$
- nova-live-migration
+ - nova-live-migration-ceph
- nova-lvm
- - nova-multiattach
+ - nova-multi-cell
- nova-next
- - tempest-slow:
- irrelevant-files: *dsvm-irrelevant-files
- - nova-tox-functional
- - nova-tox-functional-py35
- - openstack-tox-lower-constraints
- - tempest-full-py3:
- irrelevant-files: *dsvm-irrelevant-files
+ - nova-ovs-hybrid-plug
+ - nova-tox-validate-backport:
+ voting: false
+ - nova-tox-functional-centos8-py36
+ - nova-tox-functional-py38
+ - nova-tox-functional-py39:
+ voting: false
+ - tempest-integrated-compute:
+ # NOTE(gmann): Policies changes do not need to run all the
+ # integration test jobs. Running only tempest and grenade
+ # common jobs will be enough along with nova functional
+ # and unit tests.
+ irrelevant-files: &policies-irrelevant-files
+ - ^api-.*$
+ - ^(test-|)requirements.txt$
+ - ^.*\.rst$
+ - ^.git.*$
+ - ^doc/.*$
+ - ^nova/hacking/.*$
+ - ^nova/locale/.*$
+ - ^nova/tests/.*$
+ - ^nova/test.py$
+ - ^releasenotes/.*$
+ - ^setup.cfg$
+ - ^tools/.*$
+ - ^tox.ini$
+ - nova-grenade-multinode:
+ irrelevant-files: *policies-irrelevant-files
+ - tempest-ipv6-only:
+ irrelevant-files: *nova-base-irrelevant-files
+ - openstacksdk-functional-devstack:
+ irrelevant-files: *nova-base-irrelevant-files
+ - cyborg-tempest:
+ irrelevant-files: *nova-base-irrelevant-files
+ voting: false
+ - barbican-tempest-plugin-simple-crypto:
+ irrelevant-files: *nova-base-irrelevant-files
+ voting: false
gate:
jobs:
- - nova-cells-v1
- nova-live-migration
- - nova-multiattach
+ - nova-live-migration-ceph
+ - nova-tox-functional-centos8-py36
+ - nova-tox-functional-py38
+ - nova-multi-cell
- nova-next
- - tempest-slow:
- irrelevant-files: *dsvm-irrelevant-files
- - nova-tox-functional
- - nova-tox-functional-py35
- - openstack-tox-lower-constraints
- - tempest-full-py3:
- irrelevant-files: *dsvm-irrelevant-files
+ - nova-tox-validate-backport
+ - nova-ceph-multistore:
+ irrelevant-files: *nova-base-irrelevant-files
+ - neutron-linuxbridge-tempest:
+ irrelevant-files:
+ # NOTE(mriedem): This job has its own irrelevant-files section
+ # so that we only run it on changes to networking and libvirt/vif
+ # code; we don't need to run this on all changes.
+ - ^(?!nova/network/.*)(?!nova/virt/libvirt/vif.py).*$
+ - tempest-integrated-compute:
+ irrelevant-files: *policies-irrelevant-files
+ - nova-grenade-multinode:
+ irrelevant-files: *policies-irrelevant-files
+ - tempest-ipv6-only:
+ irrelevant-files: *nova-base-irrelevant-files
+ - openstacksdk-functional-devstack:
+ irrelevant-files: *nova-base-irrelevant-files
experimental:
jobs:
- - nova-caching-scheduler
- - os-vif-ovs
+ - ironic-tempest-bfv:
+ irrelevant-files: *nova-base-irrelevant-files
+ - ironic-tempest-ipa-wholedisk-direct-tinyipa-multinode:
+ irrelevant-files: *nova-base-irrelevant-files
+ - devstack-plugin-nfs-tempest-full:
+ irrelevant-files: *nova-base-irrelevant-files
+ - nova-osprofiler-redis
+ - tempest-full-py3-opensuse15:
+ irrelevant-files: *nova-base-irrelevant-files
+ - tempest-pg-full:
+ irrelevant-files: *nova-base-irrelevant-files
+ - nova-tempest-full-oslo.versionedobjects:
+ irrelevant-files: *nova-base-irrelevant-files
+ - nova-tempest-v2-api:
+ irrelevant-files: *nova-base-irrelevant-files
+ - neutron-ovs-tempest-dvr-ha-multinode-full:
+ irrelevant-files: *nova-base-irrelevant-files
+ - neutron-ovs-tempest-iptables_hybrid:
+ irrelevant-files: *nova-base-irrelevant-files
+ - os-vif-ovs:
+ irrelevant-files: *nova-base-irrelevant-files
+ - devstack-platform-fedora-latest:
+ irrelevant-files: *nova-base-irrelevant-files
+ - devstack-platform-fedora-latest-virt-preview:
+ irrelevant-files: *nova-base-irrelevant-files
+ - devstack-plugin-ceph-compute-local-ephemeral:
+ irrelevant-files: *nova-base-irrelevant-files
diff --git a/CONTRIBUTING.rst b/CONTRIBUTING.rst
index 535791a4512..f3f8b3ae208 100644
--- a/CONTRIBUTING.rst
+++ b/CONTRIBUTING.rst
@@ -1,16 +1,19 @@
-If you would like to contribute to the development of OpenStack,
-you must follow the steps in this page:
+The source repository for this project can be found at:
- https://docs.openstack.org/infra/manual/developers.html
+ https://opendev.org/openstack/nova
-Once those steps have been completed, changes to OpenStack
-should be submitted for review via the Gerrit tool, following
-the workflow documented at:
+Pull requests submitted through GitHub are not monitored.
- https://docs.openstack.org/infra/manual/developers.html#development-workflow
+To start contributing to OpenStack, follow the steps in the contribution guide
+to set up and use Gerrit:
-Pull requests submitted through GitHub will be ignored.
+ https://docs.openstack.org/contributors/code-and-documentation/quick-start.html
-Bugs should be filed on Launchpad, not GitHub:
+Bugs should be filed on Launchpad:
https://bugs.launchpad.net/nova
+
+For more specific information about contributing to this repository, see the
+Nova contributor guide:
+
+ https://docs.openstack.org/nova/latest/contributor/contributing.html
diff --git a/HACKING.rst b/HACKING.rst
index 148097561af..0f98901864d 100644
--- a/HACKING.rst
+++ b/HACKING.rst
@@ -8,7 +8,7 @@ Nova Style Commandments
Nova Specific Commandments
---------------------------
-- ``nova.db`` imports are not allowed in ``nova/virt/*``
+- [N307] ``nova.db`` imports are not allowed in ``nova/virt/*``
- [N309] no db session in public API methods (disabled)
This enforces a guideline defined in ``oslo.db.sqlalchemy.session``
- [N310] timeutils.utcnow() wrapper must be used instead of direct calls to
@@ -25,19 +25,13 @@ Nova Specific Commandments
assertIsInstance(A, B).
- [N317] Change assertEqual(type(A), B) by optimal assert like
assertIsInstance(A, B)
-- [N319] Validate that debug level logs are not translated.
+- [N319] Validate that logs are not translated.
- [N320] Setting CONF.* attributes directly in tests is forbidden. Use
self.flags(option=value) instead.
-- [N321] Validate that LOG messages, except debug ones, have translations
- [N322] Method's default argument shouldn't be mutable
- [N323] Ensure that the _() function is explicitly imported to ensure proper translations.
- [N324] Ensure that jsonutils.%(fun)s must be used instead of json.%(fun)s
-- [N325] str() and unicode() cannot be used on an exception. Remove use or use six.text_type()
- [N326] Translated messages cannot be concatenated. String should be included in translated message.
-- [N327] Do not use xrange(). xrange() is not compatible with Python 3. Use range() or six.moves.range() instead.
-- [N328] Validate that LOG.info messages use _LI.
-- [N329] Validate that LOG.exception messages use _LE.
-- [N330] Validate that LOG.warning and LOG.warn messages use _LW.
- [N332] Check that the api_version decorator is the first decorator on a method
- [N334] Change assertTrue/False(A in/not in B, message) to the more specific
assertIn/NotIn(A, B, message)
@@ -52,9 +46,6 @@ Nova Specific Commandments
- [N341] contextlib.nested is deprecated
- [N342] Config options should be in the central location ``nova/conf/``
- [N343] Check for common double word typos
-- [N344] Python 3: do not use dict.iteritems.
-- [N345] Python 3: do not use dict.iterkeys.
-- [N346] Python 3: do not use dict.itervalues.
- [N348] Deprecated library function os.popen()
- [N349] Check for closures in tests which are not used
- [N350] Policy registration should be in the central location ``nova/policies/``
@@ -68,6 +59,18 @@ Nova Specific Commandments
- [N358] Return must always be followed by a space when returning a value.
- [N359] Check for redundant import aliases.
- [N360] Yield must always be followed by a space when yielding a value.
+- [N361] Check for usage of deprecated assertRegexpMatches and
+ assertNotRegexpMatches
+- [N362] Imports for privsep modules should be specific. Use "import nova.privsep.path",
+ not "from nova.privsep import path". This ensures callers know that the method they're
+ calling is using privilege escalation.
+- [N363] Disallow ``(not_a_tuple)`` because you meant ``(a_tuple_of_one,)``.
+- [N364] Check non-existent mock assertion methods and attributes.
+- [N365] Check misuse of assertTrue/assertIsNone.
+- [N366] The assert_has_calls is a method rather than a variable.
+- [N367] Disallow aliasing the mock.Mock and similar classes in tests.
+- [N368] Reject if the mock.Mock class is used as a replacement value instead of an
+ instance of a mock.Mock during patching in tests.
Creating Unit Tests
-------------------
@@ -112,6 +115,34 @@ command directly. Running ``stestr run`` will run the entire test suite.
tests in parallel). More information about stestr can be found at:
http://stestr.readthedocs.io/
+Since running the entire test suite on a regular basis is prohibitively
+expensive when testing locally, the ``tools/run-tests-for-diff.sh``
+script is provided as a convenient way to run selected tests using
+output from ``git diff``. For example, this allows running only the
+test files changed/added in the working tree::
+
+ tools/run-tests-for-diff.sh
+
+However, since it passes its arguments directly to ``git diff``, tests
+can be selected in many other ways; e.g. it can run all
+tests affected by a single commit at the tip of a given branch::
+
+ tools/run-tests-for-diff.sh mybranch^!
+
+or all those affected by a range of commits, e.g. a branch containing
+a whole patch series for a blueprint::
+
+ tools/run-tests-for-diff.sh gerrit/master..bp/my-blueprint
+
+It supports the same ``-HEAD`` invocation syntax as ``flake8wrap.sh``
+(as used by the ``fast8`` tox environment)::
+
+ tools/run-tests-for-diff.sh -HEAD
+
+By default tests log at ``INFO`` level. It is possible to make them
+log at ``DEBUG`` level by exporting the ``OS_DEBUG`` environment
+variable to ``True``.
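+
+For example, to run the tests for the working tree changes with debug
+logging enabled::
+
+ OS_DEBUG=True tools/run-tests-for-diff.sh -HEAD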
+
.. _Development Quickstart: https://docs.openstack.org/nova/latest/contributor/development-environment.html
Building Docs
diff --git a/README.rst b/README.rst
index 7cf790f1eeb..2b7eda2a65e 100644
--- a/README.rst
+++ b/README.rst
@@ -1,18 +1,16 @@
-========================
-Team and repository tags
-========================
+==============
+OpenStack Nova
+==============
.. image:: https://governance.openstack.org/tc/badges/nova.svg
:target: https://governance.openstack.org/tc/reference/tags/index.html
.. Change things from this point on
-OpenStack Nova
-==============
OpenStack Nova provides a cloud computing fabric controller, supporting a wide
variety of compute technologies, including: libvirt (KVM, Xen, LXC and more),
-Hyper-V, VMware, XenServer, OpenStack Ironic and PowerVM.
+Hyper-V, VMware, OpenStack Ironic and PowerVM.
Use the following resources to learn more.
@@ -21,8 +19,8 @@ API
To learn how to use Nova's API, consult the documentation available online at:
-- `Compute API Guide <https://developer.openstack.org/api-guide/compute/>`__
-- `Compute API Reference <https://developer.openstack.org/api-ref/compute/>`__
+- `Compute API Guide <https://docs.openstack.org/api-guide/compute/>`__
+- `Compute API Reference <https://docs.openstack.org/api-ref/compute/>`__
For more information on OpenStack APIs, SDKs and CLIs in general, refer to:
diff --git a/api-guide/source/accelerator-support.rst b/api-guide/source/accelerator-support.rst
new file mode 100644
index 00000000000..c71e899fd48
--- /dev/null
+++ b/api-guide/source/accelerator-support.rst
@@ -0,0 +1,143 @@
+==============================
+Using accelerators with Cyborg
+==============================
+
+Starting from microversion 2.82, nova supports creating servers with
+accelerators provisioned with the Cyborg service, which provides lifecycle
+management for accelerators.
+
+To launch servers with accelerators, the administrator (or a user with
+appropriate privileges) must do the following:
+
+* Create a device profile in Cyborg, which specifies what accelerator
+ resources need to be provisioned. (See `Cyborg device profiles API`_.)
+
+ .. _`Cyborg device profiles API`: https://docs.openstack.org/api-ref/accelerator/v2/index.html#device-profiles
+
+* Set the device profile name as an extra spec in a chosen flavor,
+ with this syntax:
+
+ .. code::
+
+ accel:device_profile=$device_profile_name
+
+ The chosen flavor may be a newly created one or an existing one.
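+
+ For example, assuming the ``openstack`` client is used to manage flavors
+ (the flavor name is a placeholder):
+
+ .. code::
+
+ openstack flavor set --property accel:device_profile=$device_profile_name $myflavor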
+
+* Use that flavor to create a server:
+
+ .. code::
+
+ openstack server create --flavor $myflavor --image $myimage $servername
+
+Nova supports only specific operations for instances with accelerators.
+The lists of supported and unsupported operations are as follows:
+
+* Supported operations.
+
+ * Creation and deletion.
+ * Reboots (soft and hard).
+ * Pause and unpause.
+ * Stop and start.
+ * Take a snapshot.
+ * Backup.
+ * Rescue and unrescue.
+ * Rebuild.
+ * Evacuate.
+ * Shelve and unshelve.
+
+* Unsupported operations.
+
+ * Resize.
+ * Suspend and resume.
+ * Cold migration.
+ * Live migration.
+
+.. versionchanged:: 22.0.0 (Victoria)
+
+ Added support for rebuild and evacuate operations.
+
+.. versionchanged:: 23.0.0 (Wallaby)
+
+ Added support for shelve and unshelve operations.
+
+Some operations, such as lock and unlock, work because they are effectively
+no-ops for accelerators.
+
+Caveats
+-------
+
+.. note::
+
+ This information is correct as of the 21.0.0 Ussuri release. Where
+ improvements have been made or issues fixed, they are noted per item.
+
+For nested resource providers:
+
+* When creating servers with accelerators provisioned with the Cyborg
+ service, if a flavor asks for resources that are provided by nested
+ Resource Provider inventories (e.g. vGPU) and the user wants multi-create
+ (i.e. say --max 2), the scheduler can return a NoValidHosts exception even
+ if each nested Resource Provider can support at least one instance,
+ because the total wanted capacity is not supported by any single nested
+ Resource Provider.
+ (See `bug 1874664 <https://bugs.launchpad.net/nova/+bug/1874664>`_.)
+
+ For example, when creating servers with accelerators provisioned with the
+ Cyborg service, if two child RPs have 4 vGPU inventories each:
+
+ * You can ask for a device profile in the flavor with 2 vGPU with --max 2.
+ * But you can't ask for a device profile in the flavor with 4 vGPU and
+ --max 2.
+
+=======================
+Using SRIOV with Cyborg
+=======================
+
+Starting from the Xena release, nova supports creating servers with
+SRIOV provisioned with the Cyborg service.
+
+To launch servers with accelerators, the administrator (or a user with
+appropriate privileges) must do the following:
+
+* Create a device profile in Cyborg, which specifies what accelerator
+ resources need to be provisioned. (See `Cyborg device profiles API`_,
+ `Cyborg SRIOV Test Report`_.)
+
+ .. _`Cyborg device profiles API`: https://docs.openstack.org/api-ref/accelerator/v2/index.html#device-profiles
+ .. _`Cyborg SRIOV Test Report`: https://wiki.openstack.org/wiki/Cyborg/TestReport/IntelNic
+
+* Create an 'accelerator-direct' vnic type port with the device-profile name
+ set to the Cyborg device profile, using this syntax:
+
+ .. code::
+
+ openstack port create $port_name --network $network_name --vnic-type=accelerator-direct --device-profile $device_profile_name
+
+* Create a server with that port:
+
+ .. code::
+
+ openstack server create --flavor $myflavor --image $myimage $servername --nic port-id=$port_id
+
+Nova supports only specific operations for instances with accelerators.
+The lists of supported and unsupported operations are as follows:
+
+* Supported operations.
+
+ * Creation and deletion.
+ * Reboots (soft and hard).
+ * Pause and unpause.
+ * Stop and start.
+ * Rebuild.
+ * Rescue and unrescue.
+ * Take a snapshot.
+ * Backup.
+
+* Unsupported operations.
+
+ * Resize.
+ * Suspend and resume.
+ * Cold migration.
+ * Live migration.
+ * Shelve and unshelve.
+ * Evacuate.
+ * Attach/detach a port with a device profile.
diff --git a/api-guide/source/conf.py b/api-guide/source/conf.py
index 6b0411a3d4f..e07de9e4509 100644
--- a/api-guide/source/conf.py
+++ b/api-guide/source/conf.py
@@ -31,7 +31,8 @@
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
-extensions = ['openstackdocstheme']
+extensions = ['openstackdocstheme',
+ 'sphinx.ext.todo']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
@@ -39,6 +40,9 @@
# The suffix of source filenames.
source_suffix = '.rst'
+# The 'todo' and 'todolist' directives produce output.
+todo_include_todos = True
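+# For example, a ".. todo:: Document this." directive in any page of the
+# guide will now be rendered in the built output.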
+
# The encoding of source files.
# source_encoding = 'utf-8-sig'
@@ -46,12 +50,6 @@
master_doc = 'index'
# General information about the project.
project = u'Compute API Guide'
-bug_tag = u'api-guide'
-repository_name = 'openstack/nova'
-bug_project = 'nova'
-
-# Must set this variable to include year, month, day, hours, and minutes.
-html_last_updated_fmt = '%Y-%m-%d %H:%M'
copyright = u'2015, OpenStack contributors'
@@ -94,7 +92,7 @@
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
-pygments_style = 'sphinx'
+pygments_style = 'native'
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
@@ -143,10 +141,6 @@
# directly to the root of the documentation.
# html_extra_path = []
-# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
-# using the given strftime format.
-html_last_updated_fmt = '%Y-%m-%d %H:%M'
-
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
# html_use_smartypants = True
@@ -280,6 +274,15 @@
# -- Options for openstackdocstheme -------------------------------------------
-openstack_projects = [
+openstackdocs_projects = [
+ 'glance',
'nova',
+ 'neutron',
+ 'placement',
]
+
+openstackdocs_bug_tag = u'api-guide'
+openstackdocs_repo_name = 'openstack/nova'
+openstackdocs_bug_project = 'nova'
+openstackdocs_auto_version = False
+openstackdocs_auto_name = False
diff --git a/api-guide/source/down_cells.rst b/api-guide/source/down_cells.rst
new file mode 100644
index 00000000000..bd5980d4d3d
--- /dev/null
+++ b/api-guide/source/down_cells.rst
@@ -0,0 +1,353 @@
+===================
+Handling Down Cells
+===================
+
+Starting from microversion 2.69 if there are transient conditions in a
+deployment like partial infrastructure failures (for example a cell
+not being reachable), some API responses may contain partial results
+(i.e. be missing some keys). The server operations which exhibit this
+behavior are described below:
+
+* List Servers (GET /servers): This operation may give partial
+ constructs from the non-responsive portion of the infrastructure. A
+ typical response, while listing servers from unreachable parts of
+ the infrastructure, would include only the following keys from
+ available information:
+
+ - status: The state of the server which will be "UNKNOWN".
+ - id: The UUID of the server.
+ - links: Links to the servers in question.
+
+ A sample response for a GET /servers request that includes one
+ result each from an unreachable and a healthy part of the
+ infrastructure is shown below.
+
+ Response::
+
+ {
+ "servers": [
+ {
+ "status": "UNKNOWN",
+ "id": "bcc6c6dd-3d0a-4633-9586-60878fd68edb",
+ "links": [
+ {
+ "rel": "self",
+ "href": "http://openstack.example.com/v2/6f70656e737461636b20342065766572/servers/bcc6c6dd-3d0a-4633-9586-60878fd68edb"
+ },
+ {
+ "rel": "bookmark",
+ "href": "http://openstack.example.com/6f70656e737461636b20342065766572/servers/bcc6c6dd-3d0a-4633-9586-60878fd68edb"
+ }
+ ]
+ },
+ {
+ "id": "22c91117-08de-4894-9aa9-6ef382400985",
+ "name": "test_server",
+ "links": [
+ {
+ "rel": "self",
+ "href": "http://openstack.example.com/v2/6f70656e737461636b20342065766572/servers/22c91117-08de-4894-9aa9-6ef382400985"
+ },
+ {
+ "rel": "bookmark",
+ "href": "http://openstack.example.com/6f70656e737461636b20342065766572/servers/22c91117-08de-4894-9aa9-6ef382400985"
+ }
+ ]
+ }
+ ]
+ }
+
+* List Servers Detailed (GET /servers/detail): This operation may give
+ partial constructs from the non-responsive portion of the
+ infrastructure. A typical response, while listing servers from
+ unreachable parts of the infrastructure, would include only the
+ following keys from available information:
+
+ - status: The state of the server which will be "UNKNOWN".
+ - id: The UUID of the server.
+ - tenant_id: The tenant_id to which the server belongs.
+ - created: The time of server creation.
+ - links: Links to the servers in question.
+ - security_groups: One or more security groups. (Optional)
+
+ A sample response for a GET /servers/detail request that includes
+ one result each from an unreachable and a healthy part of the
+ infrastructure is shown below.
+
+ Response::
+
+ {
+ "servers": [
+ {
+ "created": "2018-06-29T15:07:29Z",
+ "id": "bcc6c6dd-3d0a-4633-9586-60878fd68edb",
+ "status": "UNKNOWN",
+ "tenant_id": "940f47b984034c7f8f9624ab28f5643c",
+ "security_groups": [
+ {
+ "name": "default"
+ }
+ ],
+ "links": [
+ {
+ "href": "http://openstack.example.com/v2.1/6f70656e737461636b20342065766572/servers/bcc6c6dd-3d0a-4633-9586-60878fd68edb",
+ "rel": "self"
+ },
+ {
+ "href": "http://openstack.example.com/6f70656e737461636b20342065766572/servers/bcc6c6dd-3d0a-4633-9586-60878fd68edb",
+ "rel": "bookmark"
+ }
+ ]
+ },
+ {
+ "OS-DCF:diskConfig": "AUTO",
+ "OS-EXT-AZ:availability_zone": "nova",
+ "OS-EXT-SRV-ATTR:host": "compute",
+ "OS-EXT-SRV-ATTR:hostname": "new-server-test",
+ "OS-EXT-SRV-ATTR:hypervisor_hostname": "fake-mini",
+ "OS-EXT-SRV-ATTR:instance_name": "instance-00000001",
+ "OS-EXT-SRV-ATTR:kernel_id": "",
+ "OS-EXT-SRV-ATTR:launch_index": 0,
+ "OS-EXT-SRV-ATTR:ramdisk_id": "",
+ "OS-EXT-SRV-ATTR:reservation_id": "r-y0w4v32k",
+ "OS-EXT-SRV-ATTR:root_device_name": "/dev/sda",
+ "OS-EXT-SRV-ATTR:user_data": "IyEvYmluL2Jhc2gKL2Jpbi9zdQplY2hvICJJIGFtIGluIHlvdSEiCg==",
+ "OS-EXT-STS:power_state": 1,
+ "OS-EXT-STS:task_state": null,
+ "OS-EXT-STS:vm_state": "active",
+ "OS-SRV-USG:launched_at": "2017-10-10T15:49:09.516729",
+ "OS-SRV-USG:terminated_at": null,
+ "accessIPv4": "1.2.3.4",
+ "accessIPv6": "80fe::",
+ "addresses": {
+ "private": [
+ {
+ "OS-EXT-IPS-MAC:mac_addr": "aa:bb:cc:dd:ee:ff",
+ "OS-EXT-IPS:type": "fixed",
+ "addr": "192.168.0.3",
+ "version": 4
+ }
+ ]
+ },
+ "config_drive": "",
+ "created": "2017-10-10T15:49:08Z",
+ "description": null,
+ "flavor": {
+ "disk": 1,
+ "ephemeral": 0,
+ "extra_specs": {
+ "hw:numa_nodes": "1"
+ },
+ "original_name": "m1.tiny.specs",
+ "ram": 512,
+ "swap": 0,
+ "vcpus": 1
+ },
+ "hostId": "2091634baaccdc4c5a1d57069c833e402921df696b7f970791b12ec6",
+ "host_status": "UP",
+ "id": "569f39f9-7c76-42a1-9c2d-8394e2638a6d",
+ "image": {
+ "id": "70a599e0-31e7-49b7-b260-868f441e862b",
+ "links": [
+ {
+ "href": "http://openstack.example.com/6f70656e737461636b20342065766572/images/70a599e0-31e7-49b7-b260-868f441e862b",
+ "rel": "bookmark"
+ }
+ ]
+ },
+ "key_name": null,
+ "links": [
+ {
+ "href": "http://openstack.example.com/v2.1/6f70656e737461636b20342065766572/servers/569f39f9-7c76-42a1-9c2d-8394e2638a6d",
+ "rel": "self"
+ },
+ {
+ "href": "http://openstack.example.com/6f70656e737461636b20342065766572/servers/569f39f9-7c76-42a1-9c2d-8394e2638a6d",
+ "rel": "bookmark"
+ }
+ ],
+ "locked": false,
+ "metadata": {
+ "My Server Name": "Apache1"
+ },
+ "name": "new-server-test",
+ "os-extended-volumes:volumes_attached": [],
+ "progress": 0,
+ "security_groups": [
+ {
+ "name": "default"
+ }
+ ],
+ "status": "ACTIVE",
+ "tags": [],
+ "tenant_id": "6f70656e737461636b20342065766572",
+ "trusted_image_certificates": [
+ "0b5d2c72-12cc-4ba6-a8d7-3ff5cc1d8cb8",
+ "674736e3-f25c-405c-8362-bbf991e0ce0a"
+ ],
+ "updated": "2017-10-10T15:49:09Z",
+ "user_id": "fake"
+ }
+ ]
+ }
+
+ **Edge Cases**
+
+ * **Filters:** If the user is listing servers using filters, results
+ from unreachable parts of the infrastructure cannot be tested for
+ matching those filters and thus no minimalistic construct will be
+ provided. Note that by default ``openstack server list`` uses the
+ ``deleted=False`` and ``project_id=tenant_id`` filters and since
+ we know both of these fundamental values at all times, they are
+ the only allowed filters to be applied to servers with only
+ partial information available. Hence only doing ``openstack
+ server list`` and ``openstack server list --all-projects`` (admin
+ only) will show minimalistic results when parts of the
+ infrastructure are unreachable. Other filters like ``openstack
+ server list --deleted`` or ``openstack server list --host xx``
+ will skip the results depending on the administrator's
+ configuration of the deployment. Note that the filter ``openstack
+ server list --limit`` will also skip the results and if not
+ specified will return 1000 (or the configured default) records
+ from the available parts of the infrastructure.
+
+ * **Marker:** If the user does ``openstack server list --marker`` it will
+ fail with a 500 if the marker is an instance that is no longer reachable.
+
+ * **Sorting:** We exclude the unreachable parts of the infrastructure just like
+ we do for filters since there is no way of obtaining valid sorted results from
+ those parts with missing information.
+
+ * **Paging:** We ignore the parts of the deployment which are non-responsive.
+ For example if we have three cells A (reachable state), B (unreachable state)
+ and C (reachable state) and if the marker is half way in A, we would get the
+ remaining half of the results from A, all the results from C and ignore cell B.
+
+ .. note:: All the edge cases that are not supported for minimal constructs would
+ give responses based on the administrator's configuration of the deployment,
+ either skipping those results or returning an error.
+
+* Show Server Details (GET /servers/{server_id}): This operation may
+ give partial constructs from the non-responsive portion of the
+ infrastructure. A typical response while viewing a server from an
+ unreachable part of the infrastructure would include only the
+ following keys from available information:
+
+ - status: The state of the server which will be "UNKNOWN".
+ - id: The UUID of the server.
+ - tenant_id: The tenant_id to which the server belongs.
+ - created: The time of server creation.
+ - user_id: The user_id to which the server belongs. This may be "UNKNOWN"
+ for older servers.
+ - image: The image details of the server. If it is not set, as in
+ the boot-from-volume case, this value will be an empty string.
+ - flavor: The flavor details of the server.
+ - availability_zone: The availability_zone of the server if it was specified
+ during boot time and "UNKNOWN" otherwise.
+ - power_state: Its value will be 0 (``NOSTATE``).
+ - links: Links to the servers in question.
+ - server_groups: The UUIDs of the server groups to which the server belongs.
+ Currently this can contain at most one entry. Note that this key will be in
+ the response only from the "2.71" microversion.
+
+ A sample response for a GET /servers/{server_id} request that
+ includes one server from an unreachable part of the infrastructure
+ is shown below.
+
+ Response::
+
+ {
+ "server": [
+ {
+ "created": "2018-06-29T15:07:29Z",
+ "status": "UNKNOWN",
+ "tenant_id": "940f47b984034c7f8f9624ab28f5643c",
+ "id": "bcc6c6dd-3d0a-4633-9586-60878fd68edb",
+ "user_id": "940f47b984034c7f8f9624ab28f5643c",
+ "image": {
+ "id": "70a599e0-31e7-49b7-b260-868f441e862b",
+ },
+ "flavor": {
+ "disk": 1,
+ "ephemeral": 0,
+ "extra_specs": {
+ "hw:numa_nodes": "1"
+ },
+ "original_name": "m1.tiny.specs",
+ "ram": 512,
+ "swap": 0,
+ "vcpus": 1
+ },
+ "OS-EXT-AZ:availability_zone": "geneva",
+ "OS-EXT-STS:power_state": 0,
+ "links": [
+ {
+ "href": "http://openstack.example.com/v2.1/6f70656e737461636b20342065766572/servers/bcc6c6dd-3d0a-4633-9586-60878fd68edb",
+ "rel": "self"
+ },
+ {
+ "href": "http://openstack.example.com/6f70656e737461636b20342065766572/servers/bcc6c6dd-3d0a-4633-9586-60878fd68edb",
+ "rel": "bookmark"
+ }
+ ],
+ "server_groups": ["0fd77252-4eef-4ec4-ae9b-e05dfc98aeac"]
+ }
+ ]
+ }
+
+* List Compute Services (GET /os-services): This operation may give
+ partial constructs for the services with :program:`nova-compute` as
+ their binary from the non-responsive portion of the
+ infrastructure. A typical response while listing the compute
+ services from unreachable parts of the infrastructure would include
+ only the following keys for the :program:`nova-compute` services
+ from available information while the other services like the
+ :program:`nova-conductor` service will be skipped from the result:
+
+ - binary: The binary name of the service which would always be
+ ``nova-compute``.
+ - host: The name of the host running the service.
+ - status: The status of the service which will be "UNKNOWN".
+
+ A sample response for a GET /os-services request that includes two
+ compute services from unreachable parts of the infrastructure and
+ other services from a healthy one is shown below.
+
+ Response::
+
+ {
+ "services": [
+ {
+ "binary": "nova-compute",
+ "host": "host1",
+ "status": "UNKNOWN"
+ },
+ {
+ "binary": "nova-compute",
+ "host": "host2",
+ "status": "UNKNOWN"
+ },
+ {
+ "id": 1,
+ "binary": "nova-scheduler",
+ "disabled_reason": "test1",
+ "host": "host3",
+ "state": "up",
+ "status": "disabled",
+ "updated_at": "2012-10-29T13:42:02.000000",
+ "forced_down": false,
+ "zone": "internal"
+ },
+ {
+ "id": 2,
+ "binary": "nova-compute",
+ "disabled_reason": "test2",
+ "host": "host4",
+ "state": "up",
+ "status": "disabled",
+ "updated_at": "2012-10-29T13:42:05.000000",
+ "forced_down": false,
+ "zone": "nova"
+ }
+ ]
+ }
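+
+As a minimal sketch, a client opts into these partial results by requesting
+microversion 2.69 or later, for example with the ``openstack`` client::
+
+ openstack --os-compute-api-version 2.69 server list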
diff --git a/api-guide/source/extra_specs_and_properties.rst b/api-guide/source/extra_specs_and_properties.rst
index 29a9ffe942f..cd5411789a6 100644
--- a/api-guide/source/extra_specs_and_properties.rst
+++ b/api-guide/source/extra_specs_and_properties.rst
@@ -2,16 +2,44 @@
Flavor Extra Specs and Image Properties
=======================================
-TODO: Generic description about Flavor Extra Specs and Image Properties.
+Flavor extra specs and image properties are used to control certain aspects
+of server behavior, such as scheduling.
+
+The flavor of a server can be changed during a
+:nova-doc:`resize ` operation.
+
+The image of a server can be changed during a
+:nova-doc:`rebuild ` operation.
+
+By default, flavor extra specs are controlled by administrators of the cloud.
+If users are authorized to upload their own images to the image service, they
+may be able to specify their own image property requirements.
+
+There are many flavor extra specs and image properties that control the same
+functionality. In many of these cases the image property takes precedence
+over the flavor extra spec if both are used for the same server.
Flavor Extra Specs
==================
-TODO: List the extra specs which we supported at here. The best is the extra
-specs can auto-gen from the nova code.
+Refer to the :nova-doc:`user guide ` for a
+list of official extra specs.
+
+While there are standard extra specs, deployments can define their own extra
+specs to be used with host aggregates and custom scheduler filters as
+necessary. See the
+:nova-doc:`reference guide `
+for more details.
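+
+For example, a deployment could define its own extra spec and attach it to a
+flavor with the ``openstack`` client (the spec name, value and flavor are
+placeholders)::
+
+ openstack flavor set --property custom:my_spec=some_value $myflavor
+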
Image Properties
================
-TODO: List the properties which affect the server creation. The best is the
-properties can auto-gen from the image properties object.
+Refer to the image service documentation for a list of official
+:glance-doc:`image properties ` and
+:glance-doc:`metadata definition concepts `.
+
+Unlike flavor extra specs, image properties are standardized in the compute
+service and thus they must be `registered`_ within the compute service before
+they can be used.
+
+.. _registered: https://opendev.org/openstack/nova/src/branch/master/nova/objects/image_meta.py
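+
+As a brief sketch, an operator might set a flavor extra spec while a user
+sets the equivalent image property; ``hw:cpu_policy`` and ``hw_cpu_policy``
+are one such pair, and the flavor and image names here are placeholders:
+
+.. code-block:: console
+
+ $ openstack flavor set --property hw:cpu_policy=dedicated m1.large
+ $ openstack image set --property hw_cpu_policy=dedicated my-image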
diff --git a/api-guide/source/faults.rst b/api-guide/source/faults.rst
index 88d0cca5f9c..529b119a457 100644
--- a/api-guide/source/faults.rst
+++ b/api-guide/source/faults.rst
@@ -67,11 +67,167 @@ Response header example::
Server Actions
--------------
-There is an API for end users to list the outcome of Server Actions,
-referencing the requested action by request id.
+Most `server action APIs`_ are asynchronous. Usually the API service will do
+some minimal work and then send the request off to the ``nova-compute`` service
+to complete the action, and the API will return a 202 response to the client.
+The client will poll the API until the operation completes, which could be a
+status change on the server but usually at least means waiting for the server
+``OS-EXT-STS:task_state`` field to go to ``null``, indicating the action
+has completed either successfully or with an error.
+
+If a server action fails and the server status changes to ``ERROR`` an
+:ref:`instance fault ` will be shown with the server details.
+
+The `os-instance-actions API`_ allows end users to list the outcome of
+server actions, referencing the requested action by request id. This is useful
+when an action fails and the server status does not change to ``ERROR``.
+
+To illustrate, consider a server (vm1) created with flavor ``m1.tiny``:
+
+.. code-block:: console
+
+ $ openstack server create --flavor m1.tiny --image cirros-0.4.0-x86_64-disk --wait vm1
+ +-----------------------------+-----------------------------------------------------------------+
+ | Field | Value |
+ +-----------------------------+-----------------------------------------------------------------+
+ | OS-DCF:diskConfig | MANUAL |
+ | OS-EXT-AZ:availability_zone | nova |
+ | OS-EXT-STS:power_state | Running |
+ | OS-EXT-STS:task_state | None |
+ | OS-EXT-STS:vm_state | active |
+ | OS-SRV-USG:launched_at | 2019-12-02T19:14:48.000000 |
+ | OS-SRV-USG:terminated_at | None |
+ | accessIPv4 | |
+ | accessIPv6 | |
+ | addresses | private=10.0.0.60, fda0:e0c4:2764:0:f816:3eff:fe03:806 |
+ | adminPass | NgascCr3dYo4 |
+ | config_drive | |
+ | created | 2019-12-02T19:14:42Z |
+ | flavor | m1.tiny (1) |
+ | hostId | 22e88bec09a7e33606348fce0abac0ebbbe091a35e29db1498ec4e14 |
+ | id | 344174b8-34fd-4017-ae29-b9084dcf3861 |
+ | image | cirros-0.4.0-x86_64-disk (cce5e6d6-d359-4152-b277-1b4f1871557f) |
+ | key_name | None |
+ | name | vm1 |
+ | progress | 0 |
+ | project_id | b22597ea961545f3bde1b2ede0bd5b91 |
+ | properties | |
+ | security_groups | name='default' |
+ | status | ACTIVE |
+ | updated | 2019-12-02T19:14:49Z |
+ | user_id | 046033fb3f824550999752b6525adbac |
+ | volumes_attached | |
+ +-----------------------------+-----------------------------------------------------------------+
+
+The owner of the server then tries to resize the server to flavor ``m1.small``
+which fails because there are no hosts available on which to resize the server:
+
+.. code-block:: console
+
+ $ openstack server resize --flavor m1.small --wait vm1
+ Complete
+
+Despite the openstack command saying the operation completed, the server shows
+the original ``m1.tiny`` flavor and the status is not ``VERIFY_RESIZE``:
+
+.. code-block:: console
+
+ $ openstack server show vm1 -f value -c status -c flavor
+ m1.tiny (1)
+ ACTIVE
+
+Since the status is not ``ERROR`` there is no ``fault`` field in the server
+details so we find the details by listing the events for the server:
+
+.. code-block:: console
+
+ $ openstack server event list vm1
+ +------------------------------------------+--------------------------------------+--------+----------------------------+
+ | Request ID | Server ID | Action | Start Time |
+ +------------------------------------------+--------------------------------------+--------+----------------------------+
+ | req-ea1b0dfc-3186-42a9-84ff-c4f4fb130fae | 344174b8-34fd-4017-ae29-b9084dcf3861 | resize | 2019-12-02T19:15:35.000000 |
+ | req-4cdc4c93-0668-4ae6-98c8-a0a5fcc63d39 | 344174b8-34fd-4017-ae29-b9084dcf3861 | create | 2019-12-02T19:14:42.000000 |
+ +------------------------------------------+--------------------------------------+--------+----------------------------+
+
+To see details about the ``resize`` action, we use the Request ID for that
+action:
+
+.. code-block:: console
+
+ $ openstack server event show vm1 req-ea1b0dfc-3186-42a9-84ff-c4f4fb130fae
+ +---------------+------------------------------------------+
+ | Field | Value |
+ +---------------+------------------------------------------+
+ | action | resize |
+ | instance_uuid | 344174b8-34fd-4017-ae29-b9084dcf3861 |
+ | message | Error |
+ | project_id | b22597ea961545f3bde1b2ede0bd5b91 |
+ | request_id | req-ea1b0dfc-3186-42a9-84ff-c4f4fb130fae |
+ | start_time | 2019-12-02T19:15:35.000000 |
+ | user_id | 046033fb3f824550999752b6525adbac |
+ +---------------+------------------------------------------+
+
+We see the message is "Error" but are not sure what failed. By default the
+event details for an action are not shown to users without the admin role,
+so use microversion 2.51 to see the events (the ``events`` field is
+JSON-formatted here for readability):
+
+.. code-block:: console
+
+ $ openstack --os-compute-api-version 2.51 server event show vm1 req-ea1b0dfc-3186-42a9-84ff-c4f4fb130fae -f json -c events
+ {
+ "events": [
+ {
+ "event": "cold_migrate",
+ "start_time": "2019-12-02T19:15:35.000000",
+ "finish_time": "2019-12-02T19:15:36.000000",
+ "result": "Error"
+ },
+ {
+ "event": "conductor_migrate_server",
+ "start_time": "2019-12-02T19:15:35.000000",
+ "finish_time": "2019-12-02T19:15:36.000000",
+ "result": "Error"
+ }
+ ]
+ }
+
+With the default policy configuration, a user with the admin role can see a
+``traceback`` for each failed event just like with an instance fault:
+
+.. code-block:: console
-For more details, please see:
-https://developer.openstack.org/api-ref/compute/#servers-run-an-action-servers-action
+ $ source openrc admin admin
+ $ openstack --os-compute-api-version 2.51 server event show 344174b8-34fd-4017-ae29-b9084dcf3861 req-ea1b0dfc-3186-42a9-84ff-c4f4fb130fae -f json -c events
+ {
+ "events": [
+ {
+ "event": "cold_migrate",
+ "start_time": "2019-12-02T19:15:35.000000",
+ "finish_time": "2019-12-02T19:15:36.000000",
+ "result": "Error",
+ "traceback": " File \"/opt/stack/nova/nova/conductor/manager.py\",
+ line 301, in migrate_server\n host_list)\n
+ File \"/opt/stack/nova/nova/conductor/manager.py\", line 367, in
+ _cold_migrate\n raise exception.NoValidHost(reason=msg)\n"
+ },
+ {
+ "event": "conductor_migrate_server",
+ "start_time": "2019-12-02T19:15:35.000000",
+ "finish_time": "2019-12-02T19:15:36.000000",
+ "result": "Error",
+ "traceback": " File \"/opt/stack/nova/nova/compute/utils.py\",
+ line 1410, in decorated_function\n return function(self, context,
+ *args, **kwargs)\n File \"/opt/stack/nova/nova/conductor/manager.py\",
+ line 301, in migrate_server\n host_list)\n
+ File \"/opt/stack/nova/nova/conductor/manager.py\", line 367, in
+ _cold_migrate\n raise exception.NoValidHost(reason=msg)\n"
+ }
+ ]
+ }
+
+.. _server action APIs: https://docs.openstack.org/api-ref/compute/#servers-run-an-action-servers-action
+.. _os-instance-actions API: https://docs.openstack.org/api-ref/compute/#servers-actions-servers-os-instance-actions
Logs
----
@@ -104,6 +260,8 @@ while neutron is using local request ID
The local request IDs are useful to make 'call graphs'.
+.. _instance-fault:
+
Instance Faults
---------------
@@ -135,6 +293,7 @@ In many cases there are also notifications emitted that describe the error.
This is an administrator focused API, that works best when treated as
structured logging.
+.. _synchronous_faults:
Synchronous Faults
==================
@@ -167,7 +326,7 @@ depending on the type of error. The following link contains a list of possible
elements along with their associated error codes.
For more information on possible error code, please see:
-http://specs.openstack.org/openstack/api-wg/guidelines/http.html#http-response-codes
+http://specs.openstack.org/openstack/api-wg/guidelines/http/response-codes.html
Asynchronous faults
===================
@@ -179,7 +338,7 @@ In these cases, the server is usually placed in an ``ERROR`` state. For some
operations, like resize, it is possible that the operation fails but
the instance gracefully returned to its original state before attempting the
operation. In both of these cases, you should be able to find out more from
-the Server Actions API described above.
+the `Server Actions`_ API described above.
When a server is placed into an ``ERROR`` state, a fault is embedded in the
offending server. Note that these asynchronous faults follow the same format
diff --git a/api-guide/source/general_info.rst b/api-guide/source/general_info.rst
index 3dca099d6d4..b0e85749605 100644
--- a/api-guide/source/general_info.rst
+++ b/api-guide/source/general_info.rst
@@ -42,7 +42,7 @@ several key concepts:
- **Flavor Extra Specs**
Key and value pairs that can be used to describe the specification of
- the server which more than just about CPU, disk and RAM. For example,
+ the server, which is about more than just CPU, disk and RAM. For example,
it can be used to indicate that the server created by this flavor has
PCI devices, etc.
@@ -60,7 +60,7 @@ several key concepts:
- **Image Properties**
Key and value pairs that can help end users to determine the requirements
- of the guest os in the image.
+ of the guest operating system in the image.
For more details, please see: :doc:`extra_specs_and_properties`
@@ -108,23 +108,27 @@ several key concepts:
Networking Concepts
-------------------
-In this section we focus on this related to networking.
+Networking is handled by the :neutron-doc:`networking service <>`. When working
+with a server in the compute service, the most important networking resource
+is a *port* which is part of a *network*. Ports can have *security groups*
+applied to control firewall access. Ports can also be linked to *floating IPs*
+for external network access depending on the networking service configuration.
-- **Port**
+When creating a server or attaching a network interface to an existing server,
+zero or more networks and/or ports can be specified to attach to the server.
+If nothing is provided, the compute service will by default create a port on
+the single network available to the project making the request. If more than
+one network is available to the project, such as a public external network and
+a private tenant network, an error will occur and the request will have to be
+made with a specific network or port. If a network is specified the compute
+service will attempt to create a port on the given network on behalf of the
+user. More advanced types of ports, such as
+:neutron-doc:`SR-IOV ports `, must be pre-created and
+provided to the compute service.
- TODO
+Refer to the `network API reference`_ for more details.
-- **Floating IPs, Pools and DNS**
-
- TODO
-
-- **Security Groups**
-
- TODO
-
-- **Extended Networks**
-
- TODO
+.. _network API reference: https://docs.openstack.org/api-ref/network/
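+
+As a hypothetical illustration (the network, port, image and flavor names are
+placeholders), a server can be booted on a network or on a pre-created port:
+
+.. code-block:: console
+
+ $ openstack server create --flavor m1.tiny --image cirros-0.4.0-x86_64-disk \
+ --network private vm-on-network
+ $ openstack server create --flavor m1.tiny --image cirros-0.4.0-x86_64-disk \
+ --port my-precreated-port vm-on-port
+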
Administrator Concepts
@@ -171,17 +175,12 @@ on compute hosts rather than servers.
This service runs on every compute node, and communicates with a
hypervisor for managing compute resources on that node.
- - **nova-network (deprecated)**
-
- This service handles networking of virtual servers. It is no longer under
- active development, and is being replaced by Neutron.
-
- - **nova-consoleauth (deprecated)**
-
- This service provides authorization for compute instances consoles.
-
- **Services Actions**
+ .. note::
+ The services actions described in this section apply only to
+ **nova-compute** services.
+
- **enable, disable, disable-log-reason**
The service can be disabled to indicate the service is not available anymore.
@@ -196,20 +195,31 @@ on compute hosts rather than servers.
.. note::
This action is enabled in microversion 2.11.
- This action allows you set the state of service down immediately. Actually
- Nova only provides the health monitor of service status, there isn't any
- guarantee about health status of other parts of infrastructure, like the
- health status of data network, storage network and other components. The
- more complete health monitor of infrastructure is provided by external
- system normally. An external health monitor system can mark the service
- down for notifying the fault.
+ This action allows you to set the state of a service down immediately.
+ Nova only provides a very basic health monitor of service status; there
+ isn't any guarantee about the health status of other parts of the
+ infrastructure, like the health status of the data network, storage
+ network and other components.
+
+ If you have a more extensive health monitoring system external to Nova,
+ and know that the service in question is dead (and disconnected from the
+ network), this can be used to tell the rest of Nova it can trust that this
+ service is never coming back, and allow actions such as evacuate.
+
+ .. warning::
+
+ This must *only* be used if you have fully fenced the service in
+ question and are sure that it can never send updates to the rest of
+ the system. This can be done by powering off the node or completely
+ isolating its networking. If you force-down a service that is not
+ fenced you can corrupt the VMs that were running on that host.
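+
+ As a sketch, a fenced service can be forced down from the command line
+ (``host1`` is a placeholder; the action requires microversion 2.11 or
+ later):
+
+ .. code-block:: console
+
+ $ openstack --os-compute-api-version 2.11 compute service set \
+ --down host1 nova-compute
+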
- **Hosts**
Hosts are the *physical machines* that provide the resources for the virtual
- servers created in Nova. They run a ``hypervisor`` (see definition below)
+ servers created in Nova. They run a **hypervisor** (see definition below)
that handles the actual creation and management of the virtual servers.
- Hosts also run the ``Nova compute service``, which receives requests from
+ Hosts also run the **Nova compute service**, which receives requests from
Nova to interact with the virtual servers on that machine. When compute
service receives a request, it calls the appropriate methods of the driver
for that hypervisor in order to carry out the request. The driver acts as
@@ -261,30 +271,3 @@ on compute hosts rather than servers.
Administrators are able to query the records in database for information
about migrations. For example, they can determine the source and
destination hosts, type of migration, or changes in the server's flavor.
-
-Relationship with Volume API
-============================
-
-Here we discuss about Cinder's API and how Nova users volume UUIDs.
-
-TODO - add more details.
-
-Relationship with Image API
-===========================
-
-Here we discuss about Glance's API and how Nova uses image UUIDs.
-We also discuss how Nova proxies setting image metadata.
-
-TODO - add more details.
-
-Interactions with neutron and nova-network (deprecated)
-=======================================================
-
-We talk about how networking can be provided be either neutron or
-nova-network (deprecated).
-
-Here we discuss about Neutron's API and how Nova users port UUIDs.
-We also discuss Nova automatically creating ports, proxying security groups,
-and proxying floating IPs. Also talk about the APIs we do not proxy.
-
-TODO - add more details.
diff --git a/api-guide/source/index.rst b/api-guide/source/index.rst
index 4c1b425f74e..2e6ac8042b7 100644
--- a/api-guide/source/index.rst
+++ b/api-guide/source/index.rst
@@ -24,7 +24,7 @@ compute resources might be Virtual Machines, Physical Machines or Containers.
This guide covers the concepts in the OpenStack Compute API.
For a full reference listing, please see:
-`Compute API Reference `__.
+`Compute API Reference `__.
We welcome feedback, comments, and bug reports at
`bugs.launchpad.net/nova `__.
@@ -60,7 +60,7 @@ the following endpoints:
* / - list of available versions
* /v2 - the first version of the Compute API, uses extensions
- (we call this Compute API v2.0)
+ (we call this Compute API v2.0)
* /v2.1 - same API, except uses microversions
While this guide concentrates on documenting the v2.1 API,
@@ -79,14 +79,13 @@ Contents
general_info
server_concepts
authentication
+ extra_specs_and_properties
faults
limits
links_and_references
paginated_collections
- polling_changes-since_parameter
+ polling_changes
request_and_response_formats
-
-.. toctree::
- :hidden:
-
- extra_specs_and_properties
+ down_cells
+ port_with_resource_request
+ accelerator-support
diff --git a/api-guide/source/limits.rst b/api-guide/source/limits.rst
index c2ed0af8593..a2f6b49edb6 100644
--- a/api-guide/source/limits.rst
+++ b/api-guide/source/limits.rst
@@ -9,7 +9,7 @@ operators and may differ from one deployment of the OpenStack Compute service
to another. Please contact your provider to determine the limits that
apply to your account. Your provider may be able to adjust your
account's limits if they are too low. Also see the API Reference for
-`Limits `__.
+`Limits `__.
Absolute limits
~~~~~~~~~~~~~~~
@@ -49,4 +49,4 @@ Determine limits programmatically
Applications can programmatically determine current account limits. For
information, see
-`Limits `__.
+`Limits `__.
diff --git a/api-guide/source/microversions.rst b/api-guide/source/microversions.rst
index 1b202665edf..b1590123cbc 100644
--- a/api-guide/source/microversions.rst
+++ b/api-guide/source/microversions.rst
@@ -27,24 +27,24 @@ There are multiple cases which you can resolve with microversions:
- **Older clients with new cloud**
-Before using an old client to talk to a newer cloud, the old client can check
-the minimum version of microversions to verify whether the cloud is compatible
-with the old API. This prevents the old client from breaking with backwards
-incompatible API changes.
-
-Currently the minimum version of microversions is `2.1`, which is a
-microversion compatible with the legacy v2 API. That means the legacy v2 API
-user doesn't need to worry that their older client software will be broken when
-their cloud is upgraded with new versions. And the cloud operator doesn't need
-to worry that upgrading their cloud to newer versions will break any user with
-older clients that don't expect these changes.
+ Before using an old client to talk to a newer cloud, the old client can check
+ the minimum version of microversions to verify whether the cloud is
+ compatible with the old API. This prevents the old client from breaking with
+ backwards incompatible API changes.
+
+ Currently the minimum version of microversions is `2.1`, which is a
+ microversion compatible with the legacy v2 API. That means the legacy v2 API
+ user doesn't need to worry that their older client software will be broken
+ when their cloud is upgraded with new versions. And the cloud operator
+ doesn't need to worry that upgrading their cloud to newer versions will
+ break any user with older clients that don't expect these changes.
- **User discovery of available features between clouds**
-The new features can be discovered by microversions. The user client should
-check the microversions firstly, and new features are only enabled when clouds
-support. In this way, the user client can work with clouds that have deployed
-different microversions simultaneously.
+ New features can be discovered via microversions. The user client should
+ check the microversions first, and new features are only enabled when the
+ cloud supports them. In this way, the user client can work with clouds that
+ have deployed different microversions simultaneously.
Version Discovery
=================
@@ -52,7 +52,7 @@ Version Discovery
The Version API will return the minimum and maximum microversions. These values
are used by the client to discover the API's supported microversion(s).
-Requests to '/' will get version info for all endpoints. A response would look
+Requests to `/` will get version info for all endpoints. A response would look
as follows::
{
@@ -86,12 +86,12 @@ as follows::
]
}
-"version" is the maximum microversion, "min_version" is the minimum
+``version`` is the maximum microversion, ``min_version`` is the minimum
microversion. If the value is the empty string, it means this endpoint doesn't
support microversions; it is a legacy v2 API endpoint -- for example, the
endpoint `http://openstack.example.com/v2/` in the above sample. The endpoint
`http://openstack.example.com/v2.1/` supports microversions; the maximum
-microversion is '2.14', and the minimum microversion is '2.1'. The client
+microversion is `2.14`, and the minimum microversion is `2.1`. The client
should specify a microversion between (and including) the minimum and maximum
microversion to access the endpoint.
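+
+As a minimal sketch (the endpoint is a placeholder following the samples
+above), the version document can be fetched with a plain **GET**:
+
+.. code-block:: console
+
+ $ curl -s http://openstack.example.com/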
@@ -117,20 +117,20 @@ following header to specify the microversion::
This acts conceptually like the "Accept" header. Semantically this means:
-* If neither `X-OpenStack-Nova-API-Version` nor `OpenStack-API-Version`
+* If neither ``X-OpenStack-Nova-API-Version`` nor ``OpenStack-API-Version``
(specifying `compute`) is provided, act as if the minimum supported
microversion was specified.
-* If both headers are provided, `OpenStack-API-Version` will be preferred.
+* If both headers are provided, ``OpenStack-API-Version`` will be preferred.
-* If `X-OpenStack-Nova-API-Version` or `OpenStack-API-Version` is provided,
+* If ``X-OpenStack-Nova-API-Version`` or ``OpenStack-API-Version`` is provided,
respond with the API at that microversion. If that's outside of the range
of microversions supported, return 406 Not Acceptable.
-* If `X-OpenStack-Nova-API-Version` or `OpenStack-API-Version` has a value
- of ``latest`` (special keyword), act as if maximum was specified.
+* If ``X-OpenStack-Nova-API-Version`` or ``OpenStack-API-Version`` has a value
+ of `latest` (special keyword), act as if maximum was specified.
-.. warning:: The ``latest`` value is mostly meant for integration testing and
+.. warning:: The `latest` value is mostly meant for integration testing and
would be dangerous to rely on in client code since microversions are not
following semver and therefore backward compatibility is not guaranteed.
Clients should always require a specific microversion but limit what is
@@ -149,7 +149,7 @@ the response::
The first header specifies the microversion number of the API which was
executed.
-The `Vary` header is used as a hint to caching proxies that the response
+The ``Vary`` header is used as a hint to caching proxies that the response
is also dependent on the microversion and not just the body and query
parameters. See :rfc:`2616` section 14.44 for details.
diff --git a/api-guide/source/paginated_collections.rst b/api-guide/source/paginated_collections.rst
index 08f7d137be6..e817642bf2d 100644
--- a/api-guide/source/paginated_collections.rst
+++ b/api-guide/source/paginated_collections.rst
@@ -4,18 +4,18 @@ Paginated collections
To reduce load on the service, list operations return a maximum number
of items at a time. The maximum number of items returned is determined
-by the compute provider. To navigate the collection, the *``limit``* and
-*``marker``* parameters can be set in the URI. For example:
+by the compute provider. To navigate the collection, the ``limit`` and
+``marker`` parameters can be set in the URI. For example:
.. code::
?limit=100&marker=1234
-The *``marker``* parameter is the ID of the last item in the previous
+The ``marker`` parameter is the ID of the last item in the previous
list. By default, the service sorts items by create time in descending order.
When the service cannot identify a create time, it sorts items by ID. The
-*``limit``* parameter sets the page size. Both parameters are optional. If the
-client requests a *``limit``* beyond one that is supported by the deployment
+``limit`` parameter sets the page size. Both parameters are optional. If the
+client requests a ``limit`` beyond one that is supported by the deployment
an overLimit (413) fault may be thrown. A marker with an invalid ID returns
a badRequest (400) fault.
@@ -25,11 +25,11 @@ implementation does not contain ``previous`` links. The last
page in the list does not contain a link to "next" page. The following examples
illustrate three pages in a collection of servers. The first page was
retrieved through a **GET** to
-``http://servers.api.openstack.org/v2.1/servers?limit=1``. In these
+`http://servers.api.openstack.org/v2.1/servers?limit=1`. In these
examples, the *``limit``* parameter sets the page size to a single item.
Subsequent links honor the initial page size. Thus, a client can follow
links to traverse a paginated collection without having to input the
-*``marker``* parameter.
+``marker`` parameter.
**Example: Servers collection: JSON (first page)**
diff --git a/api-guide/source/polling_changes-since_parameter.rst b/api-guide/source/polling_changes-since_parameter.rst
deleted file mode 100644
index 52ea273af90..00000000000
--- a/api-guide/source/polling_changes-since_parameter.rst
+++ /dev/null
@@ -1,28 +0,0 @@
-==================================================
-Efficient polling with the Changes-Since parameter
-==================================================
-
-The REST API allows you to poll for the status of certain operations by
-performing a **GET** on various elements. Rather than re-downloading and
-re-parsing the full status at each polling interval, your REST client
-may use the *``changes-since``* parameter to check for changes since a
-previous request. The *``changes-since``* time is specified as an `ISO
-8601 `__ dateTime
-(2011-01-24T17:08Z). The form for the timestamp is CCYY-MM-DDThh:mm:ss.
-An optional time zone may be written in by appending the form ±hh:mm
-which describes the timezone as an offset from UTC. When the timezone is
-not specified (2011-01-24T17:08), the UTC timezone is assumed. If
-nothing has changed since the *``changes-since``* time, an empty list is
-returned. If data has changed, only the items changed since the
-specified time are returned in the response. For example, performing a
-**GET** against
-https://api.servers.openstack.org/v2.1/servers?\ *``changes-since``*\ =2015-01-24T17:08Z
-would list all servers that have changed since Mon, 24 Jan 2015 17:08:00
-UTC.
-
-To allow clients to keep track of changes, the changes-since filter
-displays items that have been *recently* deleted. Both images and
-servers contain a ``DELETED`` status that indicates that the resource
-has been removed. Implementations are not required to keep track of
-deleted resources indefinitely, so sending a changes since time in the
-distant past may miss deletions.
diff --git a/api-guide/source/polling_changes.rst b/api-guide/source/polling_changes.rst
new file mode 100644
index 00000000000..671ad894341
--- /dev/null
+++ b/api-guide/source/polling_changes.rst
@@ -0,0 +1,81 @@
+=================
+Efficient polling
+=================
+
+The REST API allows you to poll for the status of certain operations by
+performing a **GET** on various elements. Rather than re-downloading and
+re-parsing the full status at each polling interval, your REST client may
+use the ``changes-since`` and/or ``changes-before`` parameters to check
+for changes within a specified time.
+
+The ``changes-since`` time or ``changes-before`` time is specified as
+an `ISO 8601 `__ dateTime
+(`2011-01-24T17:08Z`). The form for the timestamp is **CCYY-MM-DDThh:mm:ss**.
+An optional time zone may be written in by appending the form ±hh:mm
+which describes the timezone as an offset from UTC. When the timezone is
+not specified (`2011-01-24T17:08`), the UTC timezone is assumed.
+
+The following situations need to be considered:
+
+* If nothing has changed since the ``changes-since`` time, an empty list is
+ returned. If data has changed, only the items changed since the specified
+ time are returned in the response. For example, performing a
+ **GET** against::
+
+ https://api.servers.openstack.org/v2.1/servers?changes-since=2015-01-24T17:08Z
+
+ would list all servers that have changed since Mon, 24 Jan 2015 17:08:00
+ UTC.
+
+* If nothing has changed earlier than or equal to the ``changes-before``
+ time, an empty list is returned. If data has changed, only the items
+ changed earlier than or equal to the specified time are returned in the
+ response. For example, performing a **GET** against::
+
+ https://api.servers.openstack.org/v2.1/servers?changes-before=2015-01-24T17:08Z
+
+ would list all servers that have changed earlier than or equal to
+ Mon, 24 Jan 2015 17:08:00 UTC.
+
+* If nothing has changed later than or equal to ``changes-since``, or
+ earlier than or equal to ``changes-before``, an empty list is returned.
+ If data has changed, only the items changed between ``changes-since``
+ time and ``changes-before`` time are returned in the response.
+ For example, performing a **GET** against::
+
+ https://api.servers.openstack.org/v2.1/servers?changes-since=2015-01-24T17:08Z&changes-before=2015-01-25T17:08Z
+
+ would list all servers that have changed later than or equal to Mon,
+ 24 Jan 2015 17:08:00 UTC, and earlier than or equal to Mon, 25 Jan 2015
+ 17:08:00 UTC.
+
+Microversion change history for servers, instance actions and migrations
+regarding ``changes-since`` and ``changes-before``:
+
+* The `2.21 microversion`_ allows reading instance actions for a deleted
+ server resource.
+* The `2.58 microversion`_ allows filtering on ``changes-since`` when listing
+ instance actions for a server.
+* The `2.59 microversion`_ allows filtering on ``changes-since`` when listing
+ migration records.
+* The `2.66 microversion`_ adds the ``changes-before`` filter when listing
+ servers, instance actions and migrations.
+
+Neither the ``changes-since`` filter nor the ``changes-before`` filter
+changes any read-deleted behavior in the os-instance-actions or
+os-migrations APIs. The os-instance-actions API with the 2.21 microversion
+allows retrieving instance actions for a deleted server resource.
+The os-migrations API takes an optional ``instance_uuid`` filter parameter
+but does not support returning deleted migration records.
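+
+As a sketch (the endpoint, token and server ID are placeholders), instance
+actions can be polled with ``changes-since`` using microversion 2.58:
+
+.. code-block:: console
+
+ $ curl -s -H "X-Auth-Token: $TOKEN" \
+ -H "OpenStack-API-Version: compute 2.58" \
+ "http://openstack.example.com/v2.1/servers/$SERVER_ID/os-instance-actions?changes-since=2015-01-24T17:08:00Z"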
+
+To allow clients to keep track of changes, the ``changes-since`` and
+``changes-before`` filters display items that have been *recently*
+deleted. Servers contain a ``DELETED`` status that indicates that the
+resource has been removed. Implementations are not required to keep track
+of deleted resources indefinitely, so sending a ``changes-since`` time or
+a ``changes-before`` time in the distant past may miss deletions.
+
+.. _2.21 microversion: https://docs.openstack.org/nova/latest/reference/api-microversion-history.html#id19
+.. _2.58 microversion: https://docs.openstack.org/nova/latest/reference/api-microversion-history.html#id53
+.. _2.59 microversion: https://docs.openstack.org/nova/latest/reference/api-microversion-history.html#id54
+.. _2.66 microversion: https://docs.openstack.org/nova/latest/reference/api-microversion-history.html#id59
diff --git a/api-guide/source/port_with_resource_request.rst b/api-guide/source/port_with_resource_request.rst
new file mode 100644
index 00000000000..9ca93f2fcff
--- /dev/null
+++ b/api-guide/source/port_with_resource_request.rst
@@ -0,0 +1,53 @@
+=================================
+Using ports with resource request
+=================================
+
+Starting from microversion 2.72 nova supports creating servers with neutron
+ports having a resource request, visible as the admin-only port attribute
+``resource_request``. For example, a neutron port has a resource request if
+it has a QoS minimum bandwidth rule attached. Deleting such servers or
+detaching such ports has worked since the Stein version of nova without
+requiring any specific microversion.
+
+However, the following API operations are still not supported in nova:
+
+* Creating servers with neutron networks having QoS minimum bandwidth rule is
+ not supported. The user needs to pre-create the port in that neutron network
+ and create the server with the pre-created port.
+
+* Attaching Neutron ports and networks having QoS minimum bandwidth rule is not
+ supported.
+
+Additionally, the following API operations are not supported in the 19.0.0 (Stein)
+version of nova:
+
+* Moving (resizing, migrating, live-migrating, evacuating, unshelving after
+ shelve offload) servers with ports having resource request is not yet
+ supported.
+
+As of 20.0.0 (Train), nova supports cold migrating and resizing servers with
+neutron ports having resource requests if both the source and destination
+compute services are upgraded to 20.0.0 (Train) and the
+``[upgrade_levels]/compute`` configuration does not prevent the computes from
+using the latest RPC version. However cross cell resize and cross cell migrate
+operations are still not supported with such ports and Nova will fall back to
+same-cell resize if the server has such ports.
+
+As of 21.0.0 (Ussuri), nova supports evacuating, live migrating and unshelving
+servers with neutron ports having resource requests.
+
+As of 23.0.0 (Wallaby), nova supports attaching neutron ports having QoS
+minimum bandwidth rules.
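+
+As a hypothetical example (the network, QoS policy, image and flavor names
+are placeholders), the port is pre-created in neutron with a QoS minimum
+bandwidth policy and then handed to nova:
+
+.. code-block:: console
+
+ $ openstack port create --network net0 --qos-policy min-bw-policy bw-port
+ $ openstack server create --flavor m1.tiny \
+ --image cirros-0.4.0-x86_64-disk --port bw-port vm-with-bw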
+
+Extended resource request
+~~~~~~~~~~~~~~~~~~~~~~~~~
+
+It is expected that neutron 20.0.0 (Yoga) will implement an extended resource
+request format via the ``port-resource-request-groups`` neutron API
+extension. As of nova 24.0.0 (Xena), nova already supports this extension if
+every nova-compute service is upgraded to the Xena version and the
+``[upgrade_levels]/compute`` configuration does not prevent the computes from
+using the latest RPC version.
+
+See :nova-doc:`the admin guide ` for
+administrative details.
diff --git a/api-guide/source/server_concepts.rst b/api-guide/source/server_concepts.rst
index 320711592e4..62d8331891a 100644
--- a/api-guide/source/server_concepts.rst
+++ b/api-guide/source/server_concepts.rst
@@ -59,9 +59,9 @@ server status is one of the following values:
- ``SHUTOFF``: The server was powered down by the user, either through the
OpenStack Compute API or from within the server. For example, the user
- issued a ``shutdown -h`` command from within the server. If the OpenStack
- Compute manager detects that the VM was powered down, it transitions the
- server to the SHUTOFF status.
+ issued a :command:`shutdown -h` command from within the server.
+ If the OpenStack Compute manager detects that the VM was powered down,
+ it transitions the server to the SHUTOFF status.
- ``SOFT_DELETED``: The server is marked as deleted but will remain in the
cloud for some configurable amount of time. While soft-deleted, an
@@ -69,21 +69,23 @@ server status is one of the following values:
expires, the server will be deleted permanently.
- ``SUSPENDED``: The server is suspended, either by request or
- necessity. This status appears for only the following hypervisors:
- XenServer/XCP, KVM, and ESXi. Administrative users may suspend a
- server if it is infrequently used or to perform system maintenance.
- When you suspend a server, its state is stored on disk, all
- memory is written to disk, and the server is stopped.
- Suspending a server is similar to placing a device in hibernation;
- memory and vCPUs become available to create other servers.
-
-- ``UNKNOWN``: The state of the server is unknown. Contact your cloud
- provider.
+ necessity. See the
+ :nova-doc:`feature support matrix `
+ for supported compute drivers. When you suspend a server, its state is stored
+ on disk, all memory is written to disk, and the server is stopped.
+ Suspending a server is similar to placing a device in hibernation; its
+ occupied resources will not be freed but rather kept for when the server is
+ resumed. If an instance is infrequently used and its occupied resources need
+ to be freed to create other servers, it should be shelved.
+
+- ``UNKNOWN``: The state of the server is unknown. It could be because a part
+ of the infrastructure is temporarily down (see :doc:`down_cells`
+ for more information). Contact your cloud provider.
- ``VERIFY_RESIZE``: System is awaiting confirmation that the server is
operational after a move or resize.
-Server status is caculated from vm_state and task_state, which
+Server status is calculated from vm_state and task_state, which
are exposed to administrators:
- vm_state describes a VM's current stable (not transition) state. That is, if
@@ -93,8 +95,8 @@ are exposed to administrators:
Refer to :nova-doc:`VM States `.
- task_state represents what is happening to the instance at the
- current moment. These tasks can be generic, such as 'spawning', or specific,
- such as 'block_device_mapping'. These task states allow for a better view into
+ current moment. These tasks can be generic, such as `spawning`, or specific,
+ such as `block_device_mapping`. These task states allow for a better view into
what a server is doing.
Server creation
@@ -102,24 +104,34 @@ Server creation
Status Transition:
-``BUILD``
+- ``BUILD``
+
+ While the server is building there are several task state transitions that
+ can occur:
+
+ - ``scheduling``: The request is being scheduled to a compute node.
+ - ``networking``: Setting up network interfaces asynchronously.
+ - ``block_device_mapping``: Preparing block devices (local disks, volumes).
+ - ``spawning``: Creating the guest in the hypervisor.
+
+- ``ACTIVE``
-``ACTIVE``
+ The terminal state for a successfully built and running server.
-``ERROR`` (on error)
+- ``ERROR`` (on error)
-When you create a server, the operation asynchronously provisions a new
-server. The progress of this operation depends on several factors
-including location of the requested image, network I/O, host load, and
-the selected flavor. The progress of the request can be checked by
-performing a **GET** on /servers/*``id``*, which returns a progress
-attribute (from 0% to 100% complete). The full URL to the newly created
-server is returned through the ``Location`` header and is available as a
-``self`` and ``bookmark`` link in the server representation. Note that
-when creating a server, only the server ID, its links, and the
-administrative password are guaranteed to be returned in the request.
-You can retrieve additional attributes by performing subsequent **GET**
-operations on the server.
+ When you create a server, the operation asynchronously provisions a new
+ server. The progress of this operation depends on several factors
+ including location of the requested image, network I/O, host load, and
+ the selected flavor. The progress of the request can be checked by
+ performing a **GET** on /servers/*{server_id}*, which returns a progress
+ attribute (from 0% to 100% complete). The full URL to the newly created
+ server is returned through the ``Location`` header and is available as a
+ ``self`` and ``bookmark`` link in the server representation. Note that
+ when creating a server, only the server ID, its links, and the
+ administrative password are guaranteed to be returned in the request.
+ You can retrieve additional attributes by performing subsequent **GET**
+ operations on the server.
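+
+ For example, the build can be polled from the command line until the task
+ state goes to ``null`` (``vm1`` is a placeholder):
+
+ .. code-block:: console
+
+ $ openstack server show vm1 -f value -c status -c OS-EXT-STS:task_state
+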
Server query
~~~~~~~~~~~~
@@ -131,10 +143,35 @@ by using query options.
For different user roles, the user has different query options set:
- For general user, there is a limited set of server attributes that can be
- used as query option. ``reservation_id``, ``name``, ``status``, ``image``,
- ``flavor``, ``ip``, ``changes-since``, ``ip6``, ``tags``, ``tags-any``,
- ``not-tags``, ``not-tags-any`` are supported options to be used. Other
- options will be ignored by nova silently.
+ used as query options. The supported options are:
+
+ - ``changes-since``
+ - ``flavor``
+ - ``image``
+ - ``ip``
+ - ``ip6`` (New in version 2.5)
+ - ``name``
+ - ``not-tags`` (New in version 2.26)
+ - ``not-tags-any`` (New in version 2.26)
+ - ``reservation_id``
+ - ``status``
+ - ``tags`` (New in version 2.26)
+ - ``tags-any`` (New in version 2.26)
+ - ``changes-before`` (New in version 2.66)
+ - ``locked`` (New in version 2.73)
+ - ``availability_zone`` (New in version 2.83)
+ - ``config_drive`` (New in version 2.83)
+ - ``key_name`` (New in version 2.83)
+ - ``created_at`` (New in version 2.83)
+ - ``launched_at`` (New in version 2.83)
+ - ``terminated_at`` (New in version 2.83)
+ - ``power_state`` (New in version 2.83)
+ - ``task_state`` (New in version 2.83)
+ - ``vm_state`` (New in version 2.83)
+ - ``progress`` (New in version 2.83)
+ - ``user_id`` (New in version 2.83)
+
+ Other options will be silently ignored by nova.
- For administrator, most of the server attributes can be used as query
options. Before the Ocata release, the fields in the database schema of
@@ -144,31 +181,37 @@ For different user roles, the user has different query options set:
the query options are different from the attribute naming in the servers API
response.
-.. code::
- Precondition:
- there are 2 servers existing in cloud with following info:
+Precondition: there are 2 servers existing in the cloud with the following info::
- "servers": [
- {
- "name": "t1",
- "locked": "true",
- ...
- },
- {
- "name": "t2",
- "locked": "false",
- ...
- }
- ]
+ {
+ "servers": [
+ {
+ "name": "t1",
+ "OS-EXT-SRV-ATTR:host": "devstack1",
+ ...
+ },
+ {
+ "name": "t2",
+ "OS-EXT-SRV-ATTR:host": "devstack2",
+ ...
+ }
+ ]
+ }
+
+**Example: General user query server with administrator only options**
- **Example: General user query server with administrator only options**
+Request with non-administrator context: ``GET /servers/detail?host=devstack1``
- Request with non-administrator context:
- GET /servers/detail?locked=1
- Note that 'locked' is not returned through API layer
+.. note::
+
+ The ``host`` query parameter is only for administrator users and
+ the query parameter is ignored if specified by non-administrator users.
+ Thus the API returns servers of both ``devstack1`` and ``devstack2``
+ in this example.
+
+Response::
- Response:
{
"servers": [
{
@@ -182,12 +225,12 @@ For different user roles, the user has different query options set:
]
}
- **Example: Administrator query server with administrator only options**
+**Example: Administrator query server with administrator only options**
+
+Request with administrator context: ``GET /servers/detail?host=devstack1``
- Request with administrator context:
- GET /servers/detail?locked=1
+Response::
- Response:
{
"servers": [
{
@@ -197,10 +240,13 @@ For different user roles, the user has different query options set:
]
}
-There are also some speical query options:
+There are also some special query options:
- ``changes-since`` returns the servers updated after the given time.
- Please see: :doc:`polling_changes-since_parameter`
+ Please see: :doc:`polling_changes`
+
+- ``changes-before`` returns the servers updated before the given time.
+ Please see: :doc:`polling_changes`
- ``deleted`` returns (or excludes) deleted servers
@@ -210,52 +256,80 @@ There are also some speical query options:
- ``all_tenants`` is an administrator query option, which allows the
administrator to query the servers in any tenant.
-.. code::
- **Example: User query server with special keys changes-since**
+**Example: User query server with special keys changes-since or changes-before**
+
+Request: ``GET /servers/detail``
- Precondition:
- GET /servers/detail
+Response::
- Response:
{
"servers": [
{
- "name": "t1"
- "updated": "2015-12-15T15:55:52Z"
+ "name": "t1",
+ "updated": "2015-12-15T15:55:52Z",
...
},
{
"name": "t2",
- "updated": "2015-12-17T15:55:52Z"
+ "updated": "2015-12-17T15:55:52Z",
...
}
]
}
- GET /servers/detail?changes-since='2015-12-16T15:55:52Z'
+Request: ``GET /servers/detail?changes-since='2015-12-16T15:55:52Z'``
+
+Response::
- Response:
{
{
"name": "t2",
- "updated": "2015-12-17T15:55:52Z"
+ "updated": "2015-12-17T15:55:52Z",
...
}
}
+Request: ``GET /servers/detail?changes-before='2015-12-16T15:55:52Z'``
+
+Response::
+
+ {
+ {
+ "name": "t1",
+ "updated": "2015-12-15T15:55:52Z",
+ ...
+ }
+ }
+
+Request:
+``GET /servers/detail?changes-since='2015-12-10T15:55:52Z'&changes-before='2015-12-28T15:55:52Z'``
+
+Response::
+
+ {
+ "servers": [
+ {
+ "name": "t1",
+ "updated": "2015-12-15T15:55:52Z",
+ ...
+ },
+ {
+ "name": "t2",
+ "updated": "2015-12-17T15:55:52Z",
+ ...
+ }
+ ]
+ }
+
There are two kinds of matching in query options: Exact matching and
regex matching.
-.. code::
-
- **Example: User query server using exact matching on host**
+**Example: User query server using exact matching on host**
- Precondition:
- Request with administrator context:
- GET /servers/detail
+Request with administrator context: ``GET /servers/detail``
- Response:
+Response::
{
"servers": [
@@ -272,10 +346,9 @@ regex matching.
]
}
- Request with administrator context:
- GET /servers/detail?host=devstack
+Request with administrator context: ``GET /servers/detail?host=devstack``
- Response:
+Response::
{
"servers": [
@@ -287,13 +360,11 @@ regex matching.
]
}
- **Example: Query server using regex matching on name**
+**Example: Query server using regex matching on name**
- Precondition:
- Request with administrator context:
- GET /servers/detail
+Request with administrator context: ``GET /servers/detail``
- Response:
+Response::
{
"servers": [
@@ -316,10 +387,9 @@ regex matching.
]
}
- Request with administrator context:
- GET /servers/detail?name=t1
+Request with administrator context: ``GET /servers/detail?name=t1``
- Response:
+Response::
{
"servers": [
@@ -338,14 +408,12 @@ regex matching.
]
}
- **Example: User query server using exact matching on host and
- regex matching on name**
+**Example: User query server using exact matching on host and regex
+matching on name**
- Precondition:
- Request with administrator context:
- GET /servers/detail
+Request with administrator context: ``GET /servers/detail``
- Response:
+Response::
{
"servers": [
@@ -367,10 +435,10 @@ regex matching.
]
}
- Request with administrator context:
- GET /servers/detail?host=devstack1&name=test
+Request with administrator context:
+``GET /servers/detail?host=devstack1&name=test``
- Response:
+Response::
{
"servers": [
@@ -382,16 +450,10 @@ regex matching.
]
}
- "name": "t2",
- "updated": "2015-12-17T15:55:52Z"
- ...
- }
- ]
- }
+Request: ``GET /servers/detail?changes-since='2015-12-16T15:55:52Z'``
- GET /servers/detail?changes-since='2015-12-16T15:55:52Z'
+Response::
- Response:
{
{
"name": "t2",
@@ -436,9 +498,9 @@ Server actions
flavor, in essence, scaling the server up or down. The original
server is saved for a period of time to allow rollback if there is a
problem. All resizes should be tested and explicitly confirmed, at
- which time the original server is removed. All resizes are
- automatically confirmed after 24 hours if you do not confirm or
- revert them.
+ which time the original server is removed. The resized server may be
+ automatically confirmed based on the administrator's configuration of
+ the deployment.
Confirm resize action will delete the old server in the virt layer.
The spawned server in the virt layer will be used from then on.
@@ -446,12 +508,6 @@ Server actions
spawned in the virt layer and revert all changes. The original server
will be used from then on.
- Also, there is a periodic task configured by configuration option
- resize_confirm_window(in seconds), if this value is not 0, nova compute
- will check whether the server is in resized state longer than
- value of resize_confirm_window, it will automatically confirm the resize
- of the server.
-
- **Pause**, **Unpause**
You can pause a server by making a pause request. This request stores
@@ -535,13 +591,62 @@ Server actions
- **Lock**, **Unlock**
- Lock a server so no further actions are allowed to the server. This can
- be done by either administrator or the server's owner. By default, only owner
- or administrator can lock the sever, and administrator can overwrite owner's lock.
+ Lock a server so that the following actions by non-admin users are not
+ allowed on the server.
+
+ - Delete Server
+ - Change Administrative Password (changePassword Action)
+ - Confirm Resized Server (confirmResize Action)
+ - Force-Delete Server (forceDelete Action)
+ - Pause Server (pause Action)
+ - Reboot Server (reboot Action)
+ - Rebuild Server (rebuild Action)
+ - Rescue Server (rescue Action)
+ - Resize Server (resize Action)
+ - Restore Soft-Deleted Instance (restore Action)
+ - Resume Suspended Server (resume Action)
+ - Revert Resized Server (revertResize Action)
+ - Shelve-Offload (Remove) Server (shelveOffload Action)
+ - Shelve Server (shelve Action)
+ - Start Server (os-start Action)
+ - Stop Server (os-stop Action)
+ - Suspend Server (suspend Action)
+ - Trigger Crash Dump In Server
+ - Unpause Server (unpause Action)
+ - Unrescue Server (unrescue Action)
+ - Unshelve (Restore) Shelved Server (unshelve Action)
+ - Attach a volume to an instance
+ - Update a volume attachment
+ - Detach a volume from an instance
+ - Create Interface
+ - Detach Interface
+ - Create Or Update Metadata Item
+ - Create or Update Metadata Items
+ - Delete Metadata Item
+ - Replace Metadata Items
+ - Add (Associate) Fixed Ip (addFixedIp Action) (DEPRECATED)
+ - Remove (Disassociate) Fixed Ip (removeFixedIp Action) (DEPRECATED)
+
+ ..
+ NOTE(takashin):
+ The following APIs can be performed by administrators only by default.
+ So they are not listed in the above list.
+
+ - Migrate Server (migrate Action)
+ - Live-Migrate Server (os-migrateLive Action)
+ - Force Migration Complete Action (force_complete Action)
+ - Delete (Abort) Migration
+ - Inject Network Information (injectNetworkInfo Action)
+ - Reset Networking On A Server (resetNetwork Action)
+
+ However, administrators can perform actions on the server
+ even though the server is locked. By default, only the owner or the
+ administrator can lock the server, and the administrator can overwrite the
+ owner's lock along with the ``locked_reason`` if it is specified.
Unlock will unlock a server in locked state so additional
- operations can be performed on the server. By default, only owner or
- administrator can unlock the server.
+ operations can be performed on the server by non-admin users.
+ By default, only the owner or administrator can unlock the server.
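+
+ As a sketch (``vm1`` is a placeholder; the ``--reason`` option requires
+ microversion 2.73 or later):
+
+ .. code-block:: console
+
+ $ openstack --os-compute-api-version 2.73 server lock --reason "maintenance" vm1
+ $ openstack server unlock vm1
+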
- **Rescue**, **Unrescue**
@@ -603,12 +708,85 @@ limit.
Block Device Mapping
~~~~~~~~~~~~~~~~~~~~
-TODO: Add some description about BDM.
+Simply speaking, Block Device Mapping describes how block devices are
+exposed to the server.
+
+For historical reasons, nova has two ways to specify the block device
+mapping in a server creation request body:
+
+- ``block_device_mapping``: This is the legacy format, kept for backward
+ compatibility with the EC2 API.
+- ``block_device_mapping_v2``: This is the recommended format for specifying
+ block device mapping information in a server creation request body.
+
+Users cannot mix the two formats in the same request.
+
+For more information, refer to `Block Device Mapping
+`_.
+
+For the full list of ``block_device_mapping_v2`` parameters available when
+creating a server, see the `API reference
+`_.
+
+**Example for block_device_mapping_v2**
+
+This will create a 100GB volume-type block device from an image with UUID
+``bb02b1a3-bc77-4d17-ab5b-421d89850fca``. It will be used as the primary
+boot device (``boot_index=0``), and this block device will not be deleted after
+we terminate the server. Note that the ``imageRef`` parameter is not required
+in this case since we are creating a volume-backed server.
+
+.. code-block:: json
+
+ {
+ "server": {
+ "name": "volume-backed-server-test",
+ "flavorRef": "52415800-8b69-11e0-9b19-734f1195ff37",
+ "block_device_mapping_v2": [
+ {
+ "boot_index": 0,
+ "uuid": "bb02b1a3-bc77-4d17-ab5b-421d89850fca",
+ "volume_size": "100",
+ "source_type": "image",
+ "destination_type": "volume",
+ "delete_on_termination": false
+ }
+ ]
+ }
+ }
Scheduler Hints
~~~~~~~~~~~~~~~
-TODO: Add description about how to custom scheduling policy for server booting.
+Scheduler hints are a way for the user to influence on which host the scheduler
+places a server. They are pre-determined key-value pairs specified as a
+dictionary separate from the main ``server`` dictionary in the server create
+request. Available scheduler hints vary from cloud to cloud, depending on the
+`cloud's configuration`_.
+
+.. code-block:: json
+
+ {
+ "server": {
+ "name": "server-in-group",
+ "imageRef": "52415800-8b69-11e0-9b19-734f6f006e54",
+ "flavorRef": "52415800-8b69-11e0-9b19-734f1195ff37"
+ },
+ "os:scheduler_hints": {
+ "group": "05a81485-010f-4df1-bbec-7821c85686e8"
+ }
+ }
+
+
+For more information on how to specify scheduler hints refer to
+`the create-server-detail Request section`_ in the Compute API reference.
+
+For more information on how scheduler hints are different from flavor extra
+specs, refer to `this document`_.
+
+.. _cloud's configuration: https://docs.openstack.org/nova/latest/admin/configuration/schedulers.html
+.. _the create-server-detail Request section: https://docs.openstack.org/api-ref/compute/?expanded=create-server-detail#create-server
+.. _this document: https://docs.openstack.org/nova/latest/reference/scheduler-hints-vs-flavor-extra-specs.html#scheduler-hints
Server Consoles
~~~~~~~~~~~~~~~
@@ -616,12 +794,11 @@ Server Consoles
Server Consoles can also be supplied after server launched. There are several
server console services available. First, users can get the console output
from the specified server and can limit the lines of console text by setting
-the length. Second, users can access multiple types of remote consoles. The
-user can use novnc, xvpvnc, rdp-html5, spice-html5, serial, and webmks(start
-from microversion 2.8) through either the OpenStack dashboard or the command
-line. Refer to :nova-doc:`Configure remote console access
-`. Specifically for Xenserver, it provides
-the ability to create, delete, detail, list specified server vnc consoles.
+the length. Second, users can access multiple types of remote consoles. The
+user can use ``novnc``, ``rdp-html5``, ``spice-html5``, ``serial``, and
+``webmks`` (starting from microversion 2.8) through either the OpenStack
+dashboard or the command line. Refer to :nova-doc:`Configure remote console
+access `.
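+
+For example, a remote console URL can be requested from the command line
+(``vm1`` is a placeholder):
+
+.. code-block:: console
+
+ $ openstack console url show --novnc vm1
+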
Server networks
~~~~~~~~~~~~~~~
@@ -631,28 +808,6 @@ time. One or more networks can be specified. User can also specify a
specific port on the network or the fixed IP address to assign to the
server interface.
-Considerations
-~~~~~~~~~~~~~~
-
-- The maximum limit refers to the number of bytes in the decoded data
- and not the number of characters in the encoded data.
-
-- The maximum number of file path/content pairs that you can supply is
- also determined by the compute provider and is defined by the
- maxPersonality absolute limit.
-
-- The absolute limit, maxPersonalitySize, is a byte limit that is
- guaranteed to apply to all images in the deployment. Providers can
- set additional per-image personality limits.
-
-- The file injection might not occur until after the server is built and
- booted.
-
-- After file injection, personality files are accessible by only system
- administrators. For example, on Linux, all files have root and the root
- group as the owner and group owner, respectively, and allow user and
- group read access only (octal 440).
-
Server access addresses
~~~~~~~~~~~~~~~~~~~~~~~
@@ -672,7 +827,7 @@ assigned at creation time.
**Example: Create server with access IP: JSON request**
-.. code::
+.. code-block:: json
{
"server": {
@@ -690,7 +845,7 @@ assigned at creation time.
**Example: Create server with multiple access IPs: JSON request**
-.. code::
+.. code-block:: json
{
"server": {
@@ -769,7 +924,7 @@ a cloud:
This process can be repeated until the whole cloud has been updated,
usually using a pool of empty hosts instead of just one.
-- **Resource Optimization**
+- **Resource Optimization**
To reduce energy usage, some cloud operators will try and move
servers so they fit into the minimum number of hosts, allowing
@@ -880,10 +1035,11 @@ Configure Guest OS
Metadata API
------------
-Nova provides a metadata api for servers to retrieve server specific metadata.
-Neutron ensures this metadata api can be accessed through a predefined ip
-address (169.254.169.254). For more details, see :nova-doc:`Metadata Service
-`.
+
+Nova provides a metadata API for servers to retrieve server specific metadata.
+Neutron ensures this metadata API can be accessed through a predefined IP
+address, ``169.254.169.254``. For more details, refer to the :nova-doc:`user
+guide `.
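+
+For example, from inside the server the metadata can be fetched over HTTP:
+
+.. code-block:: console
+
+ $ curl http://169.254.169.254/openstack/latest/meta_data.json
+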
Config Drive
------------
@@ -891,20 +1047,19 @@ Config Drive
Nova is able to write metadata to a special configuration drive that attaches
to the server when it boots. The server can mount this drive and read files
from it to get information that is normally available through the metadata
-service. For more details, see :nova-doc:`Config Drive
-`.
+service. For more details, refer to the :nova-doc:`user guide
+`.
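
A sketch of reading the same metadata from a config drive instead, assuming the
drive (a device labelled ``config-2``) has already been mounted at
``/mnt/config``:

.. code-block:: python

    import json
    from pathlib import Path

    # e.g. after: mount -L config-2 /mnt/config
    meta = json.loads(
        Path("/mnt/config/openstack/latest/meta_data.json").read_text())
    print(meta["uuid"])
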
User data
---------
+
A user data file is a special key in the metadata service that holds a file
that cloud-aware applications in the server can access.
-Nova has two ways to send user data to the deployed server, one is by
-metadata service to let server able to access to its metadata through
-a predefined ip address (169.254.169.254), then other way is to use config
-drive which will wrap metadata into a iso9660 or vfat format disk so that
-the deployed server can consume it by active engines such as cloud-init
-during its boot process.
+This information can be accessed via the metadata API or a config drive. The
+latter allows the deployed server to consume the data with boot-time agents
+such as cloud-init, even when network connectivity is not an option.
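
A hedged sketch of passing user data on server create; the API expects the
payload Base64-encoded, and the endpoint, token and IDs below are placeholders:

.. code-block:: python

    import base64

    import requests

    COMPUTE = "https://compute.example.com/v2.1"  # assumed compute endpoint
    TOKEN = "..."                                 # assumed valid Keystone token

    cloud_config = "#cloud-config\npackages:\n  - htop\n"
    body = {"server": {
        "name": "demo",
        "imageRef": "70a599e0-31e7-49b7-b260-868f441e862b",  # hypothetical
        "flavorRef": "1",
        # user_data must be Base64-encoded in the request body
        "user_data": base64.b64encode(cloud_config.encode()).decode(),
    }}
    resp = requests.post(f"{COMPUTE}/servers",
                         headers={"X-Auth-Token": TOKEN}, json=body)
    resp.raise_for_status()
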
Server personality
------------------
@@ -923,3 +1078,24 @@ Follow these guidelines when you inject files:
- Encode the file contents as a Base64 string. The maximum size of the
file contents is determined by the compute provider and may vary
based on the image that is used to create the server.
+
+Considerations:
+
+- The maximum limit refers to the number of bytes in the decoded data
+  and not the number of characters in the encoded data (see the sketch
+  after this list).
+
+- The maximum number of file path/content pairs that you can supply is
+ also determined by the compute provider and is defined by the
+ maxPersonality absolute limit.
+
+- The absolute limit, maxPersonalitySize, is a byte limit that is
+ guaranteed to apply to all images in the deployment. Providers can
+ set additional per-image personality limits.
+
+- The file injection might not occur until after the server is built and
+ booted.
+
+- After file injection, personality files are accessible by only system
+ administrators. For example, on Linux, all files have root and the root
+ group as the owner and group owner, respectively, and allow user and
+ group read access only (octal 440).
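
The following sketch shows how the Base64 encoding interacts with those byte
limits: the limit applies to the decoded file contents, not to the longer
encoded string (the 10240-byte value below is an assumed deployment limit):

.. code-block:: python

    import base64

    contents = b"Welcome to my cloud server!\n"
    encoded = base64.b64encode(contents).decode()

    personality = [{"path": "/etc/motd", "contents": encoded}]
    # maxPersonalitySize counts the decoded bytes, not the Base64 characters
    assert len(contents) <= 10240       # assumed per-deployment limit
    assert len(encoded) > len(contents)  # encoding inflates size by ~4/3
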
diff --git a/api-guide/source/users.rst b/api-guide/source/users.rst
index 7128a5ac66f..a0b74374a2f 100644
--- a/api-guide/source/users.rst
+++ b/api-guide/source/users.rst
@@ -27,7 +27,9 @@ Keystone middleware is used to authenticate users and identify their roles.
The Compute API uses these roles, along with oslo.policy, to decide
what the user is authorized to do.
-TODO - link to compute admin guide for details.
+Refer to the
+:nova-doc:`compute admin guide `
+for details.
Personas used in this guide
===========================
@@ -47,16 +49,18 @@ cloud administrator permissions, such as a read-only role that is able to view
a list of servers for a specific tenant but is not able to perform any
actions on any of them.
-Note: this is not attempting to be an exhaustive set of personas that consider
-various facets of the different users but instead aims to be a minimal set of
-users such that we use a consistent terminology throughout this document.
+.. note::
-TODO - could assign names to these users, or similar, to make it more "real".
+ This is not an exhaustive set of personas covering every facet of the
+ different users; instead, it aims to be a minimal set that lets us use
+ consistent terminology throughout this document.
Discovering Policy
==================
An API to discover what actions you are authorized to perform is still a work
-in progress. Currently this reported by a HTTP 403 error.
+in progress. Currently this is reported by an HTTP 403
+:ref:`error `.
-TODO - link to the doc on errors.
+Refer to the :nova-doc:`configuration guide ` for a list
+of policy rules along with their default values.
diff --git a/api-guide/source/versions.rst b/api-guide/source/versions.rst
index ee8ce9fed6c..4019899e812 100644
--- a/api-guide/source/versions.rst
+++ b/api-guide/source/versions.rst
@@ -4,8 +4,8 @@ Versions
The OpenStack Compute API uses both a URI and a MIME type versioning
scheme. In the URI scheme, the first element of the path contains the
-target version identifier (e.g. https://servers.api.openstack.org/
-v2.1/...). The MIME type versioning scheme uses HTTP content negotiation
+target version identifier (e.g. `https://servers.api.openstack.org/
+v2.1/`...). The MIME type versioning scheme uses HTTP content negotiation
where the ``Accept`` or ``Content-Type`` header contains a MIME type
that identifies the version (application/vnd.openstack.compute.v2.1+json).
A version MIME type is always linked to a base MIME type, such as
@@ -37,7 +37,7 @@ Permanent Links
The MIME type versioning approach allows for creating of permanent
links, because the version scheme is not specified in the URI path:
-https://api.servers.openstack.org/224532/servers/123.
+`https://api.servers.openstack.org/224532/servers/123`.
If a request is made without a version specified in the URI or via HTTP
headers, then a multiple-choices response (300) follows that provides
@@ -99,13 +99,13 @@ everything following that truncated) returned from the authentication system.
You can also obtain additional information about a specific version by
performing a **GET** on the base version URL (such as,
-``https://servers.api.openstack.org/v2.1/``). Version request URLs must
-always end with a trailing slash (``/``). If you omit the slash, the
+`https://servers.api.openstack.org/v2.1/`). Version request URLs must
+always end with a trailing slash (`/`). If you omit the slash, the
server might respond with a 302 redirection request.
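
As an illustration, a short probe of the version document (the endpoint is a
placeholder; whether a token is required depends on the deployment):

.. code-block:: python

    import requests

    # Without the trailing slash the server might answer with a 302 ...
    resp = requests.get("https://compute.example.com/v2.1",
                        allow_redirects=False)
    print(resp.status_code)  # possibly 302

    # ... with the trailing slash the version document is returned directly.
    resp = requests.get("https://compute.example.com/v2.1/")
    version = resp.json()["version"]
    print(version["min_version"], version["version"])  # microversion range
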
For examples of the list versions and get version details requests and
-responses, see `*API versions*
-`__.
+responses, see `API versions
+`__.
The detailed version response contains pointers to both a human-readable
and a machine-processable description of the API service.
diff --git a/api-ref/source/conf.py b/api-ref/source/conf.py
index 53c90dd1b88..ddcca926e72 100644
--- a/api-ref/source/conf.py
+++ b/api-ref/source/conf.py
@@ -22,8 +22,6 @@
# All configuration values have a default; values that are commented out
# serve to show the default.
-from nova.version import version_info
-
extensions = [
'openstackdocstheme',
@@ -42,25 +40,15 @@
master_doc = 'index'
# General information about the project.
-project = u'Compute API Reference'
copyright = u'2010-present, OpenStack Foundation'
# openstackdocstheme options
-repository_name = 'openstack/nova'
-bug_project = 'nova'
-bug_tag = 'api-ref'
-
-# The version info for the project you're documenting, acts as replacement for
-# |version| and |release|, also used in various other places throughout the
-# built documents.
-#
-# The full version, including alpha/beta/rc tags.
-release = version_info.release_string()
-# The short X.Y version.
-version = version_info.version_string()
+openstackdocs_repo_name = 'openstack/nova'
+openstackdocs_bug_project = 'nova'
+openstackdocs_bug_tag = 'api-ref'
# The name of the Pygments (syntax highlighting) style to use.
-pygments_style = 'sphinx'
+pygments_style = 'native'
# -- Options for HTML output --------------------------------------------------
@@ -75,10 +63,6 @@
"sidebar_mode": "toc",
}
-# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
-# using the given strftime format.
-html_last_updated_fmt = '%Y-%m-%d %H:%M'
-
# -- Options for LaTeX output -------------------------------------------------
# Grouping the document tree into LaTeX files. List of tuples
diff --git a/api-ref/source/extensions.inc b/api-ref/source/extensions.inc
index 28123a89d05..7621dd3db47 100644
--- a/api-ref/source/extensions.inc
+++ b/api-ref/source/extensions.inc
@@ -19,9 +19,6 @@ code to interact with every cloud.
As such, the entire extensions concept is deprecated, and will be
removed in the near future.
-For information about extensions, see `Extensions
-`__.
-
List Extensions
===============
diff --git a/api-ref/source/flavors.inc b/api-ref/source/flavors.inc
index d3d8e4d453a..0216ce29838 100644
--- a/api-ref/source/flavors.inc
+++ b/api-ref/source/flavors.inc
@@ -108,9 +108,9 @@ Response
- extra_specs: extra_specs_2_61
-**Example Create Flavor (v2.61)**
+**Example Create Flavor (v2.75)**
-.. literalinclude:: ../../doc/api_samples/flavor-manage/v2.61/flavor-create-post-resp.json
+.. literalinclude:: ../../doc/api_samples/flavor-manage/v2.75/flavor-create-post-resp.json
:language: javascript
List Flavors With Details
@@ -158,9 +158,9 @@ Response
- os-flavor-access:is_public: flavor_is_public
- extra_specs: extra_specs_2_61
-**Example List Flavors With Details (v2.61)**
+**Example List Flavors With Details (v2.75)**
-.. literalinclude:: ../../doc/api_samples/flavors/v2.61/flavors-detail-resp.json
+.. literalinclude:: ../../doc/api_samples/flavors/v2.75/flavors-detail-resp.json
:language: javascript
Show Flavor Details
@@ -201,9 +201,9 @@ Response
- os-flavor-access:is_public: flavor_is_public
- extra_specs: extra_specs_2_61
-**Example Show Flavor Details (v2.61)**
+**Example Show Flavor Details (v2.75)**
-.. literalinclude:: ../../doc/api_samples/flavors/v2.61/flavor-get-resp.json
+.. literalinclude:: ../../doc/api_samples/flavors/v2.75/flavor-get-resp.json
:language: javascript
Update Flavor Description
@@ -244,7 +244,7 @@ Response
- flavor: flavor
- name: flavor_name
- - description: flavor_description_resp
+ - description: flavor_description_resp_no_min
- id: flavor_id_body
- ram: flavor_ram
- disk: flavor_disk
@@ -258,9 +258,9 @@ Response
- extra_specs: extra_specs_2_61
-**Example Update Flavor Description (v2.61)**
+**Example Update Flavor Description (v2.75)**
-.. literalinclude:: ../../doc/api_samples/flavor-manage/v2.61/flavor-update-resp.json
+.. literalinclude:: ../../doc/api_samples/flavor-manage/v2.75/flavor-update-resp.json
:language: javascript
Delete Flavor
diff --git a/api-ref/source/images.inc b/api-ref/source/images.inc
index 9b7bb3a25dc..621641281f7 100644
--- a/api-ref/source/images.inc
+++ b/api-ref/source/images.inc
@@ -13,7 +13,7 @@
The image metadata APIs will fail with a 404 starting from
microversion 2.39.
See: `Relevant Image APIs
- `__.
+ `__.
Lists, shows details and deletes images.
Also sets, lists, shows details for, creates, updates, and deletes image metadata.
@@ -21,7 +21,7 @@ Also sets, lists, shows details, create, update and deletes image metadata.
An image is a collection of files that you use to create and rebuild a
server. By default, operators provide pre-built operating system images.
You can also create custom images. See: `Create Image Action
-`__.
+`__.
By default, the ``policy.json`` file authorizes all users to view the
image size in the ``OS-EXT-IMG-SIZE:size`` extended attribute.
@@ -103,7 +103,7 @@ Response
- name: image_name
- minRam: minRam_body
- minDisk: minDisk_body
- - metadata: metadata_object
+ - metadata: metadata_object
- created: created
- updated: updated
- status: image_status
@@ -147,7 +147,7 @@ Response
- name: image_name
- minRam: minRam_body
- minDisk: minDisk_body
- - metadata: metadata_object
+ - metadata: metadata_object
- created: created
- updated: updated
- status: image_status
@@ -208,7 +208,7 @@ Response
.. rest_parameters:: parameters.yaml
- - metadata: metadata_object
+ - metadata: metadata_object
**Example List Image Metadata Details: JSON response**
@@ -233,7 +233,7 @@ Request
.. rest_parameters:: parameters.yaml
- image_id: image_id
- - metadata: metadata_object
+ - metadata: metadata_object
**Example Create Image Metadata: JSON request**
@@ -245,7 +245,7 @@ Response
.. rest_parameters:: parameters.yaml
- - metadata: metadata_object
+ - metadata: metadata_object
**Example Create Image Metadata: JSON response**
@@ -270,7 +270,7 @@ Request
.. rest_parameters:: parameters.yaml
- image_id: image_id
- - metadata: metadata_object
+ - metadata: metadata_object
**Example Update Image Metadata: JSON request**
@@ -282,7 +282,7 @@ Response
.. rest_parameters:: parameters.yaml
- - metadata: metadata_object
+ - metadata: metadata_object
**Example Update Image Metadata: JSON response**
diff --git a/api-ref/source/index.rst b/api-ref/source/index.rst
index e52c84cc526..e0bba1504a5 100644
--- a/api-ref/source/index.rst
+++ b/api-ref/source/index.rst
@@ -6,7 +6,7 @@
This is a reference for the OpenStack Compute API which is provided by the Nova
project. To learn more about the OpenStack Compute API concepts, please refer to
-the `API guide `_.
+the `API guide `_.
.. rest_expand_all::
@@ -16,13 +16,13 @@ the `API guide `_.
.. include:: servers.inc
.. include:: servers-actions.inc
.. include:: servers-action-fixed-ip.inc
-.. include:: servers-action-evacuate.inc
.. include:: servers-action-deferred-delete.inc
.. include:: servers-action-console-output.inc
.. include:: servers-action-shelve.inc
.. include:: servers-action-crash-dump.inc
.. include:: servers-action-remote-consoles.inc
.. include:: servers-admin-action.inc
+.. include:: servers-action-evacuate.inc
.. include:: servers-remote-consoles.inc
.. include:: server-security-groups.inc
.. include:: diagnostics.inc
@@ -37,12 +37,9 @@ the `API guide `_.
.. include:: os-flavor-extra-specs.inc
.. include:: os-keypairs.inc
.. include:: limits.inc
-.. include:: os-agents.inc
.. include:: os-aggregates.inc
.. include:: os-assisted-volume-snapshots.inc
.. include:: os-availability-zone.inc
-.. include:: os-cells.inc
-.. include:: os-consoles.inc
.. include:: os-hypervisors.inc
.. include:: os-instance-usage-audit-log.inc
.. include:: os-migrations.inc
@@ -54,6 +51,15 @@ the `API guide `_.
.. include:: os-services.inc
.. include:: os-simple-tenant-usage.inc
.. include:: os-server-external-events.inc
+.. include:: server-topology.inc
+
+===============
+Deprecated APIs
+===============
+
+This section contains references for APIs which are deprecated and usually
+limited to some maximum microversion.
+
.. include:: extensions.inc
.. include:: os-networks.inc
.. include:: os-volumes.inc
@@ -63,7 +69,6 @@ the `API guide `_.
.. include:: os-floating-ip-pools.inc
.. include:: os-floating-ips.inc
.. include:: os-security-groups.inc
-.. include:: os-security-group-default-rules.inc
.. include:: os-security-group-rules.inc
.. include:: os-hosts.inc
@@ -81,3 +86,7 @@ Compute API in the past, but no longer exist.
.. include:: os-fixed-ips.inc
.. include:: os-floating-ips-bulk.inc
.. include:: os-floating-ip-dns.inc
+.. include:: os-cells.inc
+.. include:: os-consoles.inc
+.. include:: os-security-group-default-rules.inc
+.. include:: os-agents.inc
diff --git a/api-ref/source/limits.inc b/api-ref/source/limits.inc
index 2fb3030780f..2329c67faaf 100644
--- a/api-ref/source/limits.inc
+++ b/api-ref/source/limits.inc
@@ -32,25 +32,25 @@ Response
- limits: limits
- absolute: limits_absolutes
- - maxImageMeta: image_metadata_items
- - maxPersonality: injected_files
- - maxPersonalitySize: injected_file_content_bytes
- - maxSecurityGroupRules: security_group_rules_quota
- - maxSecurityGroups: security_groups_quota
- maxServerGroupMembers: server_group_members
- maxServerGroups: server_groups
- maxServerMeta: metadata_items
- maxTotalCores: cores
- - maxTotalFloatingIps: floating_ips
- maxTotalInstances: instances
- maxTotalKeypairs: key_pairs
- maxTotalRAMSize: ram
- totalCoresUsed: total_cores_used
- - totalFloatingIpsUsed: total_floatingips_used
- totalInstancesUsed: total_instances_used
- totalRAMUsed: total_ram_used
- - totalSecurityGroupsUsed: total_security_groups_used
- totalServerGroupsUsed: total_server_groups_used
+ - maxSecurityGroupRules: security_group_rules_quota
+ - maxSecurityGroups: security_groups_quota
+ - maxTotalFloatingIps: floating_ips
+ - totalFloatingIpsUsed: total_floatingips_used
+ - totalSecurityGroupsUsed: total_security_groups_used
+ - maxImageMeta: image_metadata_items
+ - maxPersonality: injected_files
+ - maxPersonalitySize: injected_file_content_bytes
- rate: limits_rates
**Example Show Rate And Absolute Limits: JSON response**
diff --git a/api-ref/source/os-agents.inc b/api-ref/source/os-agents.inc
index 33bd9fbd3a7..f9ab86a62c3 100644
--- a/api-ref/source/os-agents.inc
+++ b/api-ref/source/os-agents.inc
@@ -11,6 +11,12 @@ hypervisor-specific extension is currently only for the Xen driver. Use of
guest agents is possible only if the underlying service provider uses
the Xen driver.
+.. warning::
+
+ These APIs only work with the Xen virt driver, which was deprecated in the
+ 20.0.0 (Train) release.
+ They were removed in the 22.0.0 (Victoria) release.
+
List Agent Builds
=================
@@ -20,7 +26,7 @@ Lists agent builds.
Normal response codes: 200
-Error response codes: unauthorized(401), forbidden(403)
+Error response codes: unauthorized(401), forbidden(403), gone(410)
Request
-------
@@ -58,7 +64,8 @@ Creates an agent build.
Normal response codes: 200
-Error response codes: badRequest(400), unauthorized(401), forbidden(403), conflict(409)
+Error response codes: badRequest(400), unauthorized(401), forbidden(403),
+conflict(409), gone(410)
Request
-------
@@ -106,7 +113,8 @@ Updates an agent build.
Normal response codes: 200
-Error response codes: badRequest(400), unauthorized(401), forbidden(403), itemNotFound(404)
+Error response codes: badRequest(400), unauthorized(401), forbidden(403),
+itemNotFound(404), gone(410)
Request
-------
@@ -150,7 +158,8 @@ Deletes an existing agent build.
Normal response codes: 200
-Error response codes: badRequest(400), unauthorized(401), forbidden(403), itemNotFound(404)
+Error response codes: badRequest(400), unauthorized(401), forbidden(403),
+itemNotFound(404), gone(410)
Request
-------
diff --git a/api-ref/source/os-aggregates.inc b/api-ref/source/os-aggregates.inc
index ecfefe59ac6..6021c8e8607 100644
--- a/api-ref/source/os-aggregates.inc
+++ b/api-ref/source/os-aggregates.inc
@@ -5,8 +5,12 @@
================================
Creates and manages host aggregates. An aggregate assigns metadata to
-groups of compute nodes. Aggregates are only visible to the cloud
-provider.
+groups of compute nodes.
+
+Policy defaults enable only users with the administrative role to perform
+operations with aggregates. Cloud providers can change these permissions
+through `policy file configuration
+`__.
List Aggregates
===============
@@ -31,7 +35,7 @@ Response
- deleted: deleted
- hosts: aggregate_host_list
- id: aggregate_id_body
- - metadata: aggregate_metadata
+ - metadata: aggregate_metadata_response
- name: aggregate_name
- updated_at: updated_consider_null
- uuid: aggregate_uuid
@@ -60,7 +64,7 @@ Request
- aggregate: aggregate
- name: aggregate_name
- - availability_zone: aggregate_az_optional
+ - availability_zone: aggregate_az_optional_create
**Example Create Aggregate: JSON request**
@@ -117,7 +121,7 @@ Response
- deleted: deleted
- hosts: hosts
- id: aggregate_id_body
- - metadata: aggregate_metadata
+ - metadata: aggregate_metadata_response
- name: aggregate_name
- updated_at: updated_consider_null
- uuid: aggregate_uuid
@@ -149,7 +153,7 @@ Request
- aggregate_id: aggregate_id
- aggregate: aggregate
- name: aggregate_name_optional
- - availability_zone: aggregate_az_optional
+ - availability_zone: aggregate_az_optional_update
**Example Update Aggregate: JSON request**
@@ -168,7 +172,7 @@ Response
- deleted: deleted
- hosts: hosts
- id: aggregate_id_body
- - metadata: aggregate_metadata
+ - metadata: aggregate_metadata_response
- name: aggregate_name
- updated_at: updated_consider_null
- uuid: aggregate_uuid
@@ -241,7 +245,7 @@ Response
- deleted: deleted
- hosts: hosts
- id: aggregate_id_body
- - metadata: aggregate_metadata
+ - metadata: aggregate_metadata_response
- name: aggregate_name
- updated_at: updated_consider_null
- uuid: aggregate_uuid
@@ -291,7 +295,7 @@ Response
- deleted: deleted
- hosts: hosts
- id: aggregate_id_body
- - metadata: aggregate_metadata
+ - metadata: aggregate_metadata_response
- name: aggregate_name
- updated_at: updated_consider_null
- uuid: aggregate_uuid
@@ -322,7 +326,7 @@ Request
- aggregate_id: aggregate_id
- set_metadata: set_metadata
- - metadata: metadata_object
+ - metadata: aggregate_metadata_request
**Example Create Or Update Aggregate Metadata: JSON request**
@@ -341,7 +345,7 @@ Response
- deleted: deleted
- hosts: hosts
- id: aggregate_id_body
- - metadata: aggregate_metadata
+ - metadata: aggregate_metadata_response
- name: aggregate_name
- updated_at: updated_consider_null
- uuid: aggregate_uuid
@@ -350,3 +354,36 @@ Response
.. literalinclude:: ../../doc/api_samples/os-aggregates/v2.41/aggregates-metadata-post-resp.json
:language: javascript
+
+Request Image Pre-caching for Aggregate
+=======================================
+
+.. rest_method:: POST /os-aggregates/{aggregate_id}/images
+
+Requests that a set of images be pre-cached on compute nodes within the referenced aggregate.
+
+This API is available starting with microversion 2.81.
+
+Normal response codes: 202
+
+Error response codes: badRequest(400), unauthorized(401), forbidden(403),
+itemNotFound(404)
+
+Request
+-------
+
+.. rest_parameters:: parameters.yaml
+
+ - aggregate_id: aggregate_id
+ - cache: cache
+ - cache.id: image_id_body
+
+**Example Request Image pre-caching for Aggregate (v2.81): JSON request**
+
+.. literalinclude:: ../../doc/api_samples/os-aggregates/v2.81/aggregate-images-post-req.json
+ :language: javascript
+
+Response
+--------
+
+The response body is always empty.
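
As an illustration, a hedged sketch of this request; the aggregate ID, image
UUID, endpoint and token are placeholders:

.. code-block:: python

    import requests

    COMPUTE = "https://compute.example.com/v2.1"  # assumed compute endpoint

    resp = requests.post(
        f"{COMPUTE}/os-aggregates/1/images",
        headers={
            "X-Auth-Token": "...",                # assumed admin token
            # image pre-caching is available starting with microversion 2.81
            "OpenStack-API-Version": "compute 2.81",
        },
        json={"cache": [{"id": "70a599e0-31e7-49b7-b260-868f441e862b"}]},
    )
    assert resp.status_code == 202  # the response body is always empty
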
diff --git a/api-ref/source/os-availability-zone.inc b/api-ref/source/os-availability-zone.inc
index 9869dc596c3..bcd9a3081ba 100644
--- a/api-ref/source/os-availability-zone.inc
+++ b/api-ref/source/os-availability-zone.inc
@@ -1,5 +1,7 @@
.. -*- rst -*-
+.. _os-availability-zone:
+
===========================================
Availability zones (os-availability-zone)
===========================================
diff --git a/api-ref/source/os-baremetal-nodes.inc b/api-ref/source/os-baremetal-nodes.inc
index 0208bb453c8..c79b5bd77c3 100644
--- a/api-ref/source/os-baremetal-nodes.inc
+++ b/api-ref/source/os-baremetal-nodes.inc
@@ -11,7 +11,7 @@
Nova has deprecated all the proxy APIs and users should use the native
APIs instead. These will fail with a 404 starting from microversion 2.36.
See: `Relevant Bare metal APIs
- `__.
+ `__.
Bare metal nodes.
diff --git a/api-ref/source/os-cells.inc b/api-ref/source/os-cells.inc
index cbc20ad42b4..452f03459d4 100644
--- a/api-ref/source/os-cells.inc
+++ b/api-ref/source/os-cells.inc
@@ -1,8 +1,4 @@
.. -*- rst -*-
-.. needs:parameter_verification
-.. needs:example_verification
-.. needs:body_verification
-
==============================
Cells (os-cells, capacities)
@@ -11,10 +7,13 @@
Adds neighbor cells, lists neighbor cells, and shows the capabilities of
the local cell. By default, only administrators can manage cells.
-.. note:: These APIs refer to a Cells v1 deployment which is optional and not
- recommended for new deployments of Nova. These are not used with Cells v2
- which is required beginning with the 15.0.0 Ocata release where all Nova
- deployments consist of at least one Cells v2 cell.
+.. warning::
+
+ These APIs refer to a Cells v1 deployment which was deprecated in the 16.0.0
+ Pike release. These are not used with Cells v2 which is required beginning
+ with the 15.0.0 Ocata release where all Nova deployments consist of at least
+ one Cells v2 cell.
+ They were removed in the 20.0.0 Train release.
List Cells
==========
@@ -26,7 +25,7 @@ Lists cells.
Normal response codes: 200
Error response codes: badRequest(400), unauthorized(401), forbidden(403),
-NotImplemented(501)
+gone(410), notImplemented(501)
Request
-------
@@ -37,8 +36,6 @@ Request
- limit: limit_simple
- offset: offset_simple
-.. TODO(cdent): How do we indicate optionality of a URI parameter?
-
Response
--------
@@ -54,14 +51,10 @@ Create Cell
Create a new cell.
-Normal response code: 200
+Normal response codes: 200
Error response codes: badRequest(400), unauthorized(401), forbidden(403),
-NotImplemented(501)
-
-.. TODO(cdent): need to figure out body stuff for request and response
-
-.. TODO(cdent): need a sample
+gone(410), notImplemented(501)
Capacities
==========
@@ -70,10 +63,10 @@ Capacities
Retrieve capacities.
-Error response codes: badRequest(400), unauthorized(401), forbidden(403),
-NotImplemented(501)
+Normal response codes: 200
-.. TODO(cdent): Need to do more digging, no idea.
+Error response codes: badRequest(400), unauthorized(401), forbidden(403),
+gone(410), notImplemented(501)
List Cells With Details
=======================
@@ -85,7 +78,7 @@ Lists cells with details of capabilities.
Normal response codes: 200
Error response codes: badRequest(400), unauthorized(401), forbidden(403),
-NotImplemented(501)
+gone(410), notImplemented(501)
Request
-------
@@ -95,17 +88,6 @@ Request
- limit: limit_simple
- offset: offset_simple
-Response
---------
-
-**Example List Cells With Details: JSON response**
-
-.. TODO(cdent): This sample was initially list with an empty list of cells.
- The newly listed sample does not yet exist.
-
-.. TODO(cdent): literal-include: ../../doc/api_samples/os-cells/cells-list-details-resp.json
- :language: javascript
-
Info For This Cell
==================
@@ -113,12 +95,10 @@ Info For This Cell
Retrieve info about the current cell.
-Normal response code: 200
+Normal response codes: 200
Error response codes: badRequest(400), unauthorized(401), forbidden(403),
-NotImplemented(501)
-
-.. TODO(cdent): this is weird, data is structured entirely differently.
+gone(410), notImplemented(501)
Show Cell Data
==============
@@ -130,7 +110,7 @@ Shows data for a cell.
Normal response codes: 200
Error response codes: badRequest(400), unauthorized(401), forbidden(403),
-itemNotFound(404), NotImplemented(501)
+itemNotFound(404), gone(410), notImplemented(501)
Request
-------
@@ -150,17 +130,22 @@ Response
Update a Cell
=============
-.. rest_method:: PUT /os-cells/{cell_od}
+.. rest_method:: PUT /os-cells/{cell_id}
Update an existing cell.
-Normal response code: 200
+Normal response codes: 200
Error response codes: badRequest(400), unauthorized(401), forbidden(403),
-itemNotFound(404), NotImplemented(501)
+itemNotFound(404), gone(410), notImplemented(501)
-.. TODO(cdent): Figure out what's going on here.
+Request
+-------
+
+.. rest_parameters:: parameters.yaml
+
+ - cell_id: cell_id
Delete a Cell
=============
@@ -169,10 +154,17 @@ Delete a Cell
Remove a cell.
-Normal response code: 200
+Normal response codes: 200
Error response codes: badRequest(400), unauthorized(401), forbidden(403),
-itemNotFound(404), NotImplemented(501)
+itemNotFound(404), gone(410), notImplemented(501)
+
+Request
+-------
+
+.. rest_parameters:: parameters.yaml
+
+ - cell_id: cell_id
Show Cell Capacities
====================
@@ -181,12 +173,10 @@ Show Cell Capacities
Shows capacities for a cell.
-.. TODO(cdent): What's a capacities.
-
-Normal response codes: 200,501
+Normal response codes: 200
Error response codes: badRequest(400), unauthorized(401), forbidden(403),
-itemNotFound(404), NotImplemented(501)
+itemNotFound(404), gone(410), notImplemented(501)
Request
-------
diff --git a/api-ref/source/os-cloudpipe.inc b/api-ref/source/os-cloudpipe.inc
index 584e7fa22e5..6898bb06c55 100644
--- a/api-ref/source/os-cloudpipe.inc
+++ b/api-ref/source/os-cloudpipe.inc
@@ -33,7 +33,7 @@ Response
- created_at: created
- instance_id: instance_id_cloudpipe
- internal_ip: fixed_ip
- - project_id: project_id_instance_action
+ - project_id: project_id_server
- public_ip: vpn_public_ip_resp
- public_port: vpn_public_port_resp
- state: vpn_state
diff --git a/api-ref/source/os-consoles.inc b/api-ref/source/os-consoles.inc
index b19bf706659..fe3c6545897 100644
--- a/api-ref/source/os-consoles.inc
+++ b/api-ref/source/os-consoles.inc
@@ -1,12 +1,15 @@
.. -*- rst -*-
-===============================================================
- Server consoles (servers, os-consoles, os-console-auth-tokens)
-===============================================================
+==================================================
+ XenServer VNC Proxy (XVP) consoles (os-consoles)
+==================================================
-Manages server consoles.
+Manages server XVP consoles.
-.. note:: This is only used in Xenserver VNC Proxy.
+.. warning::
+
+ These APIs are only applicable when using the XenServer virt driver.
+ They were removed in the 21.0.0 (Ussuri) release.
Lists Consoles
==============
@@ -17,7 +20,7 @@ Lists all consoles for a server.
Normal response codes: 200
-Error response codes: unauthorized(401), forbidden(403)
+Error response codes: unauthorized(401), forbidden(403), gone(410)
Request
-------
@@ -53,7 +56,8 @@ Creates a console for a server.
Normal response codes: 200
-Error response codes: unauthorized(401), forbidden(403), itemNotFound(404)
+Error response codes: unauthorized(401), forbidden(403), itemNotFound(404),
+gone(410)
Request
-------
@@ -77,7 +81,8 @@ Shows console details for a server.
Normal response codes: 200
-Error response codes: unauthorized(401), forbidden(403), itemNotFound(404)
+Error response codes: unauthorized(401), forbidden(403), itemNotFound(404),
+gone(410)
Request
-------
@@ -117,7 +122,8 @@ Deletes a console for a server.
Normal response codes: 202
-Error response codes: unauthorized(401), forbidden(403), itemNotFound(404)
+Error response codes: unauthorized(401), forbidden(403), itemNotFound(404),
+gone(410)
Request
-------
@@ -132,48 +138,3 @@ Response
--------
If successful, this method does not return a response body.
-
-
-Show Console Connection Information
-===================================
-
-.. rest_method:: GET /os-console-auth-tokens/{console_token}
-
-Given the console authentication token for a server,
-shows the related connection information.
-
-This method used to be available only for the ``rdp-html5`` console type before
-microversion 2.31. Starting from microversion 2.31 it's available for all
-console types.
-
-Normal response codes: 200
-
-Error response codes: badRequest(400), unauthorized(401), forbidden(403), itemNotFound(404)
-
-Request
--------
-
-.. rest_parameters:: parameters.yaml
-
-
- - console_token: console_token
-
-|
-
-Response
---------
-
-.. rest_parameters:: parameters.yaml
-
- - console: console
- - instance_uuid: instance_id_body
- - host: console_host
- - port: port_number
- - internal_access_path: internal_access_path
-
-|
-
-**Example Show Console Authentication Token**
-
-.. literalinclude:: ../../doc/api_samples/os-console-auth-tokens/get-console-connect-info-get-resp.json
- :language: javascript
diff --git a/api-ref/source/os-flavor-extra-specs.inc b/api-ref/source/os-flavor-extra-specs.inc
index 4ea220a9f64..8ecbdfa15ee 100644
--- a/api-ref/source/os-flavor-extra-specs.inc
+++ b/api-ref/source/os-flavor-extra-specs.inc
@@ -7,6 +7,10 @@
Lists, creates, deletes, and updates the extra-specs or keys for a
flavor.
+Refer to `Compute Flavors
+`__
+for available built-in extra specs.
+
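
For illustration, a hedged sketch of setting a built-in extra spec on a flavor
(the flavor ID, endpoint and token are placeholders):

.. code-block:: python

    import requests

    COMPUTE = "https://compute.example.com/v2.1"  # assumed compute endpoint

    resp = requests.post(
        f"{COMPUTE}/flavors/1/os-extra_specs",
        headers={"X-Auth-Token": "..."},          # assumed admin token
        json={"extra_specs": {"hw:cpu_policy": "dedicated"}},
    )
    resp.raise_for_status()
    print(resp.json()["extra_specs"])
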
List Extra Specs For A Flavor
=============================
@@ -48,11 +52,6 @@ Create Extra Specs For A Flavor
Creates extra specs for a flavor, by ID.
-.. note:: Please reference:
- `Compute Flavors
- `__
- for available built-in extra specs under ``Extra Specs`` section.
-
Normal response codes: 200
Error response codes: unauthorized(401), forbidden(403), itemNotFound(404),
diff --git a/api-ref/source/os-floating-ip-dns.inc b/api-ref/source/os-floating-ip-dns.inc
index 0cd8c5a14b0..6d62b83264e 100644
--- a/api-ref/source/os-floating-ip-dns.inc
+++ b/api-ref/source/os-floating-ip-dns.inc
@@ -1,7 +1,6 @@
.. -*- rst -*-
-.. needs:parameter_verification
-.. needs:example_verification
-.. needs:body_verification
+.. NOTE(gmann): These APIs are deprecated, so do not update this
+   file even if the body, examples or parameters are not complete.
=============================================
Floating IP DNS records (os-floating-ip-dns)
diff --git a/api-ref/source/os-floating-ip-pools.inc b/api-ref/source/os-floating-ip-pools.inc
index 4054daf41a0..7860560269c 100644
--- a/api-ref/source/os-floating-ip-pools.inc
+++ b/api-ref/source/os-floating-ip-pools.inc
@@ -38,7 +38,7 @@ Response
.. rest_parameters:: parameters.yaml
- floating_ip_pools: floating_ip_pools
- - name: floating_ip_pool_name
+ - name: floating_ip_pool_name_or_id
**Example List Floating Ip Pools: JSON response**
diff --git a/api-ref/source/os-floating-ips.inc b/api-ref/source/os-floating-ips.inc
index ad7588ca9da..a4911dec217 100644
--- a/api-ref/source/os-floating-ips.inc
+++ b/api-ref/source/os-floating-ips.inc
@@ -10,7 +10,7 @@
deprecated all the proxy APIs and users should use the native APIs
instead. These will fail with a 404 starting from microversion 2.36.
See: `Relevant Network APIs
- `__.
+ `__.
Lists floating IP addresses for a project. Also, creates (allocates) a
floating IP address for a project, shows floating IP address details,
@@ -20,17 +20,17 @@ The cloud administrator configures a pool of floating IP addresses in
OpenStack Compute. The project quota defines the maximum number of
floating IP addresses that you can allocate to the project. After you
`allocate a floating IP
-address `__
+address `__
for a project, you can:
- `Add (associate) the floating IP
- address `__
with an instance in the project. You can associate only one floating
IP address with an instance at a time.
- `Remove (disassociate) the floating IP
- address `__
from an instance in the project.
@@ -62,7 +62,7 @@ Response
- id: floating_ip_id_value
- instance_id: server_id
- ip: floating_ip
- - pool: floating_ip_pool_name
+ - pool: floating_ip_pool_name_or_id
**Example List Floating Ip Addresses**
@@ -86,7 +86,7 @@ can change these permissions through the ``policy.json`` file.
Normal response codes: 200
-Error response codes: badRequest(400), unauthorized(401), forbidden(403),
+Error response codes: badRequest(400), unauthorized(401), forbidden(403),
itemNotFound(404)
Request
@@ -94,7 +94,7 @@ Request
.. rest_parameters:: parameters.yaml
- - pool: floating_ip_pool_name
+ - pool: floating_ip_pool_name_or_id
**Example Create (Allocate) Floating Ip Address**
@@ -111,7 +111,7 @@ Response
- id: floating_ip_id_value
- instance_id: server_id
- ip: floating_ip
- - pool: floating_ip_pool_name
+ - pool: floating_ip_pool_name_or_id
**Example Create (Allocate) Floating Ip Address: JSON response**
@@ -131,7 +131,7 @@ can change these permissions through the ``policy.json`` file.
Normal response codes: 200
-Error response codes: badRequest(400), unauthorized(401), forbidden(403),
+Error response codes: badRequest(400), unauthorized(401), forbidden(403),
itemNotFound(404)
Request
@@ -151,7 +151,7 @@ Response
- id: floating_ip_id_value
- instance_id: server_id
- ip: floating_ip
- - pool: floating_ip_pool_name
+ - pool: floating_ip_pool_name_or_id
**Example Show Floating Ip Address Details: JSON response**
@@ -176,7 +176,7 @@ can change these permissions through the ``policy.json`` file.
Normal response codes: 202
Error response codes: badRequest(400), unauthorized(401), forbidden(403),
- itemNotFound(404)
+itemNotFound(404)
Request
-------
diff --git a/api-ref/source/os-hypervisors.inc b/api-ref/source/os-hypervisors.inc
index b6e661f8599..6363b409b40 100644
--- a/api-ref/source/os-hypervisors.inc
+++ b/api-ref/source/os-hypervisors.inc
@@ -12,6 +12,7 @@ for a hypervisor, lists all servers on hypervisors that match the given
``hypervisor_hostname_pattern`` or searches for hypervisors by the given
``hypervisor_hostname_pattern``.
+
List Hypervisors
================
@@ -64,6 +65,7 @@ Response
.. literalinclude:: ../../doc/api_samples/os-hypervisors/v2.53/hypervisors-with-servers-resp.json
:language: javascript
+
List Hypervisors Details
========================
@@ -121,7 +123,8 @@ Response
- service.host: host_name_body
- service.id: service_id_body_2_52
- service.id: service_id_body_2_53
- - service.disable_reason: service_disable_reason
+ - service.disabled_reason: service_disable_reason
+ - uptime: hypervisor_uptime
- vcpus: hypervisor_vcpus
- vcpus_used: hypervisor_vcpus_used
- hypervisor_links: hypervisor_links
@@ -136,20 +139,41 @@ Response
.. literalinclude:: ../../doc/api_samples/os-hypervisors/v2.53/hypervisors-detail-resp.json
:language: javascript
-Show Hypervisor Statistics
-==========================
+**Example List Hypervisors Details (v2.88): JSON response**
+
+.. literalinclude:: ../../doc/api_samples/os-hypervisors/v2.88/hypervisors-detail-resp.json
+ :language: javascript
+
+
+Show Hypervisor Statistics (DEPRECATED)
+=======================================
.. rest_method:: GET /os-hypervisors/statistics
+ max_version: 2.87
Shows summary statistics for all enabled hypervisors over all compute nodes.
+.. warning::
+
+ This API is deprecated and will fail with HTTP 404 starting with microversion
+ 2.88. Use placement to get information on resource usage across hypervisors.
+
Policy defaults enable only users with the administrative role to perform
this operation. Cloud providers can change these permissions through
the ``policy.json`` file.
+.. note::
+
+ As noted, some of the parameters in the response representing totals do not
+ take allocation ratios into account. This can result in a disparity between
+ the totals and the usages. A more accurate representation of state can be
+ obtained using `placement`__.
+
+ __ https://docs.openstack.org/api-ref/placement/#list-resource-provider-usages
+
Normal response codes: 200
-Error response codes: unauthorized(401), forbidden(403)
+Error response codes: unauthorized(401), forbidden(403), itemNotFound(404)
Response
--------
@@ -158,7 +182,7 @@ Response
- hypervisor_statistics: hypervisor_statistics
- count: hypervisor_count
- - current_workload: current_workload
+ - current_workload: current_workload_total
- disk_available_least: disk_available_least_total
- free_disk_gb: hypervisor_free_disk_gb_total
- free_ram_mb: free_ram_mb_total
@@ -175,6 +199,7 @@ Response
.. literalinclude:: ../../doc/api_samples/os-hypervisors/hypervisors-statistics-resp.json
:language: javascript
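
Since these totals can be misleading, per-provider usage is better read from
placement; a hedged sketch (endpoint, token and provider UUID are
placeholders):

.. code-block:: python

    import requests

    PLACEMENT = "https://placement.example.com"  # assumed placement endpoint
    RP = "4e8e5957-649f-477b-9e5b-f1f75b21c03c"  # hypothetical provider UUID

    resp = requests.get(f"{PLACEMENT}/resource_providers/{RP}/usages",
                        headers={"X-Auth-Token": "..."})  # assumed admin token
    resp.raise_for_status()
    print(resp.json()["usages"])  # e.g. {'VCPU': 4, 'MEMORY_MB': 8192, ...}
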
+
Show Hypervisor Details
=======================
@@ -186,6 +211,15 @@ Policy defaults enable only users with the administrative role to perform
this operation. Cloud providers can change these permissions through
the ``policy.json`` file.
+.. note::
+
+ As noted, some of the parameters in the response representing totals do not
+ take allocation ratios into account. This can result in a disparity between
+ the totals and the usages. A more accurate representation of state can be
+ obtained using `placement`__.
+
+ __ https://docs.openstack.org/api-ref/placement/#show-resource-provider-usages
+
Normal response codes: 200
Error response codes: badRequest(400), unauthorized(401), forbidden(403), itemNotFound(404)
@@ -230,7 +264,8 @@ Response
- service.host: host_name_body
- service.id: service_id_body_2_52
- service.id: service_id_body_2_53
- - service.disable_reason: service_disable_reason
+ - service.disabled_reason: service_disable_reason
+ - uptime: hypervisor_uptime
- vcpus: hypervisor_vcpus
- vcpus_used: hypervisor_vcpus_used
@@ -244,13 +279,26 @@ Response
.. literalinclude:: ../../doc/api_samples/os-hypervisors/v2.53/hypervisors-show-with-servers-resp.json
:language: javascript
-Show Hypervisor Uptime
-======================
+**Example Show Hypervisors Details (v2.88): JSON response**
+
+.. literalinclude:: ../../doc/api_samples/os-hypervisors/v2.88/hypervisors-show-with-servers-resp.json
+ :language: javascript
+
+
+Show Hypervisor Uptime (DEPRECATED)
+===================================
.. rest_method:: GET /os-hypervisors/{hypervisor_id}/uptime
+ max_version: 2.87
Shows the uptime for a given hypervisor.
+.. warning::
+
+ This API is deprecated and will fail with HTTP 404 starting with
+ microversion 2.88. Use `Show Hypervisor Details`_ with microversion 2.88
+ and later to get this information.
+
Policy defaults enable only users with the administrative role to perform
this operation. Cloud providers can change these permissions through
the ``policy.json`` file.
@@ -290,8 +338,9 @@ Response
.. literalinclude:: ../../doc/api_samples/os-hypervisors/v2.53/hypervisors-uptime-resp.json
:language: javascript
-Search Hypervisor
-=================
+
+Search Hypervisor (DEPRECATED)
+==============================
.. rest_method:: GET /os-hypervisors/{hypervisor_hostname_pattern}/search
max_version: 2.52
@@ -333,8 +382,9 @@ Response
.. literalinclude:: ../../doc/api_samples/os-hypervisors/hypervisors-search-resp.json
:language: javascript
-List Hypervisor Servers
-=======================
+
+List Hypervisor Servers (DEPRECATED)
+====================================
.. rest_method:: GET /os-hypervisors/{hypervisor_hostname_pattern}/servers
max_version: 2.52
diff --git a/api-ref/source/os-instance-actions.inc b/api-ref/source/os-instance-actions.inc
index df961d45e9b..f0c191c67a5 100644
--- a/api-ref/source/os-instance-actions.inc
+++ b/api-ref/source/os-instance-actions.inc
@@ -34,6 +34,7 @@ Request
- limit: instance_action_limit
- marker: instance_action_marker
- changes-since: changes_since_instance_action
+ - changes-before: changes_before_instance_action
Response
--------
@@ -45,10 +46,10 @@ Response
- action: action
- instance_uuid: instance_id_body
- message: message
- - project_id: project_id_instance_action
+ - project_id: project_id_server_action
- request_id: request_id_body
- start_time: start_time
- - user_id: user_id
+ - user_id: user_id_server_action
- updated_at: updated_instance_action
- links: instance_actions_next_links
@@ -100,10 +101,10 @@ Response
- action: action
- instance_uuid: instance_id_body
- message: message
- - project_id: project_id_instance_action
+ - project_id: project_id_server_action
- request_id: request_id_body
- start_time: start_time
- - user_id: user_id
+ - user_id: user_id_server_action
- events: instance_action_events_2_50
- events: instance_action_events_2_51
- events.event: event
@@ -113,6 +114,7 @@ Response
- events.traceback: event_traceback
- events.hostId: event_hostId
- events.host: event_host
+ - events.details: event_details
- updated_at: updated_instance_action
**Example Show Server Action Details For Admin (v2.62)**
@@ -124,3 +126,8 @@ Response
.. literalinclude:: ../../doc/api_samples/os-instance-actions/v2.62/instance-action-get-non-admin-resp.json
:language: javascript
+
+**Example Show Server Action Details For System Reader (v2.84)**
+
+.. literalinclude:: ../../doc/api_samples/os-instance-actions/v2.84/instance-action-get-resp.json
+ :language: javascript
diff --git a/api-ref/source/os-interface.inc b/api-ref/source/os-interface.inc
index 62151cbc3eb..10f3a450e87 100644
--- a/api-ref/source/os-interface.inc
+++ b/api-ref/source/os-interface.inc
@@ -41,12 +41,18 @@ Response
- mac_addr: mac_addr
- net_id: net_id_resp
- port_id: port_id_resp
+ - tag: device_tag_nic_attachment_resp
**Example List Port Interfaces: JSON response**
.. literalinclude:: ../../doc/api_samples/os-attach-interfaces/attach-interfaces-list-resp.json
:language: javascript
+**Example List Tagged Port Interfaces (v2.70): JSON response**
+
+.. literalinclude:: ../../doc/api_samples/os-attach-interfaces/v2.70/attach-interfaces-list-resp.json
+ :language: javascript
+
Create Interface
================
@@ -103,12 +109,18 @@ Response
- net_id: net_id_resp
- port_id: port_id_resp
- port_state: port_state
+ - tag: device_tag_nic_attachment_resp
**Example Create Interface: JSON response**
.. literalinclude:: ../../doc/api_samples/os-attach-interfaces/attach-interfaces-create-resp.json
:language: javascript
+**Example Create Tagged Interface (v2.70): JSON response**
+
+.. literalinclude:: ../../doc/api_samples/os-attach-interfaces/v2.70/attach-interfaces-create-resp.json
+ :language: javascript
+
Show Port Interface Details
===========================
@@ -142,12 +154,18 @@ Response
- mac_addr: mac_addr
- net_id: net_id_resp
- port_id: port_id_resp
+ - tag: device_tag_nic_attachment_resp
**Example Show Port Interface Details: JSON response**
.. literalinclude:: ../../doc/api_samples/os-attach-interfaces/attach-interfaces-show-resp.json
:language: javascript
+**Example Show Tagged Port Interface Details (v2.70): JSON response**
+
+.. literalinclude:: ../../doc/api_samples/os-attach-interfaces/v2.70/attach-interfaces-show-resp.json
+ :language: javascript
+
Detach Interface
================
diff --git a/api-ref/source/os-keypairs.inc b/api-ref/source/os-keypairs.inc
index e728ef64357..e03e7d91aaf 100644
--- a/api-ref/source/os-keypairs.inc
+++ b/api-ref/source/os-keypairs.inc
@@ -41,7 +41,7 @@ Response
**Example List Keypairs (v2.35): JSON response**
-.. literalinclude:: ../../doc/api_samples/keypairs/v2.35/keypairs-list-resp.json
+.. literalinclude:: ../../doc/api_samples/os-keypairs/v2.35/keypairs-list-resp.json
:language: javascript
Create Or Import Keypair
@@ -72,7 +72,7 @@ Request
**Example Create Or Import Keypair (v2.10): JSON request**
-.. literalinclude:: ../../doc/api_samples/keypairs/v2.10/keypairs-import-post-req.json
+.. literalinclude:: ../../doc/api_samples/os-keypairs/v2.10/keypairs-import-post-req.json
:language: javascript
Response
@@ -90,7 +90,7 @@ Response
**Example Create Or Import Keypair (v2.10): JSON response**
-.. literalinclude:: ../../doc/api_samples/keypairs/v2.10/keypairs-import-post-resp.json
+.. literalinclude:: ../../doc/api_samples/os-keypairs/v2.10/keypairs-import-post-resp.json
:language: javascript
Show Keypair Details
@@ -131,7 +131,7 @@ Response
**Example Show Keypair Details (v2.10): JSON response**
-.. literalinclude:: ../../doc/api_samples/keypairs/v2.10/keypairs-get-resp.json
+.. literalinclude:: ../../doc/api_samples/os-keypairs/v2.10/keypairs-get-resp.json
:language: javascript
Delete Keypair
diff --git a/api-ref/source/os-migrations.inc b/api-ref/source/os-migrations.inc
index c8bc587f83b..2cfd948666f 100644
--- a/api-ref/source/os-migrations.inc
+++ b/api-ref/source/os-migrations.inc
@@ -17,6 +17,9 @@ Policy defaults enable only users with the administrative role to perform
this operation. Cloud providers can change these permissions through the
``policy.json`` file.
+Starting from microversion 2.59, the response is sorted by ``created_at``
+and ``id`` in descending order.
+
Normal response codes: 200
Error response codes: badRequest(400), unauthorized(401), forbidden(403)
@@ -35,12 +38,16 @@ Request
- limit: migration_limit
- marker: migration_marker
- changes-since: changes_since_migration
+ - changes-before: changes_before_migration
+ - user_id: user_id_query_migrations
+ - project_id: project_id_query_migrations
Response
--------
.. rest_parameters:: parameters.yaml
+ - migrations: migrations
- created_at: created
- dest_compute: migrate_dest_compute
- dest_host: migrate_dest_host
@@ -57,19 +64,20 @@ Response
- links: migration_links_2_23
- uuid: migration_uuid
- migrations_links: migration_next_links_2_59
+ - user_id: user_id_migration_2_80
+ - project_id: project_id_migration_2_80
**Example List Migrations: JSON response**
.. literalinclude:: ../../doc/api_samples/os-migrations/migrations-get.json
:language: javascript
-**Example List Migrations (v2.59):**
+**Example List Migrations (v2.80):**
-.. literalinclude:: ../../doc/api_samples/os-migrations/v2.59/migrations-get.json
+.. literalinclude:: ../../doc/api_samples/os-migrations/v2.80/migrations-get.json
:language: javascript
-**Example List Migrations With Paging (v2.59):**
+**Example List Migrations With Paging (v2.80):**
-.. literalinclude:: ../../doc/api_samples/os-migrations/v2.59/migrations-get-with-limit.json
+.. literalinclude:: ../../doc/api_samples/os-migrations/v2.80/migrations-get-with-limit.json
:language: javascript
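
For illustration, a hedged sketch of listing migrations with the paging and
time filters described above (the endpoint and token are placeholders):

.. code-block:: python

    import requests

    COMPUTE = "https://compute.example.com/v2.1"  # assumed compute endpoint

    resp = requests.get(
        f"{COMPUTE}/os-migrations",
        headers={
            "X-Auth-Token": "...",                # assumed admin token
            # paging, sorting and time filters arrived in microversion 2.59
            "OpenStack-API-Version": "compute 2.59",
        },
        params={"limit": 50, "changes-since": "2021-06-01T00:00:00Z"},
    )
    for migration in resp.json()["migrations"]:
        print(migration["id"], migration["status"], migration["created_at"])
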
-
diff --git a/api-ref/source/os-networks.inc b/api-ref/source/os-networks.inc
index 450cd4981ad..a9ee87f69ac 100644
--- a/api-ref/source/os-networks.inc
+++ b/api-ref/source/os-networks.inc
@@ -1,32 +1,26 @@
.. -*- rst -*-
-.. needs:parameter_verification
-.. needs:example_verification
-.. needs:body_verification
-.. NOTE(sdague): for future verification only worry about the non
- deprecated methods in this file. Let's not spend a ton of brain
- power on the associate/disassociate that's going away.
-
-=====================================
+
+======================================
Networks (os-networks) (DEPRECATED)
-=====================================
+======================================
+
+.. warning::
-.. warning:: The networks API was designed to work with
- ``nova-network``. Some features are proxied to
- ``neutron`` when appropriate, but as with all translation
- proxies, this is far from perfect compatibility. These
- APIs should be avoided in new applications in favor of
- using ``neutron`` directly. These will fail with a 404
- starting from microversion 2.36.
- See: `Relevant Network APIs
- `__.
+ This API was designed to work with ``nova-network`` which was deprecated in
+ the 14.0.0 (Newton) release and removed in the 21.0.0 (Ussuri) release. Some
+ features are proxied to the Network service (neutron) when appropriate, but
+ as with all translation proxies, this is far from perfect compatibility.
+ These APIs should be avoided in new applications in favor of `using
+ neutron directly`__. These will fail with a 404 starting from microversion
+ 2.36. They were removed in the 21.0.0 (Ussuri) release.
+__ https://docs.openstack.org/api-ref/network/v2/#networks
Creates, lists, shows information for, and deletes networks.
Adds network to a project, disassociates a network from a project, and
disassociates a project from a network.
-
Associates host with and disassociates host from a network.
List Networks
@@ -64,7 +58,8 @@ these permissions through the ``policy.json`` file.
Normal response codes: 200
-Error response codes: badRequest(400), unauthorized(401), forbidden(403), conflict(409), NotImplemented(501)
+Error response codes: badRequest(400), unauthorized(401), forbidden(403),
+conflict(409), gone(410), notImplemented(501)
Request
-------
@@ -95,7 +90,8 @@ this operation. Cloud providers can change these permissions through the
Normal response codes: 202
-Error response codes: badRequest(400), unauthorized(401), forbidden(403), NotImplemented(501)
+Error response codes: badRequest(400), unauthorized(401), forbidden(403),
+gone(410), notImplemented(501)
Request
-------
@@ -150,7 +146,8 @@ these permissions through the ``policy.json`` file.
Normal response codes: 202
-Error response codes: unauthorized(401), forbidden(403), itemNotFound(404), conflict(409)
+Error response codes: unauthorized(401), forbidden(403), itemNotFound(404),
+conflict(409), gone(410)
Request
-------
@@ -164,15 +161,11 @@ Response
There is no body content for the response of a successful DELETE query.
-Associate Host (DEPRECATED)
-===========================
+Associate Host
+==============
.. rest_method:: POST /os-networks/{network_id}/action
-.. warning::
- This API is only available with ``nova-network`` which is
- deprecated. It should be avoided in any new applications.
-
Associates a network with a host.
Specify the ``associate_host`` action in the request body.
@@ -183,7 +176,8 @@ permissions through the ``policy.json`` file.
Normal response codes: 202
-Error response codes: unauthorized(401), forbidden(403), itemNotFound(404), NotImplemented(501)
+Error response codes: unauthorized(401), forbidden(403), itemNotFound(404),
+gone(410), notImplemented(501)
Request
-------
@@ -203,15 +197,11 @@ Response
There is no body content for the response of a successful POST query.
-Disassociate Network (DEPRECATED)
-=================================
+Disassociate Network
+====================
.. rest_method:: POST /os-networks/{network_id}/action
-.. warning::
- This API is only available with ``nova-network`` which is
- deprecated. It should be avoided in any new applications.
-
Disassociates a network from a project. You can then reuse the network.
Specify the ``disassociate`` action in the request body.
@@ -222,7 +212,8 @@ these permissions through the ``policy.json`` file.
Normal response codes: 202
-Error response codes: unauthorized(401), forbidden(403), itemNotFound(404), NotImplemented(501)
+Error response codes: unauthorized(401), forbidden(403), itemNotFound(404),
+gone(410), notImplemented(501)
Request
-------
@@ -241,15 +232,11 @@ Response
There is no body content for the response of a successful POST query.
-Disassociate Host (DEPRECATED)
-==============================
+Disassociate Host
+=================
.. rest_method:: POST /os-networks/{network_id}/action
-.. warning::
- This API is only available with ``nova-network`` which is
- deprecated. It should be avoided in any new applications.
-
Disassociates a host from a network.
Specify the ``disassociate_host`` action in the request body.
@@ -260,7 +247,8 @@ these permissions through the ``policy.json`` file.
Normal response codes: 202
-Error response codes: unauthorized(401), forbidden(403), itemNotFound(404), NotImplemented(501)
+Error response codes: unauthorized(401), forbidden(403), itemNotFound(404),
+gone(410), notImplemented(501)
Request
-------
@@ -280,15 +268,11 @@ Response
There is no body content for the response of a successful POST query.
-Disassociate Project (DEPRECATED)
-=================================
+Disassociate Project
+====================
.. rest_method:: POST /os-networks/{network_id}/action
-.. warning::
- This API is only available with ``nova-network`` which is
- deprecated. It should be avoided in any new applications.
-
Disassociates a project from a network.
Specify the ``disassociate_project`` action in the request body.
@@ -299,7 +283,8 @@ these permissions through the ``policy.json`` file.
Normal response codes: 202
-Error response codes: unauthorized(401), forbidden(403), itemNotFound(404), NotImplemented(501)
+Error response codes: unauthorized(401), forbidden(403), itemNotFound(404),
+gone(410), notImplemented(501)
Request
-------
diff --git a/api-ref/source/os-quota-class-sets.inc b/api-ref/source/os-quota-class-sets.inc
index d604409664f..ee7aeb4f695 100644
--- a/api-ref/source/os-quota-class-sets.inc
+++ b/api-ref/source/os-quota-class-sets.inc
@@ -64,21 +64,21 @@ Response
- quota_class_set: quota_class_set
- cores: cores_quota_class
- - fixed_ips: fixed_ips_quota_class
- - floating_ips: floating_ips_quota_class
- id: quota_class_id_body
- - injected_file_content_bytes: injected_file_content_bytes
- - injected_file_path_bytes: injected_file_path_bytes
- - injected_files: injected_files_quota_class
- instances: instances_quota_class
- key_pairs: key_pairs_quota_class
- metadata_items: metadata_items
- ram: ram_quota_class
+ - fixed_ips: fixed_ips_quota_class
+ - floating_ips: floating_ips_quota_class
+ - networks: networks_quota_optional
- security_group_rules: security_group_rules_quota_class
- security_groups: security_groups_quota_class
- server_groups: server_groups_quota_class
- server_group_members: server_group_members_quota_class
- - networks: networks_quota_optional
+ - injected_file_content_bytes: injected_file_content_bytes
+ - injected_file_path_bytes: injected_file_path_bytes
+ - injected_files: injected_files_quota_class
**Example Show A Quota Class: JSON response(2.50)**
@@ -108,20 +108,20 @@ Request
- id: quota_class_id
- quota_class_set: quota_class_set
- cores: cores_quota_class_optional
- - fixed_ips: fixed_ips_quota_class_optional
- - floating_ips: floating_ips_quota_class_optional
- - injected_file_content_bytes: injected_file_content_bytes_quota_optional
- - injected_file_path_bytes: injected_file_path_bytes_quota_optional
- - injected_files: injected_files_quota_class_optional
- instances: instances_quota_class_optional
- key_pairs: key_pairs_quota_class_optional
- metadata_items: metadata_items_quota_optional
- ram: ram_quota_class_optional
- - security_group_rules: security_group_rules_quota_class_optional
- - security_groups: security_groups_quota_class_optional
- server_groups: server_groups_quota_class_optional
- server_group_members: server_group_members_quota_optional
+ - fixed_ips: fixed_ips_quota_class_optional
+ - floating_ips: floating_ips_quota_class_optional
- networks: networks_quota_optional
+ - security_group_rules: security_group_rules_quota_class_optional
+ - security_groups: security_groups_quota_class_optional
+ - injected_file_content_bytes: injected_file_content_bytes_quota_optional
+ - injected_file_path_bytes: injected_file_path_bytes_quota_optional
+ - injected_files: injected_files_quota_class_optional
**Example Update Quotas: JSON request(2.50)**
@@ -135,20 +135,20 @@ Response
- quota_class_set: quota_class_set
- cores: cores_quota_class
- - fixed_ips: fixed_ips_quota_class
- - floating_ips: floating_ips_quota_class
- - injected_file_content_bytes: injected_file_content_bytes
- - injected_file_path_bytes: injected_file_path_bytes
- - injected_files: injected_files_quota_class
- instances: instances_quota_class
- key_pairs: key_pairs_quota_class
- metadata_items: metadata_items
- ram: ram_quota_class
+ - fixed_ips: fixed_ips_quota_class
+ - floating_ips: floating_ips_quota_class
+ - networks: networks_quota_optional
- security_group_rules: security_group_rules_quota_class
- security_groups: security_groups_quota_class
- server_groups: server_groups_quota_class
- server_group_members: server_group_members_quota_class
- - networks: networks_quota_optional
+ - injected_file_content_bytes: injected_file_content_bytes
+ - injected_file_path_bytes: injected_file_path_bytes
+ - injected_files: injected_files_quota_class
**Example Update Quotas: JSON response(2.50)**
diff --git a/api-ref/source/os-quota-sets.inc b/api-ref/source/os-quota-sets.inc
index 30fef320eca..29d4c5e7b6e 100644
--- a/api-ref/source/os-quota-sets.inc
+++ b/api-ref/source/os-quota-sets.inc
@@ -39,21 +39,21 @@ Response
- quota_set: quota_set
- cores: cores
- - fixed_ips: fixed_ips_quota
- - floating_ips: floating_ips
- id: quota_tenant_or_user_id_body
- - injected_file_content_bytes: injected_file_content_bytes
- - injected_file_path_bytes: injected_file_path_bytes
- - injected_files: injected_files
- instances: instances
- key_pairs: key_pairs
- metadata_items: metadata_items
- ram: ram
- - security_group_rules: security_group_rules_quota
- - security_groups: security_groups_quota
- server_groups: server_groups
- server_group_members: server_group_members
+ - fixed_ips: fixed_ips_quota
+ - floating_ips: floating_ips
- networks: networks_quota_set_optional
+ - security_group_rules: security_group_rules_quota
+ - security_groups: security_groups_quota
+ - injected_file_content_bytes: injected_file_content_bytes
+ - injected_file_path_bytes: injected_file_path_bytes
+ - injected_files: injected_files
**Example Show A Quota: JSON response**
@@ -89,20 +89,20 @@ Request
- quota_set: quota_set
- force: force
- cores: cores_quota_optional
- - fixed_ips: fixed_ips_quota_optional
- - floating_ips: floating_ips_quota_optional
- - injected_file_content_bytes: injected_file_content_bytes_quota_optional
- - injected_file_path_bytes: injected_file_path_bytes_quota_optional
- - injected_files: injected_files_quota_optional
- instances: instances_quota_optional
- key_pairs: key_pairs_quota_optional
- metadata_items: metadata_items_quota_optional
- ram: ram_quota_optional
- - security_group_rules: security_group_rules
- - security_groups: security_groups_quota_optional
- server_groups: server_groups_quota_optional
- server_group_members: server_group_members_quota_optional
+ - fixed_ips: fixed_ips_quota_optional
+ - floating_ips: floating_ips_quota_optional
- networks: networks_quota_set_optional
+ - security_group_rules: security_group_rules
+ - security_groups: security_groups_quota_optional
+ - injected_file_content_bytes: injected_file_content_bytes_quota_optional
+ - injected_file_path_bytes: injected_file_path_bytes_quota_optional
+ - injected_files: injected_files_quota_optional
**Example Update Quotas: JSON request**
@@ -121,20 +121,20 @@ Response
- quota_set: quota_set
- cores: cores
- - fixed_ips: fixed_ips_quota
- - floating_ips: floating_ips
- - injected_file_content_bytes: injected_file_content_bytes
- - injected_file_path_bytes: injected_file_path_bytes
- - injected_files: injected_files
- instances: instances
- key_pairs: key_pairs
- metadata_items: metadata_items
- ram: ram
- - security_group_rules: security_group_rules_quota
- - security_groups: security_groups_quota
- server_groups: server_groups
- server_group_members: server_group_members
+ - fixed_ips: fixed_ips_quota
+ - floating_ips: floating_ips
- networks: networks_quota_set_optional
+ - security_group_rules: security_group_rules_quota
+ - security_groups: security_groups_quota
+ - injected_file_content_bytes: injected_file_content_bytes
+ - injected_file_path_bytes: injected_file_path_bytes
+ - injected_files: injected_files
**Example Update Quotas: JSON response**
@@ -192,21 +192,21 @@ Response
- quota_set: quota_set
- cores: cores
- - fixed_ips: fixed_ips_quota
- - floating_ips: floating_ips
- id: quota_tenant_or_user_id_body
- - injected_file_content_bytes: injected_file_content_bytes
- - injected_file_path_bytes: injected_file_path_bytes
- - injected_files: injected_files
- instances: instances
- key_pairs: key_pairs
- metadata_items: metadata_items
- ram: ram
- - security_group_rules: security_group_rules_quota
- - security_groups: security_groups_quota
- server_groups: server_groups
- server_group_members: server_group_members
+ - fixed_ips: fixed_ips_quota
+ - floating_ips: floating_ips
- networks: networks_quota_set_optional
+ - security_group_rules: security_group_rules_quota
+ - security_groups: security_groups_quota
+ - injected_file_content_bytes: injected_file_content_bytes
+ - injected_file_path_bytes: injected_file_path_bytes
+ - injected_files: injected_files
**Example List Default Quotas For Tenant: JSON response**
@@ -244,21 +244,21 @@ Response
- quota_set: quota_set
- cores: cores_quota_details
- - fixed_ips: fixed_ips_quota_details
- - floating_ips: floating_ips_quota_details
- id: quota_tenant_or_user_id_body
- - injected_file_content_bytes: injected_file_content_bytes_quota_details
- - injected_file_path_bytes: injected_file_path_bytes_quota_details
- - injected_files: injected_files_quota_details
- instances: instances_quota_details
- key_pairs: key_pairs_quota_details
- metadata_items: metadata_items_quota_details
- ram: ram_quota_details
- - security_group_rules: security_group_rules_quota_details
- - security_groups: security_groups_quota_details
- server_groups: server_groups_quota_details
- server_group_members: server_group_members_quota_details
+ - fixed_ips: fixed_ips_quota_details
+ - floating_ips: floating_ips_quota_details
- networks: networks_quota_set_optional
+ - security_group_rules: security_group_rules_quota_details
+ - security_groups: security_groups_quota_details
+ - injected_file_content_bytes: injected_file_content_bytes_quota_details
+ - injected_file_path_bytes: injected_file_path_bytes_quota_details
+ - injected_files: injected_files_quota_details
**Example Show A Quota: JSON response**
diff --git a/api-ref/source/os-security-group-default-rules.inc b/api-ref/source/os-security-group-default-rules.inc
index 4ed6f309d2c..9d47f0ad25e 100644
--- a/api-ref/source/os-security-group-default-rules.inc
+++ b/api-ref/source/os-security-group-default-rules.inc
@@ -1,16 +1,15 @@
.. -*- rst -*-
-.. needs:body_verification
-
-================================================================================
- Rules for default security group (os-security-group-default-rules) (DEPRECATED)
-================================================================================
+====================================================================
+ Rules for default security group (os-security-group-default-rules)
+====================================================================
.. warning::
    This API is only available with ``nova-network``, which is
deprecated. It should be avoided in any new applications.
These will fail with a 404 starting from microversion 2.36.
+ They were completely removed in the 21.0.0 (Ussuri) release.
Lists, shows information for, and creates default security group rules.
@@ -23,7 +22,8 @@ Lists default security group rules.
Normal response codes: 200
-Error response codes: unauthorized(401), forbidden(403), itemNotFound(404), notImplemented(501)
+Error response codes: unauthorized(401), forbidden(403), itemNotFound(404),
+gone(410), notImplemented(501)
Response
--------
@@ -52,7 +52,8 @@ Shows details for a security group rule.
Normal response codes: 200
-Error response codes: badRequest(400), unauthorized(401), forbidden(403), itemNotFound(404), notImplemented(501)
+Error response codes: badRequest(400), unauthorized(401), forbidden(403),
+itemNotFound(404), gone(410), notImplemented(501)
Request
-------
@@ -91,7 +92,8 @@ IP protocol ( ``ip_protocol`` ) value. Otherwise, the operation returns the ``Ba
Normal response codes: 200
-Error response codes: badRequest(400), unauthorized(401), forbidden(403), conflict(409), notImplemented(501)
+Error response codes: badRequest(400), unauthorized(401), forbidden(403),
+conflict(409), gone(410), notImplemented(501)
Request
-------
@@ -136,7 +138,8 @@ Deletes a security group rule.
Normal response codes: 204
-Error response codes: badRequest(400), unauthorized(401), forbidden(403), itemNotFound(404), notImplemented(501)
+Error response codes: badRequest(400), unauthorized(401), forbidden(403),
+itemNotFound(404), gone(410), notImplemented(501)
Request
-------
diff --git a/api-ref/source/os-security-group-rules.inc b/api-ref/source/os-security-group-rules.inc
index 09b9956c210..1a750c60d0d 100644
--- a/api-ref/source/os-security-group-rules.inc
+++ b/api-ref/source/os-security-group-rules.inc
@@ -10,7 +10,7 @@
deprecated all the proxy APIs and users should use the native APIs
instead. These will fail with a 404 starting from microversion 2.36.
See: `Relevant Network APIs
- `__.
+ `__.
Creates and deletes security group rules.
diff --git a/api-ref/source/os-security-groups.inc b/api-ref/source/os-security-groups.inc
index 3838d97a006..596df40cc72 100644
--- a/api-ref/source/os-security-groups.inc
+++ b/api-ref/source/os-security-groups.inc
@@ -1,7 +1,6 @@
.. -*- rst -*-
-.. needs:parameter_verification
-.. needs:example_verification
-.. needs:body_verification
+.. NOTE(gmann): These APIs are deprecated, so do not update this
+   file even if the body, examples or parameters are not complete.
==================================================
Security groups (os-security-groups) (DEPRECATED)
@@ -13,7 +12,7 @@
deprecated all the proxy APIs and users should use the native APIs
instead. These will fail with a 404 starting from microversion 2.36.
See: `Relevant Network APIs
- `__.
+ `__.
Lists, shows information for, creates, updates and deletes security groups.
diff --git a/api-ref/source/os-server-external-events.inc b/api-ref/source/os-server-external-events.inc
index b31c38116f6..d96bc263969 100644
--- a/api-ref/source/os-server-external-events.inc
+++ b/api-ref/source/os-server-external-events.inc
@@ -7,11 +7,11 @@
.. warning::
This is an ``admin`` level service API only designed to be used by
other OpenStack services. The point of this API is to coordinate
- between Nova and Neutron, Nova and Cinder (and potentially future
- services) on activities they both need to be involved in,
+ between Nova and Neutron, Nova and Cinder, Nova and Ironic (and potentially
+ future services) on activities they both need to be involved in,
such as network hotplugging.
- Unless you are writing Neutron or Cinder code you **should not**
+ Unless you are writing Neutron, Cinder or Ironic code you **should not**
be using this API.
Creates one or more external events. The API dispatches each event to a
@@ -32,11 +32,15 @@ updated ``code`` and ``status`` indicating their level of success.
Normal response codes: 200, 207
A 200 will be returned if all events succeeded, 207 will be returned
-if some events could not be processed. The ``code`` attribute for the
+if any events could not be processed. The ``code`` attribute for the
event will explain further what went wrong.
-Error response codes: badRequest(400), unauthorized(401), forbidden(403),
-itemNotFound(404)
+Error response codes: badRequest(400), unauthorized(401), forbidden(403)
+
+.. note:: Prior to the fix for `bug 1855752`_, error response code 404 may be
+ erroneously returned when all events failed.
+
+.. _bug 1855752: https://bugs.launchpad.net/nova/+bug/1855752
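As an illustrative sketch (assumptions: a service-scoped admin token, the usual compute endpoint, and placeholder UUIDs), another service might submit an event like this:

.. code-block:: python

   import requests

   NOVA_URL = "http://controller:8774/v2.1"  # placeholder endpoint
   TOKEN = "gAAAAA-placeholder"              # placeholder keystone token

   resp = requests.post(
       f"{NOVA_URL}/os-server-external-events",
       headers={"X-Auth-Token": TOKEN},
       json={
           "events": [{
               "name": "network-vif-plugged",
               "server_uuid": "00000000-0000-0000-0000-000000000000",  # placeholder
               "tag": "11111111-1111-1111-1111-111111111111",  # placeholder port UUID
               "status": "completed",
           }]
       },
   )
   # 200 when every event succeeded, 207 when any event failed; the
   # per-event ``code`` in the response body explains what went wrong.
   print(resp.status_code, resp.json())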
Request
-------
diff --git a/api-ref/source/os-server-tags.inc b/api-ref/source/os-server-tags.inc
index 96fbad73fba..9ed62702e80 100644
--- a/api-ref/source/os-server-tags.inc
+++ b/api-ref/source/os-server-tags.inc
@@ -47,7 +47,7 @@ Response
.. rest_parameters:: parameters.yaml
- - tags: tags
+ - tags: tags_no_min
**Example List Tags:**
@@ -71,7 +71,7 @@ Request
.. rest_parameters:: parameters.yaml
- server_id: server_id_path
- - tags: tags
+ - tags: tags_no_min
**Example Replace Tags:**
@@ -83,7 +83,7 @@ Response
.. rest_parameters:: parameters.yaml
- - tags: tags
+ - tags: tags_no_min
**Example Replace Tags:**
diff --git a/api-ref/source/os-services.inc b/api-ref/source/os-services.inc
index 0f2c5cbc271..af495b4bac2 100644
--- a/api-ref/source/os-services.inc
+++ b/api-ref/source/os-services.inc
@@ -21,6 +21,14 @@ Lists all running Compute services.
Provides details why any services were disabled.
+.. note:: Starting with microversion 2.69 if service details cannot be loaded
+ due to a transient condition in the deployment like infrastructure failure,
+ the response body for those unavailable compute services in the down cells
+ will be missing keys. See `handling down cells
+ `__
+ section of the Compute API guide for more information on the keys that
+ would be returned in the partial constructs.
+
Normal response codes: 200
Error response codes: unauthorized(401), forbidden(403)
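A minimal client-side sketch of tolerating the partial constructs described above (``NOVA_URL`` and ``TOKEN`` are placeholders): on microversion 2.69 a record from a down cell carries only a subset of keys, so missing keys should be tolerated rather than treated as an error.

.. code-block:: python

   import requests

   NOVA_URL = "http://controller:8774/v2.1"  # placeholder endpoint
   TOKEN = "gAAAAA-placeholder"              # placeholder token

   resp = requests.get(
       f"{NOVA_URL}/os-services",
       headers={
           "X-Auth-Token": TOKEN,
           # Opt in to partial results for services in down cells.
           "X-OpenStack-Nova-API-Version": "2.69",
       },
   )
   for svc in resp.json()["services"]:
       # Partial constructs omit keys such as ``state``; use .get() so a
       # down-cell record does not raise KeyError.
       print(svc.get("binary"), svc.get("host"), svc.get("state", "UNKNOWN"))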
@@ -50,11 +58,20 @@ Response
- zone: OS-EXT-AZ:availability_zone
- forced_down: forced_down_2_11
-**Example List Compute Services**
+**Example List Compute Services (v2.11)**
.. literalinclude:: ../../doc/api_samples/os-services/v2.11/services-list-get-resp.json
:language: javascript
+**Example List Compute Services (v2.69)**
+
+This is a sample response for the services from the non-responsive part of the
+deployment. The responses for the available service records will be normal
+without any missing keys.
+
+.. literalinclude:: ../../doc/api_samples/os-services/v2.69/services-list-get-resp.json
+ :language: javascript
+
Disable Scheduling For A Compute Service
========================================
@@ -195,7 +212,16 @@ Update Forced Down
.. rest_method:: PUT /os-services/force-down
-Set or unset ``forced_down`` flag for the service.
+Set or unset ``forced_down`` flag for the service. ``forced_down`` is a manual
+override to tell nova that the service in question has been fenced manually by
+the operations team (either hard powered off, or network unplugged). That
+signals that it is safe to proceed with ``evacuate`` or other operations that
+nova has safety checks to prevent for hosts that are up.
+
+.. warning::
+
+ Setting a service forced down without completely fencing it will likely
+ result in the corruption of VMs on that host.
Action ``force-down`` available as of microversion 2.11.
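For example, after an operator has hard powered off a host, the flag could be set with a sketch like the following (endpoint, token and host name are placeholders):

.. code-block:: python

   import requests

   NOVA_URL = "http://controller:8774/v2.1"  # placeholder endpoint
   TOKEN = "gAAAAA-placeholder"              # placeholder token

   # Only safe once the host has actually been fenced (powered off or
   # network unplugged); see the warning above.
   resp = requests.put(
       f"{NOVA_URL}/os-services/force-down",
       headers={"X-Auth-Token": TOKEN,
                "X-OpenStack-Nova-API-Version": "2.11"},
       json={"host": "compute1", "binary": "nova-compute",
             "forced_down": True},
   )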
@@ -246,7 +272,8 @@ Update Compute Service
Update a compute service to enable or disable scheduling, including recording a
reason why a compute service was disabled from scheduling. Set or unset the
-``forced_down`` flag for the service.
+``forced_down`` flag for the service. This operation is only allowed on
+services whose ``binary`` is ``nova-compute``.
This API is available starting with microversion 2.53.
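A sketch of disabling scheduling for a compute service via this microversion 2.53 API; the service UUID, endpoint and token are placeholders:

.. code-block:: python

   import requests

   NOVA_URL = "http://controller:8774/v2.1"             # placeholder
   TOKEN = "gAAAAA-placeholder"                         # placeholder
   SERVICE_ID = "00000000-0000-0000-0000-000000000000"  # placeholder UUID

   resp = requests.put(
       f"{NOVA_URL}/os-services/{SERVICE_ID}",
       headers={"X-Auth-Token": TOKEN,
                "X-OpenStack-Nova-API-Version": "2.53"},
       json={"status": "disabled",
             "disabled_reason": "planned maintenance"},
   )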
@@ -322,6 +349,12 @@ Attempts to delete a ``nova-compute`` service which is still hosting instances
will result in a 409 HTTPConflict response. The instances will need to be
migrated or deleted before a compute service can be deleted.
+Similarly, attempts to delete a ``nova-compute`` service which is involved in
+in-progress migrations will result in a 409 HTTPConflict response. The
+migrations will need to be completed, for example confirming or reverting a
+resize, or the instances will need to be deleted before the compute service can
+be deleted.
+
.. important:: Be sure to stop the actual ``nova-compute`` process on the
physical host *before* deleting the service with this API.
Failing to do so can lead to the running service re-creating
diff --git a/api-ref/source/os-simple-tenant-usage.inc b/api-ref/source/os-simple-tenant-usage.inc
index 39c4af81513..570666649ea 100644
--- a/api-ref/source/os-simple-tenant-usage.inc
+++ b/api-ref/source/os-simple-tenant-usage.inc
@@ -8,6 +8,18 @@ Reports usage statistics of compute and storage resources periodically
for an individual tenant or all tenants. The usage statistics will include
all instances' CPU, memory and local disk during a specific period.
+.. warning::
+
+  The ``os-simple-tenant-usage`` API reports usage statistics based on the
+  latest flavor configured for the virtual machine (VM), ignoring stop,
+  pause, and other events that might have happened during the VM's
+  lifetime. It therefore accounts usage for the whole time the VM existed
+  in the cloud environment.
+
+ More information can be found at
+ http://eavesdrop.openstack.org/meetings/nova/2020/nova.2020-12-03-16.00.log.txt,
+ and https://review.opendev.org/c/openstack/nova/+/711113
+
Microversion 2.40 added pagination (and ``next`` links) to the usage
statistics via optional ``limit`` and ``marker`` query parameters. If
``limit`` isn't provided, the configurable ``max_limit`` will be used which
diff --git a/api-ref/source/os-tenant-network.inc b/api-ref/source/os-tenant-network.inc
index c464bdc33cf..41314fb24cb 100644
--- a/api-ref/source/os-tenant-network.inc
+++ b/api-ref/source/os-tenant-network.inc
@@ -1,11 +1,8 @@
.. -*- rst -*-
-.. needs:parameter_verification
-.. needs:example_verification
-.. needs:body_verification
-===================================================
+====================================================
Project networks (os-tenant-networks) (DEPRECATED)
-===================================================
+====================================================
.. warning::
@@ -13,7 +10,7 @@
deprecated all the proxy APIs and users should use the native APIs
instead. These will fail with a 404 starting from microversion 2.36.
See: `Relevant Network APIs
- `__.
+ `__.
Creates, lists, shows information for, and deletes project networks.
@@ -58,7 +55,8 @@ through the ``policy.json`` file.
Normal response codes: 200
-Error response codes: badRequest(400), unauthorized(401), forbidden(403), conflict(409), serviceUnavailable(503)
+Error response codes: badRequest(400), unauthorized(401), forbidden(403),
+conflict(409), gone(410), serviceUnavailable(503)
**Example Create Project Network: JSON request**
@@ -122,7 +120,8 @@ can change these permissions through the ``policy.json`` file.
Normal response codes: 202
-Error response codes: unauthorized(401), forbidden(403), itemNotFound(404), conflict(409)
+Error response codes: unauthorized(401), forbidden(403), itemNotFound(404),
+conflict(409), gone(410)
Request
-------
diff --git a/api-ref/source/os-volume-attachments.inc b/api-ref/source/os-volume-attachments.inc
index 99a125562a5..803d59dc61b 100644
--- a/api-ref/source/os-volume-attachments.inc
+++ b/api-ref/source/os-volume-attachments.inc
@@ -34,16 +34,25 @@ Response
.. rest_parameters:: parameters.yaml
- volumeAttachments: volumeAttachments
- - device: device_resp
- - id: attachment_id_required
+ - id: volume_attachment_id_resp
- serverId: server_id
- volumeId: volumeId_resp
+ - device: attachment_device_resp
+ - tag: device_tag_bdm_attachment_resp
+ - delete_on_termination: delete_on_termination_attachments_resp
+ - attachment_id: attachment_volume_id_resp
+ - bdm_uuid: attachment_bdm_id_resp
**Example List volume attachments for an instance: JSON response**
.. literalinclude:: ../../doc/api_samples/os-volumes/list-volume-attachments-resp.json
:language: javascript
+**Example List tagged volume attachments for an instance (v2.89): JSON response**
+
+.. literalinclude:: ../../doc/api_samples/os-volumes/v2.89/list-volume-attachments-resp.json
+ :language: javascript
+
Attach a volume to an instance
==============================
@@ -63,6 +72,10 @@ Error response codes: badRequest(400), unauthorized(401), forbidden(403), itemNo
to actually support a multiattach volume depends on the volume type
and compute hosting the instance.
+.. note:: This is an asynchronous API, callers should poll the status and list
+ of attachments of the volume within the volume API to determine when
+ the attachment has completed successfully.
+
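A hedged sketch of the polling loop that note describes, against the block storage API (``CINDER_URL`` and ``TOKEN`` are placeholders; the exact endpoint layout varies by deployment):

.. code-block:: python

   import time
   import requests

   CINDER_URL = "http://controller:8776/v3/PROJECT_ID"  # placeholder
   TOKEN = "gAAAAA-placeholder"                         # placeholder

   def wait_until_attached(volume_id, timeout=300, interval=5):
       """Poll the volume until the block storage service reports in-use."""
       deadline = time.time() + timeout
       while time.time() < deadline:
           vol = requests.get(
               f"{CINDER_URL}/volumes/{volume_id}",
               headers={"X-Auth-Token": TOKEN},
           ).json()["volume"]
           if vol["status"] == "in-use" and vol["attachments"]:
               return vol["attachments"]
           time.sleep(interval)
       raise TimeoutError(f"volume {volume_id} did not reach in-use in time")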
Request
-------
@@ -73,6 +86,7 @@ Request
- volumeId: volumeId
- device: device
- tag: device_tag_bdm_attachment
+ - delete_on_termination: delete_on_termination_attachments_req
**Example Attach a volume to an instance: JSON request**
@@ -84,6 +98,11 @@ Request
.. literalinclude:: ../../doc/api_samples/os-volumes/v2.49/attach-volume-to-server-req.json
:language: javascript
+**Example Attach a volume to an instance with "delete_on_termination" (v2.79): JSON request**
+
+.. literalinclude:: ../../doc/api_samples/os-volumes/v2.79/attach-volume-to-server-req.json
+ :language: javascript
+
Response
--------
@@ -91,15 +110,27 @@ Response
- volumeAttachment: volumeAttachment
- device: device_resp
- - id: attachment_id_required
+ - id: attachment_id_resp
- serverId: server_id
- volumeId: volumeId_resp
+ - tag: device_tag_bdm_attachment_resp
+ - delete_on_termination: delete_on_termination_attachments_resp
**Example Attach a volume to an instance: JSON response**
.. literalinclude:: ../../doc/api_samples/os-volumes/attach-volume-to-server-resp.json
:language: javascript
+**Example Attach a tagged volume to an instance (v2.70): JSON response**
+
+.. literalinclude:: ../../doc/api_samples/os-volumes/v2.70/attach-volume-to-server-resp.json
+ :language: javascript
+
+**Example Attach a volume with "delete_on_termination" (v2.79): JSON response**
+
+.. literalinclude:: ../../doc/api_samples/os-volumes/v2.79/attach-volume-to-server-resp.json
+ :language: javascript
+
Show a detail of a volume attachment
====================================
@@ -125,16 +156,25 @@ Response
.. rest_parameters:: parameters.yaml
- volumeAttachment: volumeAttachment
- - device: device_resp
- - id: attachment_id_required
+ - id: volume_attachment_id_resp
- serverId: server_id
- volumeId: volumeId_resp
+ - device: attachment_device_resp
+ - tag: device_tag_bdm_attachment_resp
+ - delete_on_termination: delete_on_termination_attachments_resp
+ - attachment_id: attachment_volume_id_resp
+ - bdm_uuid: attachment_bdm_id_resp
**Example Show a detail of a volume attachment: JSON response**
.. literalinclude:: ../../doc/api_samples/os-volumes/volume-attachment-detail-resp.json
:language: javascript
+**Example Show a detail of a tagged volume attachment (v2.89): JSON response**
+
+.. literalinclude:: ../../doc/api_samples/os-volumes/v2.89/volume-attachment-detail-resp.json
+ :language: javascript
+
Update a volume attachment
==========================
@@ -145,9 +185,28 @@ Update a volume attachment.
.. note:: This action is only valid when the server is in ACTIVE, PAUSED or RESIZED state,
or a conflict(409) error will be returned.
-Policy defaults enable only users with the administrative role or
-the owner of the server to perform this operation. Cloud providers
-can change these permissions through the ``policy.json`` file.
+.. warning:: When updating volumeId, this API is typically meant to
+ only be used as part of a larger orchestrated volume
+ migration operation initiated in the block storage
+ service via the ``os-retype`` or ``os-migrate_volume``
+ volume actions. Direct usage of this API to update
+ volumeId is not recommended and may result in needing to
+ hard reboot the server to update details within the guest
+ such as block storage serial IDs. Furthermore, updating
+ volumeId via this API is only implemented by `certain
+ compute drivers`_.
+
+.. _certain compute drivers: https://docs.openstack.org/nova/latest/user/support-matrix.html#operation_swap_volume
+
+The policy default role is 'rule:system_admin_or_owner' and its scope is
+[system, project], which allows project members or system admins to
+change the fields of an attached volume of a server. Policy defaults
+enable only users with the administrative role to change ``volumeId``
+via this operation. Cloud providers can change these permissions
+through the ``policy.json`` file.
+
+Updating, or what is commonly referred to as "swapping", volume attachments
+with volumes that have more than one read/write attachment is not supported.
Normal response codes: 202
@@ -162,10 +221,19 @@ Request
- volume_id: volume_id_swap_src
- volumeAttachment: volumeAttachment_put
- volumeId: volumeId_swap
+ - delete_on_termination: delete_on_termination_put_req
+ - device: attachment_device_put_req
+ - serverId: attachment_server_id_put_req
+ - tag: device_tag_bdm_attachment_put_req
+ - id: attachment_id_put_req
-**Example Update a volume attachment: JSON request**
+.. note:: Other than ``volumeId``, as of v2.85 only
+ ``delete_on_termination`` may be changed from the current
+ value.
-.. literalinclude:: ../../doc/api_samples/os-volumes/update-volume-req.json
+**Example Update a volume attachment (v2.85): JSON request**
+
+.. literalinclude:: ../../doc/api_samples/os-volumes/v2.85/update-volume-attachment-delete-flag-req.json
:language: javascript
Response
@@ -187,6 +255,11 @@ Error response codes: badRequest(400), unauthorized(401), forbidden(403), itemNo
.. note:: From v2.20 detach a volume from an instance in SHELVED or SHELVED_OFFLOADED
state is allowed.
+.. note:: This is an asynchronous API, callers should poll the list
+ of volume attachments provided by ``GET
+ /servers/{server_id}/os-volume_attachments`` to determine when the
+ detachment of the volume has completed successfully.
+
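A minimal sketch of that polling loop, using the ``volumeAttachments`` and ``volumeId`` keys documented above (endpoint and token are placeholders):

.. code-block:: python

   import time
   import requests

   NOVA_URL = "http://controller:8774/v2.1"  # placeholder
   TOKEN = "gAAAAA-placeholder"              # placeholder

   def wait_until_detached(server_id, volume_id, timeout=300, interval=5):
       """Poll the server's attachment list until the volume disappears."""
       deadline = time.time() + timeout
       while time.time() < deadline:
           atts = requests.get(
               f"{NOVA_URL}/servers/{server_id}/os-volume_attachments",
               headers={"X-Auth-Token": TOKEN},
           ).json()["volumeAttachments"]
           if all(a["volumeId"] != volume_id for a in atts):
               return
           time.sleep(interval)
       raise TimeoutError(f"volume {volume_id} is still attached")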
Request
-------
diff --git a/api-ref/source/os-volumes.inc b/api-ref/source/os-volumes.inc
index 1b99a80896f..1f711978900 100644
--- a/api-ref/source/os-volumes.inc
+++ b/api-ref/source/os-volumes.inc
@@ -10,7 +10,7 @@
deprecated all the proxy APIs and users should use the native APIs
instead. These will fail with a 404 starting from microversion 2.36.
See: `Relevant Volume APIs
- `__.
+ `__.
Manages volumes and snapshots for use with the Compute API.
Lists, shows details, creates, and deletes volumes and snapshots.
@@ -50,7 +50,7 @@ Response
- displayDescription: display_description
- displayName: display_name
- id: volume_id_resp
- - metadata: metadata_object
+ - metadata: metadata_object
- size: size
- snapshotId: snapshot_id
- status: volume_status
@@ -110,7 +110,7 @@ Response
- displayName: display_name
- displayDescription: display_description
- id: volume_id_resp
- - metadata: metadata_object
+ - metadata: metadata_object
- size: size
- snapshotId: snapshot_id
- status: volume_status
@@ -158,7 +158,7 @@ Response
- displayName: display_name
- displayDescription: display_description
- id: volume_id_resp
- - metadata: metadata_object
+ - metadata: metadata_object
- size: size
- snapshotId: snapshot_id
- status: volume_status
@@ -205,7 +205,7 @@ Response
- displayName: display_name
- displayDescription: display_description
- id: volume_id_resp
- - metadata: metadata_object
+ - metadata: metadata_object
- size: size
- snapshotId: snapshot_id
- status: volume_status
diff --git a/api-ref/source/parameters.yaml b/api-ref/source/parameters.yaml
index 74621f2e226..5ea19faab93 100644
--- a/api-ref/source/parameters.yaml
+++ b/api-ref/source/parameters.yaml
@@ -25,7 +25,6 @@ tag_location:
in: header
required: true
type: string
- min_version: 2.26
x-compute-request-id_resp:
description: |
The local request ID, which is a unique ID generated automatically
@@ -324,7 +323,6 @@ tag:
in: path
required: true
type: string
- min_version: 2.26
tenant_id:
description: |
The UUID of the tenant in a multi-tenancy cloud.
@@ -398,7 +396,7 @@ all_tenants_query:
description: |
Specify the ``all_tenants`` query parameter to list all instances
for all projects. By default this is only allowed by administrators.
- If the value of this parameter is not specified, it is treated as
+ If this parameter is specified without a value, the value defaults to
``True``. If the value is specified, ``1``, ``t``, ``true``,
``on``, ``y`` and ``yes`` are treated as ``True``. ``0``, ``f``,
``false``, ``off``, ``n`` and ``no`` are treated as ``False``.
@@ -419,8 +417,9 @@ availability_zone_query_server:
description: |
Filter the server list result by server availability zone.
- This parameter is only valid when specified by administrators.
- If non-admin users specify this parameter, it is ignored.
+ This parameter is restricted to administrators until microversion 2.83.
+ If non-admin users specify this parameter on a microversion less than 2.83,
+ it will be ignored.
in: query
required: false
type: string
@@ -453,6 +452,70 @@ changes-since:
in: query
required: false
type: string
+changes_before_instance_action:
+ description: |
+   Filters the response by a date and time stamp when the instance actions last changed.
+   Those instance actions that changed before or equal to the specified date and time
+   stamp are returned.
+
+   The date and time stamp format is `ISO 8601 <https://en.wikipedia.org/wiki/ISO_8601>`_:
+ ::
+
+ CCYY-MM-DDThh:mm:ss±hh:mm
+
+ The ``±hh:mm`` value, if included, returns the time zone as an offset from UTC.
+ For example, ``2015-08-27T09:49:58-05:00``.
+ If you omit the time zone, the UTC time zone is assumed.
+ When both ``changes-since`` and ``changes-before`` are specified,
+ the value of the ``changes-before`` must be later than or equal to
+   the value of the ``changes-since``, otherwise the API will return 400.
+ in: query
+ required: false
+ type: string
+ min_version: 2.66
+changes_before_migration:
+ description: |
+ Filters the response by a date and time stamp when the migration last
+ changed. Those migrations that changed before or equal to the specified date and time
+ stamp are returned.
+
+   The date and time stamp format is `ISO 8601 <https://en.wikipedia.org/wiki/ISO_8601>`_:
+ ::
+
+ CCYY-MM-DDThh:mm:ss±hh:mm
+
+ The ``±hh:mm`` value, if included, returns the time zone as an offset from UTC.
+ For example, ``2015-08-27T09:49:58-05:00``.
+ If you omit the time zone, the UTC time zone is assumed.
+ When both ``changes-since`` and ``changes-before`` are specified,
+ the value of the ``changes-before`` must be later than or equal to
+   the value of the ``changes-since``, otherwise the API will return 400.
+ in: query
+ required: false
+ type: string
+ min_version: 2.66
+changes_before_server:
+ description: |
+ Filters the response by a date and time stamp when the server last changed.
+ Those servers that changed before or equal to the specified date and time stamp
+ are returned. To help keep track of changes this may also return recently deleted
+ servers.
+
+   The date and time stamp format is `ISO 8601 <https://en.wikipedia.org/wiki/ISO_8601>`_:
+ ::
+
+ CCYY-MM-DDThh:mm:ss±hh:mm
+
+ The ``±hh:mm`` value, if included, returns the time zone as an offset from UTC.
+ For example, ``2015-08-27T09:49:58-05:00``.
+ If you omit the time zone, the UTC time zone is assumed.
+ When both ``changes-since`` and ``changes-before`` are specified,
+ the value of the ``changes-before`` must be later than or equal to
+   the value of the ``changes-since``, otherwise the API will return 400.
+ in: query
+ required: false
+ type: string
+ min_version: 2.66
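A sketch of using the ``changes-since``/``changes-before`` pair when listing servers (endpoint, token and timestamps are placeholders):

.. code-block:: python

   import requests

   NOVA_URL = "http://controller:8774/v2.1"  # placeholder
   TOKEN = "gAAAAA-placeholder"              # placeholder

   # changes-before must be later than or equal to changes-since,
   # otherwise the API returns 400.
   resp = requests.get(
       f"{NOVA_URL}/servers/detail",
       params={
           "changes-since": "2021-01-01T00:00:00Z",
           "changes-before": "2021-02-01T00:00:00Z",
       },
       headers={"X-Auth-Token": TOKEN,
                "X-OpenStack-Nova-API-Version": "2.66"},
   )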
changes_since_instance_action:
description: |
Filters the response by a date and time stamp when the instance action last
@@ -466,6 +529,9 @@ changes_since_instance_action:
The ``±hh:mm`` value, if included, returns the time zone as an offset from UTC.
For example, ``2015-08-27T09:49:58-05:00``.
If you omit the time zone, the UTC time zone is assumed.
+ When both ``changes-since`` and ``changes-before`` are specified,
+ the value of the ``changes-since`` must be earlier than or equal to
+  the value of the ``changes-before``, otherwise the API will return 400.
in: query
required: false
type: string
@@ -483,6 +549,9 @@ changes_since_migration:
The ``±hh:mm`` value, if included, returns the time zone as an offset from UTC.
For example, ``2015-08-27T09:49:58-05:00``.
If you omit the time zone, the UTC time zone is assumed.
+ When both ``changes-since`` and ``changes-before`` are specified,
+ the value of the ``changes-since`` must be earlier than or equal to
+  the value of the ``changes-before``, otherwise the API will return 400.
in: query
required: false
type: string
@@ -501,6 +570,9 @@ changes_since_server:
The ``±hh:mm`` value, if included, returns the time zone as an offset from UTC.
For example, ``2015-08-27T09:49:58-05:00``.
If you omit the time zone, the UTC time zone is assumed.
+ When both ``changes-since`` and ``changes-before`` are specified,
+ the value of the ``changes-since`` must be earlier than or equal to
+  the value of the ``changes-before``, otherwise the API will return 400.
in: query
required: false
type: string
@@ -508,8 +580,9 @@ config_drive_query_server:
description: |
Filter the server list result by the config drive setting of the server.
- This parameter is only valid when specified by administrators.
- If non-admin users specify this parameter, it is ignored.
+ This parameter is restricted to administrators until microversion 2.83.
+ If non-admin users specify this parameter on a microversion less than 2.83,
+ it will be ignored.
in: query
required: false
type: string
@@ -526,8 +599,9 @@ created_at_query_server:
For example, ``2015-08-27T09:49:58-05:00``.
If you omit the time zone, the UTC time zone is assumed.
- This parameter is only valid when specified by administrators.
- If non-admin users specify this parameter, it is ignored.
+ This parameter is restricted to administrators until microversion 2.83.
+ If non-admin users specify this parameter on a microversion less than 2.83,
+ it will be ignored.
in: query
required: false
type: string
@@ -626,11 +700,17 @@ exclude:
flavor_is_public_query:
in: query
required: false
- type: boolean
+ type: string
description: |
- Filters the flavor list by only public flavors. By default ``non
- admin`` users only see public flavors, and ``admin`` users can see
- additional non public flavors.
+ This parameter is only applicable to users with the administrative role.
+ For all other non-admin users, the parameter is ignored and only public
+ flavors will be returned. Filters the flavor list based on whether the
+ flavor is public or private. If the value of this parameter is not
+ specified, it is treated as ``True``. If the value is specified, ``1``,
+ ``t``, ``true``, ``on``, ``y`` and ``yes`` are treated as ``True``. ``0``,
+ ``f``, ``false``, ``off``, ``n`` and ``no`` are treated as ``False``
+ (they are case-insensitive). If the value is ``None`` (case-insensitive)
+ both public and private flavors will be listed in a single request.
flavor_query:
description: |
Filters the response by a flavor, as a UUID. A flavor is a combination of memory,
@@ -657,8 +737,10 @@ hostname_query_server:
description: |
Filter the server list result by the host name of server.
- This parameter is only valid when specified by administrators.
- If non-admin users specify this parameter, it is ignored.
+ This parameter is only valid when specified by administrators until
+ microversion 2.90, after which it can be specified by all users.
+ If non-admin users specify this parameter before microversion 2.90, it is
+ ignored.
in: query
required: false
type: string
@@ -811,8 +893,9 @@ key_name_query_server:
description: |
Filter the server list result by keypair name.
- This parameter is only valid when specified by administrators.
- If non-admin users specify this parameter, it is ignored.
+ This parameter is restricted to administrators until microversion 2.83.
+ If non-admin users specify this parameter on a microversion less than 2.83,
+ it will be ignored.
in: query
required: false
type: string
@@ -865,8 +948,9 @@ launched_at_query_server:
For example, ``2015-08-27T09:49:58-05:00``.
If you omit the time zone, the UTC time zone is assumed.
- This parameter is only valid when specified by administrators.
- If non-admin users specify this parameter, it is ignored.
+ This parameter is restricted to administrators until microversion 2.83.
+ If non-admin users specify this parameter on a microversion less than 2.83,
+ it will be ignored.
in: query
required: false
type: string
@@ -898,6 +982,18 @@ locked_by_query_server:
in: query
required: false
type: string
+locked_query_server:
+ description: |
+ Specify the ``locked`` query parameter to list all locked or unlocked
+ instances. If the value is specified, ``1``, ``t``, ``true``,
+ ``on``, ``y`` and ``yes`` are treated as ``True``. ``0``, ``f``,
+ ``false``, ``off``, ``n`` and ``no`` are treated as ``False``.
+ (They are case-insensitive.) Any other value provided will be considered
+ invalid.
+ in: query
+ required: false
+ type: boolean
+ min_version: 2.73
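A short sketch of the ``locked`` filter described above (endpoint and token are placeholders):

.. code-block:: python

   import requests

   NOVA_URL = "http://controller:8774/v2.1"  # placeholder
   TOKEN = "gAAAAA-placeholder"              # placeholder

   resp = requests.get(
       f"{NOVA_URL}/servers/detail",
       # "1", "t", "on", "y" and "yes" are also treated as True.
       params={"locked": "true"},
       headers={"X-Auth-Token": TOKEN,
                "X-OpenStack-Nova-API-Version": "2.73"},
   )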
marker:
description: |
The ID of the last-seen item. Use the ``limit`` parameter to make an initial limited
@@ -961,7 +1057,12 @@ migration_status:
type: string
migration_type:
description: |
- The type of migration to filter.
+ The type of migration to filter. Valid values are:
+
+ * ``evacuation``
+ * ``live-migration``
+ * ``migration``
+ * ``resize``
in: query
required: false
type: string
@@ -973,7 +1074,7 @@ minDisk:
type: integer
minRam:
description: |
- Filters the response by a minimum RAM, in MB. For example, ``512``.
+ Filters the response by a minimum RAM, in MiB. For example, ``512``.
in: query
required: false
type: integer
@@ -1027,18 +1128,27 @@ power_state_query_server:
6: CRASHED
7: SUSPENDED
- This parameter is only valid when specified by administrators.
- If non-admin users specify this parameter, it is ignored.
+ This parameter is restricted to administrators until microversion 2.83.
+ If non-admin users specify this parameter on a microversion less than 2.83,
+ it will be ignored.
progress_query_server:
description: |
Filter the server list result by the progress of the server.
The value could be from 0 to 100 as integer.
- This parameter is only valid when specified by administrators.
- If non-admin users specify this parameter, it is ignored.
+ This parameter is restricted to administrators until microversion 2.83.
+ If non-admin users specify this parameter on a microversion less than 2.83,
+ it will be ignored.
in: query
required: false
type: integer
+project_id_query_migrations:
+ description: |
+ Filter the migrations by the given project ID.
+ in: query
+ required: false
+ type: string
+ min_version: 2.80
project_id_query_server:
description: |
Filter the list of servers by the given project ID.
@@ -1109,7 +1219,7 @@ server_status_query:
description: |
Filters the response by a server status, as a string. For example, ``ACTIVE``.
- Up to microversion 2.37, an empty list is returnd if an invalid status is
+ Up to microversion 2.37, an empty list is returned if an invalid status is
specified. Starting from microversion 2.38, a 400 error is returned
in that case.
in: query
@@ -1136,7 +1246,7 @@ sort_dir_flavor:
Sort direction. A valid value is ``asc`` (ascending) or ``desc`` (descending).
Default is ``asc``. You can specify multiple pairs of sort key and sort direction
query parameters. If you omit the sort direction in a pair, the API uses the natural
- sorting direction of the direction of the flavor ``sort_key`` attribute.
+ sorting direction of the flavor ``sort_key`` attribute.
in: query
required: false
type: string
@@ -1145,7 +1255,7 @@ sort_dir_server:
Sort direction. A valid value is ``asc`` (ascending) or ``desc`` (descending).
Default is ``desc``. You can specify multiple pairs of sort key and sort direction
query parameters. If you omit the sort direction in a pair, the API uses the natural
- sorting direction of the direction of the server ``sort_key`` attribute.
+ sorting direction of the server ``sort_key`` attribute.
in: query
required: false
type: string
@@ -1154,7 +1264,23 @@ sort_key_flavor:
Sorts by a flavor attribute. Default attribute is ``flavorid``. You can specify
multiple pairs of sort key and sort direction query parameters. If you omit the
sort direction in a pair, the API uses the natural sorting direction of the flavor
- ``sort_key`` attribute.
+ ``sort_key`` attribute. The sort keys are limited to:
+
+ - ``created_at``
+ - ``description``
+ - ``disabled``
+ - ``ephemeral_gb``
+ - ``flavorid``
+ - ``id``
+ - ``is_public``
+ - ``memory_mb``
+ - ``name``
+ - ``root_gb``
+ - ``rxtx_factor``
+ - ``swap``
+ - ``updated_at``
+ - ``vcpu_weight``
+ - ``vcpus``
in: query
required: false
type: string
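For instance, listing the largest-memory flavors first with one of the keys above (endpoint and token are placeholders):

.. code-block:: python

   import requests

   NOVA_URL = "http://controller:8774/v2.1"  # placeholder
   TOKEN = "gAAAAA-placeholder"              # placeholder

   # sort_key must be one of the keys listed above.
   resp = requests.get(
       f"{NOVA_URL}/flavors/detail",
       params={"sort_key": "memory_mb", "sort_dir": "desc"},
       headers={"X-Auth-Token": TOKEN},
   )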
@@ -1182,6 +1308,7 @@ sort_key_server:
- ``key_name``
- ``launch_index``
- ``launched_at``
+ - ``locked`` (New in version 2.73)
- ``locked_by``
- ``node``
- ``power_state``
@@ -1253,8 +1380,9 @@ task_state_query_server:
description: |
Filter the server list result by task state.
- This parameter is only valid when specified by administrators.
- If non-admin users specify this parameter, it is ignored.
+ This parameter is restricted to administrators until microversion 2.83.
+ If non-admin users specify this parameter on a microversion less than 2.83,
+ it will be ignored.
tenant_id_query:
description: |
Specify the project ID (tenant ID) to show the rate and absolute limits.
@@ -1274,8 +1402,9 @@ terminated_at_query_server:
For example, ``2015-08-27T09:49:58-05:00``.
If you omit the time zone, the UTC time zone is assumed.
- This parameter is only valid when specified by administrators.
- If non-admin users specify this parameter, it is ignored.
+ This parameter is restricted to administrators until microversion 2.83.
+ If non-admin users specify this parameter on a microversion less than 2.83,
+ it will be ignored.
in: query
required: false
type: string
@@ -1298,6 +1427,13 @@ usage_marker:
required: false
type: string
min_version: 2.40
+user_id_query_migrations:
+ description: |
+ Filter the migrations by the given user ID.
+ in: query
+ required: false
+ type: string
+ min_version: 2.80
user_id_query_quota:
description: |
ID of user to list the quotas for.
@@ -1314,8 +1450,9 @@ user_id_query_server:
description: |
Filter the list of servers by the given user ID.
- This parameter is only valid when specified by administrators.
- If non-admin users specify this parameter, it is ignored.
+ This parameter is restricted to administrators until microversion 2.83.
+ If non-admin users specify this parameter on a microversion less than 2.83,
+ it will be ignored.
in: query
required: false
type: string
@@ -1344,8 +1481,9 @@ vm_state_query_server:
- ``STOPPED``
- ``SUSPENDED``
- This parameter is only valid when specified by administrators.
- If non-admin users specify this parameter, it is ignored.
+ This parameter is restricted to administrators until microversion 2.83.
+ If non-admin users specify this parameter on a microversion less than 2.83,
+ it will be ignored.
in: query
required: false
type: string
@@ -1416,12 +1554,8 @@ address:
type: string
addresses:
description: |
- The addresses for the server. Addresses information is hidden for any server
- in a state set in the ``hide_server_address_states`` configuration option.
- By default, servers in ``building`` state hide their addresses information.
- See ``nova.conf`` `configuration options
- `_
- for more information.
+ The addresses for the server. Servers with status ``BUILD`` hide their
+ addresses information.
in: body
required: true
type: object
@@ -1531,12 +1665,26 @@ aggregate_az:
in: body
required: true
type: string
-aggregate_az_optional:
+aggregate_az_optional_create:
+ description: |
+ The availability zone of the host aggregate. You should use a custom
+ availability zone rather than the default returned by the
+ os-availability-zone API. The availability zone must not include ':'
+ in its name.
+ in: body
+ required: false
+ type: string
+aggregate_az_optional_update:
description: |
The availability zone of the host aggregate. You should use a custom
availability zone rather than the default returned by the
os-availability-zone API. The availability zone must not include ':'
in its name.
+
+ .. warning:: You should not change or unset the availability zone of an
+ aggregate when that aggregate has hosts which contain servers in it
+ since that may impact the ability for those servers to move to another
+ host.
in: body
required: false
type: string
@@ -1552,9 +1700,26 @@ aggregate_id_body:
in: body
required: true
type: integer
-aggregate_metadata:
+aggregate_metadata_request:
+ description: |
+ Metadata key and value pairs associated with the aggregate.
+ The maximum size for each metadata key and value pair is 255 bytes.
+
+ New keys will be added to existing aggregate metadata. For existing
+ keys, if the value is ``null`` the entry is removed, otherwise the
+ value is updated. Note that the special ``availability_zone`` metadata
+ entry cannot be unset to ``null``.
+
+ .. warning:: You should not change the availability zone of an
+ aggregate when that aggregate has hosts which contain servers in it
+ since that may impact the ability for those servers to move to another
+ host.
+ in: body
+ required: true
+ type: object
+aggregate_metadata_response:
description: |
- Metadata key and value pairs associate with the aggregate.
+ Metadata key and value pairs associated with the aggregate.
in: body
required: true
type: object
@@ -1613,30 +1778,59 @@ associate_host:
in: body
required: true
type: string
+attachment_bdm_id_resp:
+ description: |
+ The UUID of the block device mapping record in Nova for the attachment.
+ in: body
+ required: true
+ type: string
+ min_version: 2.89
+attachment_device_put_req:
+ description: |
+   Name of the device in the attachment object, such as ``/dev/vdb``.
+ in: body
+ required: false
+ type: string
+ min_version: 2.85
attachment_device_resp:
description: |
    Name of the device in the attachment object, such as ``/dev/vdb``.
in: body
required: false
type: string
-attachment_id_required:
+attachment_id_put_req:
description: |
The UUID of the attachment.
in: body
- required: true
+ required: false
type: string
+ min_version: 2.85
attachment_id_resp:
description: |
The UUID of the attachment.
in: body
required: false
type: string
+attachment_server_id_put_req:
+ description: |
+ The UUID of the server.
+ in: body
+ required: false
+ type: string
+ min_version: 2.85
attachment_server_id_resp:
description: |
The UUID of the server.
in: body
required: false
type: string
+attachment_volume_id_resp:
+ description: |
+ The UUID of the associated volume attachment in Cinder.
+ in: body
+ required: true
+ type: string
+ min_version: 2.89
attachment_volumeId_resp:
description: |
The UUID of the attached volume.
@@ -1661,6 +1855,15 @@ availability_zone_state:
in: body
required: true
type: object
+availability_zone_unshelve:
+ description: |
+ The availability zone name. Specifying an availability zone is only
+ allowed when the server status is ``SHELVED_OFFLOADED`` otherwise a
+ 409 HTTPConflict response is returned.
+ in: body
+ required: false
+ type: string
+ min_version: 2.77
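A sketch of unshelving into a specific availability zone via this 2.77 parameter (endpoint, token, server UUID and zone name are placeholders):

.. code-block:: python

   import requests

   NOVA_URL = "http://controller:8774/v2.1"            # placeholder
   TOKEN = "gAAAAA-placeholder"                        # placeholder
   SERVER_ID = "00000000-0000-0000-0000-000000000000"  # placeholder UUID

   # Only valid while the server is SHELVED_OFFLOADED; otherwise 409.
   resp = requests.post(
       f"{NOVA_URL}/servers/{SERVER_ID}/action",
       headers={"X-Auth-Token": TOKEN,
                "X-OpenStack-Nova-API-Version": "2.77"},
       json={"unshelve": {"availability_zone": "az-east"}},  # placeholder AZ
   )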
available:
description: |
Returns true if the availability zone is available.
@@ -1836,6 +2039,11 @@ boot_index:
in: body
required: true
type: integer
+cache:
+ description: A list of image objects to cache.
+ in: body
+ required: true
+ type: array
certificate:
description: |
The certificate object.
@@ -1880,7 +2088,7 @@ code:
type: string
config_drive:
description: |
- Indicates whether a configuration drive enables metadata injection. The config_drive
+ Indicates whether a config drive enables metadata injection. The config_drive
setting provides information about a drive that the instance can mount at boot
time. The instance reads files from the drive to get information that is normally
available through the metadata service. This metadata is different from the user
@@ -1904,6 +2112,15 @@ config_drive_resp:
in: body
required: true
type: string
+config_drive_resp_update_rebuild:
+ description: |
+ Indicates whether or not a config drive was used for this server.
+ The value is ``True`` or an empty string. An empty string stands for
+ ``False``.
+ in: body
+ required: true
+ type: string
+ min_version: 2.75
configure_project_cloudpipe:
description: |
    VPN IP and port information to configure the cloudpipe instance.
@@ -2019,6 +2236,7 @@ cpu_info:
in: body
required: true
type: object
+ max_version: 2.87
create_info:
description: |
Information for snapshot creation.
@@ -2073,9 +2291,20 @@ createImage:
type: object
current_workload:
description: |
- The current_workload is the number of tasks the hypervisor is responsible for. This will be
- equal or greater than the number of active VMs on the system (it can be greater when VMs
- are being deleted and the hypervisor is still cleaning up).
+ The current_workload is the number of tasks the hypervisor is responsible
+ for. This will be equal or greater than the number of active VMs on the
+ system (it can be greater when VMs are being deleted and the hypervisor is
+ still cleaning up).
+ in: body
+ required: true
+ type: integer
+ max_version: 2.87
+current_workload_total:
+ description: |
+ The current_workload is the number of tasks the hypervisors are responsible
+ for. This will be equal or greater than the number of active VMs on the
+ systems (it can be greater when VMs are being deleted and a hypervisor is
+ still cleaning up).
in: body
required: true
type: integer
@@ -2092,6 +2321,30 @@ delete_on_termination:
in: body
required: false
type: boolean
+delete_on_termination_attachments_req:
+ description: |
+ To delete the attached volume when the server is destroyed, specify ``true``.
+ Otherwise, specify ``false``. Default: ``false``
+ in: body
+ required: false
+ type: boolean
+ min_version: 2.79
+delete_on_termination_attachments_resp:
+ description: |
+ A flag indicating if the attached volume will be deleted when the server is
+ deleted.
+ in: body
+ required: true
+ type: boolean
+ min_version: 2.79
+delete_on_termination_put_req:
+ description: |
+ A flag indicating if the attached volume will be deleted when the server is
+ deleted.
+ in: body
+ required: false
+ type: boolean
+ min_version: 2.85
deleted:
description: |
A boolean indicates whether this aggregate is deleted or not, if it has
@@ -2182,6 +2435,20 @@ device_tag_bdm_attachment:
required: false
type: string
min_version: 2.49
+device_tag_bdm_attachment_put_req:
+ description: |
+ The device tag applied to the volume block device or ``null``.
+ in: body
+ required: true
+ type: string
+ min_version: 2.85
+device_tag_bdm_attachment_resp:
+ description: |
+ The device tag applied to the volume block device or ``null``.
+ in: body
+ required: true
+ type: string
+ min_version: 2.70
device_tag_nic:
description: |
A device role tag that can be applied to a network interface. The guest OS
@@ -2206,12 +2473,36 @@ device_tag_nic_attachment:
required: false
type: string
min_version: 2.49
+device_tag_nic_attachment_resp:
+ description: |
+ The device tag applied to the virtual network interface or ``null``.
+ in: body
+ required: true
+ type: string
+ min_version: 2.70
device_type:
description: |
The device type. For example, ``disk``, ``cdrom``.
in: body
required: false
type: string
+device_volume_type:
+ description: |
+ The device ``volume_type``. This can be used to specify the type of volume
+ which the compute service will create and attach to the server.
+ If not specified, the block storage service will provide a default volume
+ type. See the `block storage volume types API `_
+ for more details.
+ There are some restrictions on ``volume_type``:
+
+ - It can be a volume type ID or name.
+ - It is only supported with ``source_type`` of ``blank``, ``image`` or
+ ``snapshot``.
+ - It is only supported with ``destination_type`` of ``volume``.
+ in: body
+ required: false
+ type: string
+ min_version: 2.67
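A hedged sketch of a boot-from-volume request that uses ``volume_type`` under the restrictions above (endpoint, token, image UUID, flavor and volume type name are placeholders):

.. code-block:: python

   import requests

   NOVA_URL = "http://controller:8774/v2.1"           # placeholder
   TOKEN = "gAAAAA-placeholder"                       # placeholder
   IMAGE_ID = "00000000-0000-0000-0000-000000000000"  # placeholder image UUID
   FLAVOR_ID = "1"                                    # placeholder flavor

   resp = requests.post(
       f"{NOVA_URL}/servers",
       headers={"X-Auth-Token": TOKEN,
                "X-OpenStack-Nova-API-Version": "2.67"},
       json={"server": {
           "name": "bfv-demo",
           "flavorRef": FLAVOR_ID,
           "networks": "auto",
           "block_device_mapping_v2": [{
               "boot_index": 0,
               "source_type": "image",
               "destination_type": "volume",
               "uuid": IMAGE_ID,
               "volume_size": 10,
               # Honoured only with the source/destination types above.
               "volume_type": "fast-ssd",  # placeholder volume type name
           }],
       }},
   )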
# Optional input parameter in the body for PUT /os-services/{service_id} added
# in microversion 2.53.
disabled_reason_2_53_in:
@@ -2229,22 +2520,28 @@ disabled_reason_body:
type: string
disk_available_least:
description: |
- The actual free disk on this hypervisor(in GB).
+    The actual free disk on this hypervisor (in GiB). If allocation ratios used
+ for overcommit are configured, this may be negative. This is intentional as
+ it provides insight into the amount by which the disk is overcommitted.
in: body
required: true
type: integer
+ max_version: 2.87
disk_available_least_total:
description: |
- The actual free disk on all hypervisors(in GB).
+    The actual free disk on all hypervisors (in GiB). If allocation ratios used
+ for overcommit are configured, this may be negative. This is intentional as
+ it provides insight into the amount by which the disk is overcommitted.
in: body
required: true
type: integer
disk_bus:
description: |
Disk bus type, some hypervisors (currently only libvirt) support
- specify this parameter. Some example disk_bus values can be: `ide`,
- `usb`, `virtio`, `scsi`. This is not an exhaustive list as it depends
- on the virtualization driver, and may change as more support is added.
+    specifying this parameter. Some example disk_bus values can be: ``fdc``,
+ ``ide``, ``sata``, ``scsi``, ``usb``, ``virtio``, ``xen``, ``lxc``
+ and ``uml``. Support for each bus type depends on the virtualization driver
+ and underlying hypervisor.
in: body
required: false
type: string
@@ -2374,6 +2671,13 @@ event:
in: body
required: true
type: string
+event_details:
+ min_version: 2.84
+ description: |
+ Details of the event. May be ``null``.
+ in: body
+ required: true
+ type: string
event_finish_time:
description: |
The date and time when the event was finished. The date and time
@@ -2414,9 +2718,16 @@ event_hostId:
type: string
event_name:
description: |
- The event name. A valid value is ``network-changed``, ``network-vif-plugged``,
- ``network-vif-unplugged``, ``network-vif-deleted``, or ``volume-extended``.
- The event name ``volume-extended`` is added since microversion ``2.51``.
+ The event name. A valid value is:
+
+ - ``network-changed``
+ - ``network-vif-plugged``
+ - ``network-vif-unplugged``
+ - ``network-vif-deleted``
+ - ``volume-extended`` (since microversion ``2.51``)
+ - ``power-update`` (since microversion ``2.76``)
+ - ``accelerator-request-bound`` (since microversion ``2.82``)
+
in: body
required: true
type: string
@@ -2450,7 +2761,15 @@ event_status:
type: string
event_tag:
description: |
- A string value that identifies the event.
+ A string value that identifies the event. Certain types of events require
+ specific tags:
+
+ - For the ``accelerator-request-bound`` event, the tag must be
+ the accelerator request UUID.
+  - For the ``power-update`` event the tag must either be ``POWER_ON``
+ or ``POWER_OFF``.
+ - For the ``volume-extended`` event the tag must be the volume id.
+
in: body
required: false
type: string
@@ -2466,7 +2785,7 @@ event_traceback:
type: string
events:
description: |
- The action.
+ List of external events to process.
in: body
required: true
type: array
@@ -2692,7 +3011,6 @@ flavor_description_required:
type: string
in: body
required: true
- min_version: 2.55
description: |
A free form description of the flavor. Limited to 65535 characters
in length. Only printable characters are allowed.
@@ -2703,6 +3021,12 @@ flavor_description_resp:
required: true
type: string
min_version: 2.55
+flavor_description_resp_no_min:
+ description: |
+ The description of the flavor.
+ in: body
+ required: true
+ type: string
flavor_disabled:
in: body
required: false
@@ -2717,7 +3041,7 @@ flavor_disk:
description: |
The size of the root disk that will be created in GiB. If 0 the
root disk will be set to exactly the size of the image used to
- deploy the instance. However, in this case filter scheduler cannot
+ deploy the instance. However, in this case the scheduler cannot
select the compute host based on the virtual image size. Therefore,
0 should only be used for volume booted instances or for testing
purposes. Volume-backed instances can be enforced for flavors with
@@ -2825,7 +3149,7 @@ flavor_links_2_46:
description: |
Links to the flavor resource. See `API Guide / Links and
References
- `_
+ `_
for more info.
in: body
required: true
@@ -2896,6 +3220,8 @@ flavor_swap:
The size of a dedicated swap disk that will be allocated, in
MiB. If 0 (the default), no dedicated swap disk will be created.
Currently, the empty string ('') is used to represent 0.
+    As of microversion 2.75 the default return value of swap is 0
+    instead of the empty string.
in: body
required: true
type: integer
@@ -2968,16 +3294,22 @@ floating_ip_obj:
type: object
floating_ip_pool_name:
description: |
- The name of the floating ip pool.
+ The name of the floating IP pool.
in: body
required: true
type: string
floating_ip_pool_name_optional:
description: |
- The name of the floating ip pool
+ The name of the floating IP pool
in: body
required: false
type: string
+floating_ip_pool_name_or_id:
+ description: |
+ The name or ID of the floating IP pool.
+ in: body
+ required: true
+ type: string
floating_ip_pools:
description: |
The ``floating_ip_pools`` object.
@@ -3048,6 +3380,7 @@ force_evacuate:
required: false
type: boolean
min_version: 2.29
+ max_version: 2.67
force_live_migrate:
description: |
Force a live-migration by not verifying the provided destination host by
@@ -3061,13 +3394,13 @@ force_live_migrate:
required: false
type: boolean
min_version: 2.30
+ max_version: 2.67
force_migration_complete:
description: |
The action to force an in-progress live migration to complete.
in: body
required: true
type: none
- min_version: 2.22
force_snapshot:
description: |
Indicates whether to create a snapshot, even if the volume is attached.
@@ -3079,8 +3412,9 @@ force_snapshot:
forced_down_2_11:
description: |
Whether or not this service was forced down manually by an
- administrator. This value is useful to know that some 3rd party has
- verified the service should be marked down.
+ administrator after the service was fenced. This value is useful
+ to know that some 3rd party has verified the service should be
+ marked down.
in: body
required: true
type: boolean
@@ -3089,9 +3423,17 @@ forced_down_2_11:
# PUT /os-services/{service_id} added in 2.53.
forced_down_2_53_in:
description: |
- Whether or not this service was forced down manually by an
- administrator. This value is useful to know that some 3rd party has
- verified the service should be marked down.
+ ``forced_down`` is a manual override to tell nova that the service in
+ question has been fenced manually by the operations team (either hard
+ powered off, or network unplugged). That signals that it is safe to proceed
+ with ``evacuate`` or other operations that nova has safety checks to
+ prevent for hosts that are up.
+
+ .. warning::
+
+ Setting a service forced down without completely fencing it will likely
+ result in the corruption of VMs on that host.
+
in: body
required: false
type: boolean
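A minimal sketch, assuming a ``requests``-based client and placeholder
endpoint, token, and service UUID: once the host has been completely fenced,
the service can be marked down like this.

.. code-block:: python

    # Hypothetical sketch: mark an already-fenced compute service forced down.
    import requests

    NOVA_URL = "http://controller:8774/v2.1"  # placeholder endpoint
    TOKEN = "gAAAAA..."                       # a valid Keystone token
    SERVICE_ID = "e81d66a4-ddd3-4aba-8a84-171d1cb4d339"  # placeholder

    resp = requests.put(
        f"{NOVA_URL}/os-services/{SERVICE_ID}",
        headers={
            "X-Auth-Token": TOKEN,
            # this form of the API requires microversion 2.53 or later
            "OpenStack-API-Version": "compute 2.53",
        },
        json={"forced_down": True},
    )
    print(resp.status_code, resp.json())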
@@ -3100,8 +3442,9 @@ forced_down_2_53_in:
forced_down_2_53_out:
description: |
Whether or not this service was forced down manually by an
- administrator. This value is useful to know that some 3rd party has
- verified the service should be marked down.
+ administrator after the service was fenced. This value is useful
+ to know that some 3rd party has verified the service should be
+ marked down.
in: body
required: true
type: boolean
@@ -3113,13 +3456,16 @@ forceDelete:
type: none
free_ram_mb:
description: |
- The free RAM in this hypervisor(in MB).
+    The free RAM in this hypervisor (in MiB). This does not take allocation
+ ratios used for overcommit into account so this value may be negative.
in: body
required: true
type: integer
+ max_version: 2.87
free_ram_mb_total:
description: |
- The free RAM on all hypervisors(in MB).
+    The free RAM on all hypervisors (in MiB). This does not take allocation
+ ratios used for overcommit into account so this value may be negative.
in: body
required: true
type: integer
@@ -3179,7 +3525,7 @@ host_cpu:
type: integer
host_disk_gb:
description: |
- The disk size on the host (in GB).
+ The disk size on the host (in GiB).
in: body
required: true
type: integer
@@ -3210,7 +3556,7 @@ host_maintenance_mode_in:
type: string
host_memory_mb:
description: |
- The memory size on the host (in MB).
+ The memory size on the host (in MiB).
in: body
required: true
type: integer
@@ -3328,6 +3674,22 @@ host_status_body_in:
in: body
required: false
type: string
+host_status_update_rebuild:
+ description: |
+    The host status, where each subsequent value in the list below overrides
+    the previous one:
+
+    - ``UP`` if nova-compute is up.
+    - ``UNKNOWN`` if nova-compute is not reported by the servicegroup driver.
+    - ``DOWN`` if nova-compute is forced down.
+    - ``MAINTENANCE`` if nova-compute is disabled.
+    - An empty string indicates there is no host for the server.
+
+ This attribute appears in the response only if the policy permits.
+ By default, only administrators can get this parameter.
+ in: body
+ required: false
+ type: string
+ min_version: 2.75
host_zone:
description: |
The available zone of the host.
@@ -3398,20 +3760,25 @@ hypervisor_diagnostics:
min_version: 2.48
hypervisor_free_disk_gb:
description: |
- The free disk remaining on this hypervisor(in GB).
+    The free disk remaining on this hypervisor (in GiB). This does not take
+ allocation ratios used for overcommit into account so this value may be
+ negative.
in: body
required: true
type: integer
+ max_version: 2.87
hypervisor_free_disk_gb_total:
description: |
- The free disk remaining on all hypervisors(in GB).
+    The free disk remaining on all hypervisors (in GiB). This does not take
+ allocation ratios used for overcommit into account so this value may be
+ negative.
in: body
required: true
type: integer
hypervisor_hostname:
description: |
- The hypervisor host name provided by the Nova virt driver. For the Ironic driver,
- it is the Ironic node uuid.
+ The hypervisor host name provided by the Nova virt driver. For the Ironic
+ driver, it is the Ironic node uuid.
in: body
required: true
type: string
@@ -3439,7 +3806,7 @@ hypervisor_links:
description: |
Links to the hypervisors resource. See `API Guide / Links and
References
-      <http://docs.openstack.org/api-guide/compute/links_and_references.html>`_
+      <https://docs.openstack.org/api-guide/compute/links_and_references.html>`_
for more info.
in: body
type: array
@@ -3455,8 +3822,10 @@ hypervisor_os_diagnostics:
hypervisor_servers:
description: |
A list of ``server`` objects.
+    This field became mandatory in microversion 2.75. If there are no servers
+    on the hypervisor, an empty list is returned.
in: body
- required: false
+ required: true
type: array
min_version: 2.53
hypervisor_servers_name:
@@ -3509,27 +3878,41 @@ hypervisor_type_body:
in: body
required: true
type: string
+hypervisor_uptime:
+ description: |
+ The total uptime of the hypervisor and information about average load. Only
+ reported for active hosts where the virt driver supports this feature.
+ in: body
+ required: true
+ type: string
+ min_version: 2.88
hypervisor_vcpus:
description: |
- The number of vcpu in this hypervisor.
+    The number of vCPUs in this hypervisor. This does not take allocation
+ ratios used for overcommit into account so there may be disparity between
+ this and the used count.
in: body
required: true
type: integer
+ max_version: 2.87
hypervisor_vcpus_total:
description: |
- The number of vcpu on all hypervisors.
+    The number of vCPUs on all hypervisors. This does not take allocation
+ ratios used for overcommit into account so there may be disparity between
+ this and the used count.
in: body
required: true
type: integer
hypervisor_vcpus_used:
description: |
- The number of vcpu used in this hypervisor.
+    The number of vCPUs used in this hypervisor.
in: body
required: true
type: integer
+ max_version: 2.87
hypervisor_vcpus_used_total:
description: |
- The number of vcpu used on all hypervisors.
+    The number of vCPUs used on all hypervisors.
in: body
required: true
type: integer
@@ -3548,7 +3931,7 @@ hypervisors:
image:
description: |
The UUID and links for the image for your server instance. The ``image`` object
- might be an empty string when you boot the server from a volume.
+ will be an empty string when you boot the server from a volume.
in: body
required: true
type: object
@@ -3725,7 +4108,7 @@ injectNetworkInfo:
type: none
instance_action_events_2_50:
description: |
- The events which occurred in this action.
+ The events which occurred in this action in descending order of creation.
Policy defaults enable only users with the administrative role to see
instance action event information. Cloud providers can change these
@@ -3736,7 +4119,7 @@ instance_action_events_2_50:
max_version: 2.50
instance_action_events_2_51:
description: |
- The events which occurred in this action.
+ The events which occurred in this action in descending order of creation.
Policy defaults enable only users with the administrative role or the owner
of the server to see instance action event information. Cloud providers can
@@ -3749,8 +4132,8 @@ instance_actions_next_links:
description: |
Links pertaining to the instance action.
This parameter is returned when paging and more data is available.
- See `API Guide / Links and References
-    <http://docs.openstack.org/api-guide/compute/links_and_references.html>`_
+ See `Paginated collections
+    <https://docs.openstack.org/api-guide/compute/paginated_collections.html>`__
for more info.
in: body
required: false
@@ -3807,7 +4190,7 @@ instanceAction:
type: object
instanceActions:
description: |
- List of the actions for the given instance.
+ List of the actions for the given instance in descending order of creation.
in: body
required: true
type: array
@@ -3941,6 +4324,13 @@ key_name_resp:
in: body
required: true
type: string
+key_name_resp_update:
+ description: |
+ The name of associated key pair, if any.
+ in: body
+ required: true
+ type: string
+ min_version: 2.75
key_pairs: &key_pairs
description: |
The number of allowed key pairs for each user.
@@ -3958,6 +4348,11 @@ key_pairs_quota_details:
description: |
The object of detailed key pairs quota, including in_use, limit and
reserved number of key pairs.
+
+ .. note:: ``in_use`` field value for keypair quota details is always
+ zero. In Nova, key_pairs are a user-level resource, not a project-
+ level resource, so for legacy reasons, the keypair in-use information
+ is not counted.
in: body
required: true
type: object
@@ -3996,7 +4391,7 @@ keypair_links:
description: |
Links pertaining to keypair. See `API Guide / Links and
References
-      <http://docs.openstack.org/api-guide/compute/links_and_references.html>`_
+      <https://docs.openstack.org/api-guide/compute/links_and_references.html>`_
for more info.
in: body
type: array
@@ -4113,17 +4508,20 @@ links:
description: |
Links to the resources in question. See `API Guide / Links and
References
-      <http://docs.openstack.org/api-guide/compute/links_and_references.html>`_
+      <https://docs.openstack.org/api-guide/compute/links_and_references.html>`_
for more info.
in: body
required: true
type: array
local_gb:
description: |
- The disk in this hypervisor(in GB).
+ The disk in this hypervisor (in GiB). This does not take allocation
+ ratios used for overcommit into account so there may be disparity between
+ this and the used count.
in: body
required: true
type: integer
+ max_version: 2.87
local_gb_simple_tenant_usage:
description: |
The sum of the root disk size of the server and
@@ -4140,28 +4538,33 @@ local_gb_simple_tenant_usage_optional:
type: integer
local_gb_total:
description: |
- The disk on all hypervisors(in GB).
+ The disk on all hypervisors (in GiB). This does not take allocation
+ ratios used for overcommit into account so there may be disparity between
+ this and the used count.
in: body
required: true
type: integer
local_gb_used:
description: |
- The disk used in this hypervisor(in GB).
+ The disk used in this hypervisor (in GiB).
in: body
required: true
type: integer
+ max_version: 2.87
local_gb_used_total:
description: |
- The disk used on all hypervisors(in GB).
+ The disk used on all hypervisors (in GiB).
in: body
required: true
type: integer
lock:
description: |
The action to lock a server.
+ This parameter can be ``null``.
+ Up to microversion 2.73, this parameter should be ``null``.
in: body
required: true
- type: none
+ type: object
locked:
description: |
True if the instance is locked otherwise False.
@@ -4169,6 +4572,20 @@ locked:
required: true
type: boolean
min_version: 2.9
+locked_reason_req:
+ description: |
+ The reason behind locking a server. Limited to 255 characters in length.
+ in: body
+ required: false
+ type: string
+ min_version: 2.73
+locked_reason_resp:
+ description: |
+ The reason behind locking a server.
+ in: body
+ required: true
+ type: string
+ min_version: 2.73
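For illustration, a sketch of locking a server with a reason at microversion
2.73, under the assumption of a ``requests`` client with placeholder endpoint,
token, and server UUID.

.. code-block:: python

    # Hypothetical sketch: lock a server and record why it was locked.
    import requests

    NOVA_URL = "http://controller:8774/v2.1"  # placeholder endpoint
    TOKEN = "gAAAAA..."                       # a valid Keystone token
    SERVER_ID = "0b8a249a-0000-0000-0000-000000000000"  # placeholder

    resp = requests.post(
        f"{NOVA_URL}/servers/{SERVER_ID}/action",
        headers={
            "X-Auth-Token": TOKEN,
            "OpenStack-API-Version": "compute 2.73",
        },
        # locked_reason is optional and limited to 255 characters
        json={"lock": {"locked_reason": "pending security review"}},
    )
    print(resp.status_code)  # 202 on success; no response body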
mac_addr:
description: |
The MAC address.
@@ -4210,47 +4627,53 @@ memory_details_diagnostics:
The dictionary with information about VM memory usage.
Following fields are presented in the dictionary:
- - ``maximum`` - Amount of memory provisioned for the VM in MB (Integer)
+ - ``maximum`` - Amount of memory provisioned for the VM in MiB (Integer)
- ``used`` - Amount of memory that is currently used by the guest operating
- system and its applications in MB (Integer)
+ system and its applications in MiB (Integer)
in: body
required: true
type: array
min_version: 2.48
memory_mb:
description: |
- The memory of this hypervisor(in MB).
+ The memory of this hypervisor (in MiB). This does not take allocation
+ ratios used for overcommit into account so there may be disparity between
+ this and the used count.
in: body
required: true
type: integer
+ max_version: 2.87
memory_mb_simple_tenant_usage:
description: |
- The memory size of the server (in MB).
+ The memory size of the server (in MiB).
in: body
required: true
type: integer
memory_mb_simple_tenant_usage_optional:
description: |
- The memory size of the server (in MB).
+ The memory size of the server (in MiB).
in: body
required: false
type: integer
memory_mb_total:
description: |
- The memory of all hypervisors(in MB).
+ The memory of all hypervisors (in MiB). This does not take allocation
+ ratios used for overcommit into account so there may be disparity between
+ this and the used count.
in: body
required: true
type: integer
memory_mb_used:
description: |
- The memory used in this hypervisor(in MB).
+ The memory used in this hypervisor (in MiB).
in: body
required: true
type: integer
+ max_version: 2.87
memory_mb_used_total:
description: |
- The memory used on all hypervisors(in MB).
+    The memory used on all hypervisors (in MiB).
in: body
required: true
type: integer
@@ -4411,8 +4834,8 @@ migration_links_2_23:
Links to the migration.
This parameter is returned if the migration type is ``live-migration`` and
the migration status is one of ``queued``, ``preparing``, ``running``
- and ``post-migrating``. See `API Guide / Links and References
-    <http://docs.openstack.org/api-guide/compute/links_and_references.html>`_
+ and ``post-migrating``. See `Paginated collections
+    <https://docs.openstack.org/api-guide/compute/paginated_collections.html>`__
for more info.
in: body
required: false
@@ -4434,8 +4857,8 @@ migration_next_links_2_59:
description: |
Links pertaining to the migration.
This parameter is returned when paging and more data is available.
- See `API Guide / Links and References
-    <http://docs.openstack.org/api-guide/compute/links_and_references.html>`_
+ See `Paginated collections
+    <https://docs.openstack.org/api-guide/compute/paginated_collections.html>`__
for more info.
in: body
required: false
@@ -4480,7 +4903,7 @@ minDisk_body:
type: integer
minRam_body:
description: |
- The minimum amount of RAM an image requires to function, in MB. For example, ``512``.
+ The minimum amount of RAM an image requires to function, in MiB. For example, ``512``.
in: body
required: true
type: integer
@@ -4502,6 +4925,13 @@ name_server_group:
in: body
required: true
type: string
+name_update_rebuild:
+ description: |
+ The security group name.
+ in: body
+ required: true
+ type: string
+ min_version: 2.75
namespace:
description: |
A URL pointing to the namespace for this extension.
@@ -4691,18 +5121,21 @@ os-availability-zone:availability_zone:
want your instance to be built. Typically, an admin user will use
availability zones to arrange OpenStack compute hosts into logical
groups.
+
An availability zone provides a form of physical isolation and redundancy from
other availability zones. For instance, if some racks in your data center are
on a separate power source, you can put servers in those racks in their own availability
zone. Availability zones can also help separate different classes of hardware. By
segregating resources into availability zones, you can ensure that your application
resources are spread across disparate machines to achieve high availability in
- the event of hardware or other failure.
+ the event of hardware or other failure. See
+    `Availability Zones (AZs)
+    <https://docs.openstack.org/nova/latest/admin/availability-zones.html>`_
+    for more information.
+
You can list the available availability zones by calling the
- os-availability-zone API, but you should avoid using the default
- availability zone when booting the instance. In general, the
- default availability zone is named ``nova``. This AZ is only shown
- when listing the availability zones as an admin.
+ :ref:`os-availability-zone` API, but you should avoid using the `default
+    availability zone
+    <https://docs.openstack.org/nova/latest/admin/availability-zones.html>`_
+ when creating the server. The default availability zone is named ``nova``.
+ This AZ is only shown when listing the availability zones as an admin.
in: body
required: false
type: string
@@ -4739,6 +5172,13 @@ OS-EXT-AZ:availability_zone_optional:
in: body
required: false
type: string
+OS-EXT-AZ:availability_zone_update_rebuild:
+ description: |
+ The availability zone name.
+ in: body
+ required: true
+ type: string
+ min_version: 2.75
OS-EXT-SRV-ATTR:host:
description: |
The name of the compute host on which this instance is running.
@@ -4746,6 +5186,14 @@ OS-EXT-SRV-ATTR:host:
in: body
required: true
type: string
+OS-EXT-SRV-ATTR:host_update_rebuild:
+ description: |
+ The name of the compute host on which this instance is running.
+ Appears in the response for administrative users only.
+ in: body
+ required: true
+ type: string
+ min_version: 2.75
OS-EXT-SRV-ATTR:hypervisor_hostname:
description: |
The hypervisor host name provided by the Nova virt driver. For the Ironic driver,
@@ -4753,6 +5201,14 @@ OS-EXT-SRV-ATTR:hypervisor_hostname:
in: body
required: true
type: string
+OS-EXT-SRV-ATTR:hypervisor_hostname_update_rebuild:
+ description: |
+ The hypervisor host name provided by the Nova virt driver. For the Ironic driver,
+ it is the Ironic node uuid. Appears in the response for administrative users only.
+ in: body
+ required: true
+ type: string
+ min_version: 2.75
OS-EXT-SRV-ATTR:instance_name:
description: |
The instance name. The Compute API generates the instance name from the instance
@@ -4760,6 +5216,14 @@ OS-EXT-SRV-ATTR:instance_name:
in: body
required: true
type: string
+OS-EXT-SRV-ATTR:instance_name_update_rebuild:
+ description: |
+ The instance name. The Compute API generates the instance name from the instance
+ name template. Appears in the response for administrative users only.
+ in: body
+ required: true
+ type: string
+ min_version: 2.75
OS-EXT-STS:power_state:
description: |
The power state of the instance. This is an enum value that is mapped as::
@@ -4773,18 +5237,46 @@ OS-EXT-STS:power_state:
in: body
required: true
type: integer
+OS-EXT-STS:power_state_update_rebuild:
+ description: |
+ The power state of the instance. This is an enum value that is mapped as::
+
+ 0: NOSTATE
+ 1: RUNNING
+ 3: PAUSED
+ 4: SHUTDOWN
+ 6: CRASHED
+ 7: SUSPENDED
+ in: body
+ required: true
+ type: integer
+ min_version: 2.75
OS-EXT-STS:task_state:
description: |
The task state of the instance.
in: body
required: true
type: string
+OS-EXT-STS:task_state_update_rebuild:
+ description: |
+ The task state of the instance.
+ in: body
+ required: true
+ type: string
+ min_version: 2.75
OS-EXT-STS:vm_state:
description: |
The VM state.
in: body
required: true
type: string
+OS-EXT-STS:vm_state_update_rebuild:
+ description: |
+ The VM state.
+ in: body
+ required: true
+ type: string
+ min_version: 2.75
os-extended-volumes:volumes_attached:
description: |
The attached volumes, if any.
@@ -4794,19 +5286,39 @@ os-extended-volumes:volumes_attached:
os-extended-volumes:volumes_attached.delete_on_termination:
description: |
A flag indicating if the attached volume will be deleted
- when the server is deleted. By default this is False and
- can only be set when creating a volume while creating a
- server, which is commonly referred to as boot from volume.
+ when the server is deleted. By default this is False.
in: body
required: true
type: boolean
min_version: 2.3
+os-extended-volumes:volumes_attached.delete_on_termination_update_rebuild:
+ description: |
+ A flag indicating if the attached volume will be deleted
+ when the server is deleted. By default this is False.
+ in: body
+ required: true
+ type: boolean
+ min_version: 2.75
os-extended-volumes:volumes_attached.id:
description: |
The attached volume ID.
in: body
required: true
type: string
+os-extended-volumes:volumes_attached.id_update_rebuild:
+ description: |
+ The attached volume ID.
+ in: body
+ required: true
+ type: string
+ min_version: 2.75
+os-extended-volumes:volumes_attached_update_rebuild:
+ description: |
+ The attached volumes, if any.
+ in: body
+ required: true
+ type: array
+ min_version: 2.75
os-getConsoleOutput:
description: |
The action to get console output of the server.
@@ -4875,7 +5387,7 @@ os-getVNCConsole:
type: object
os-getVNCConsole-type:
description: |
- The type of VNC console. The valid values are ``novnc`` and ``xvpvnc``.
+ The type of VNC console. The only valid value is ``novnc``.
in: body
required: true
type: string
@@ -4920,6 +5432,24 @@ OS-SRV-USG:launched_at:
in: body
required: true
type: string
+OS-SRV-USG:launched_at_update_rebuild:
+ description: |
+ The date and time when the server was launched.
+
+    The date and time stamp format is `ISO 8601 <https://en.wikipedia.org/wiki/ISO_8601>`_:
+
+ ::
+
+ CCYY-MM-DDThh:mm:ss±hh:mm
+
+ For example, ``2015-08-27T09:49:58-05:00``.
+
+    The ``±hh:mm`` value, if included, is the time zone as an offset from UTC.
+    If the ``launched_at`` date and time stamp is not set, its value is ``null``.
+ in: body
+ required: true
+ type: string
+ min_version: 2.75
OS-SRV-USG:terminated_at:
description: |
The date and time when the server was deleted.
@@ -4936,6 +5466,23 @@ OS-SRV-USG:terminated_at:
in: body
required: true
type: string
+OS-SRV-USG:terminated_at_update_rebuild:
+ description: |
+ The date and time when the server was deleted.
+
+    The date and time stamp format is `ISO 8601 <https://en.wikipedia.org/wiki/ISO_8601>`_:
+
+ ::
+
+ CCYY-MM-DDThh:mm:ss±hh:mm
+
+ For example, ``2015-08-27T09:49:58-05:00``.
+ The ``±hh:mm`` value, if included, is the time zone as an offset from UTC.
+ If the ``deleted_at`` date and time stamp is not set, its value is ``null``.
+ in: body
+ required: true
+ type: string
+ min_version: 2.75
os-start:
description: |
The action to start a stopped server.
@@ -4993,7 +5540,7 @@ os:scheduler_hints_cidr:
os:scheduler_hints_different_cell:
description: |
A list of cell routes or a cell route (string).
- Schedule the server in a cell that is not specifiled.
+ Schedule the server in a cell that is not specified.
It is available when ``DifferentCellFilter`` is available on cloud side
that is cell v1 environment.
in: body
@@ -5023,7 +5570,7 @@ os:scheduler_hints_query:
Schedule the server by using a custom filter in JSON format.
For example::
- "query": "[>=,$free_ram_mb,1024]"
+ "query": "[\">=\",\"$free_ram_mb\",1024]"
It is available when ``JsonFilter`` is available on cloud side.
in: body
@@ -5040,7 +5587,7 @@ os:scheduler_hints_same_host:
type: array
os:scheduler_hints_target_cell:
description: |
- A target cell name. Schedule the server in a host in the cell specifiled.
+ A target cell name. Schedule the server in a host in the cell specified.
It is available when ``TargetCellFilter`` is available on cloud side
that is cell v1 environment.
in: body
@@ -5157,7 +5704,7 @@ policy_name:
instead of resulting in a build failure.
in: body
required: true
- type: object
+ type: string
min_version: 2.64
policy_rules:
description: |
@@ -5272,9 +5819,23 @@ project_id:
in: body
required: false
type: string
-project_id_instance_action:
+project_id_migration_2_80:
description: |
- The UUID of the project that this server belongs to.
+ The ID of the project which initiated the server migration. The value
+ may be ``null`` for older migration records.
+ in: body
+ required: true
+ type: string
+ min_version: 2.80
+project_id_server:
+ description: |
+ The ID of the project that this server belongs to.
+ in: body
+ required: true
+ type: string
+project_id_server_action:
+ description: |
+ The ID of the project which initiated the server action.
in: body
required: true
type: string
@@ -5314,14 +5875,14 @@ quota_tenant_or_user_id_body:
type: string
ram: &ram
description: |
- The amount of allowed server RAM, in MB, for each tenant.
+ The amount of allowed server RAM, in MiB, for each tenant.
in: body
required: true
type: integer
ram_quota_class: &ram_quota_class
<<: *ram
description: |
- The amount of allowed instance RAM, in MB, for the quota class.
+ The amount of allowed instance RAM, in MiB, for the quota class.
ram_quota_class_optional:
<<: *ram_quota_class
required: false
@@ -5334,7 +5895,7 @@ ram_quota_details:
type: object
ram_quota_optional:
description: |
- The amount of allowed server RAM, in MB, for each tenant.
+ The amount of allowed server RAM, in MiB, for each tenant.
in: body
required: false
type: integer
@@ -5375,7 +5936,7 @@ remote_console_protocol:
type: string
remote_console_type:
description: |
- The type of remote console. The valid values are ``novnc``, ``xvpvnc``,
+ The type of remote console. The valid values are ``novnc``,
``rdp-html5``, ``spice-html5``, ``serial``, and ``webmks``. The type
``webmks`` is added since Microversion ``2.8``.
in: body
@@ -5498,13 +6059,14 @@ rules:
type: array
running_vms:
description: |
- The number of running vms on this hypervisor.
+ The number of running VMs on this hypervisor.
in: body
required: true
type: integer
+ max_version: 2.87
running_vms_total:
description: |
- The total number of running vms on all hypervisors.
+ The total number of running VMs on all hypervisors.
in: body
required: true
type: integer
@@ -5615,6 +6177,19 @@ security_groups_obj:
in: body
required: true
type: array
+security_groups_obj_optional:
+ description: |
+ One or more security groups objects.
+ in: body
+ required: false
+ type: array
+security_groups_obj_update_rebuild:
+ description: |
+ One or more security groups objects.
+ in: body
+ required: false
+ type: array
+ min_version: 2.75
security_groups_quota:
description: |
The number of allowed security groups for each tenant.
@@ -5672,7 +6247,7 @@ server_description_resp:
min_version: 2.19
server_group:
description: |
- The server group obejct.
+ The server group object.
in: body
required: true
type: object
@@ -5710,6 +6285,14 @@ server_groups: &server_groups
in: body
required: true
type: integer
+server_groups_2_71:
+ description: |
+ The UUIDs of the server groups to which the server belongs. Currently
+ this can contain at most one entry.
+ in: body
+ required: true
+ type: array
+ min_version: 2.71
server_groups_list:
description: |
The list of existing server groups.
@@ -5739,14 +6322,57 @@ server_groups_quota_optional:
in: body
required: false
type: integer
-server_hostname:
+# This is the host in a POST (create instance) request body.
+server_host_create:
+ description: |
+ The name of the compute service host on which the server is to be created.
+ The API will return 400 if no compute services are found with the given
+ host name. By default, it can be specified by administrators only.
+ in: body
+ required: false
+ type: string
+ min_version: 2.74
+server_hostname: &server_hostname
in: body
required: false
type: string
description: |
- The hostname set on the instance when it is booted.
- By default, it appears in the response for administrative users only.
+ The hostname of the instance reported in the metadata service.
+ This parameter only appears in responses for administrators until
+ microversion 2.90, after which it is shown for all users.
+
+ .. note::
+
+       This information is published via the metadata service and requires an
+       application such as ``cloud-init`` to propagate it through to the
+ instance.
min_version: 2.3
+server_hostname_req:
+ in: body
+ required: false
+ type: string
+ description: |
+ The hostname to configure for the instance in the metadata service.
+
+ .. note::
+
+       This information is published via the metadata service and requires an
+       application such as ``cloud-init`` to propagate it through to the
+ instance.
+ min_version: 2.90
+server_hostname_update_rebuild:
+ <<: *server_hostname
+ min_version: 2.75
+# This is the hypervisor_hostname in a POST (create instance) request body.
+server_hypervisor_hostname_create:
+ description: |
+ The hostname of the hypervisor on which the server is to be created.
+ The API will return 400 if no hypervisors are found with the given
+ hostname. By default, it can be specified by administrators only.
+ in: body
+ required: false
+ type: string
+ min_version: 2.74
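The sketch below shows how these two targeting fields might be used when
creating a server at microversion 2.74; the endpoint, token, image UUID, and
host name are placeholder assumptions.

.. code-block:: python

    # Hypothetical sketch: create a server pinned to a specific compute host.
    import requests

    NOVA_URL = "http://controller:8774/v2.1"  # placeholder endpoint
    TOKEN = "gAAAAA..."                       # a valid Keystone token

    resp = requests.post(
        f"{NOVA_URL}/servers",
        headers={
            "X-Auth-Token": TOKEN,
            "OpenStack-API-Version": "compute 2.74",
        },
        json={
            "server": {
                "name": "pinned-server",
                "imageRef": "70a599e0-0000-0000-0000-000000000000",
                "flavorRef": "1",
                "networks": "auto",
                # returns 400 if no compute service has this host name
                "host": "compute-01",
            }
        },
    )
    print(resp.status_code, resp.json())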
server_id:
description: |
The UUID of the server.
@@ -5767,6 +6393,14 @@ server_kernel_id:
The UUID of the kernel image when using an AMI. Will be null if not.
By default, it appears in the response for administrative users only.
min_version: 2.3
+server_kernel_id_update_rebuild:
+ in: body
+ required: false
+ type: string
+ description: |
+ The UUID of the kernel image when using an AMI. Will be null if not.
+ By default, it appears in the response for administrative users only.
+ min_version: 2.75
server_launch_index:
in: body
required: false
@@ -5776,11 +6410,20 @@ server_launch_index:
sequence in which the servers were launched.
By default, it appears in the response for administrative users only.
min_version: 2.3
+server_launch_index_update_rebuild:
+ in: body
+ required: false
+ type: integer
+ description: |
+ When servers are launched via multiple create, this is the
+ sequence in which the servers were launched.
+ By default, it appears in the response for administrative users only.
+ min_version: 2.75
server_links:
description: |
Links pertaining to the server. See `API Guide / Links and
References
-      <http://docs.openstack.org/api-guide/compute/links_and_references.html>`_
+      <https://docs.openstack.org/api-guide/compute/links_and_references.html>`_
for more info.
in: body
type: array
@@ -5805,6 +6448,14 @@ server_ramdisk_id:
The UUID of the ramdisk image when using an AMI. Will be null if not.
By default, it appears in the response for administrative users only.
min_version: 2.3
+server_ramdisk_id_update_rebuild:
+ in: body
+ required: false
+ type: string
+ description: |
+ The UUID of the ramdisk image when using an AMI. Will be null if not.
+ By default, it appears in the response for administrative users only.
+ min_version: 2.75
server_reservation_id:
in: body
required: false
@@ -5815,6 +6466,16 @@ server_reservation_id:
create, that will all have the same reservation_id.
By default, it appears in the response for administrative users only.
min_version: 2.3
+server_reservation_id_update_rebuild:
+ in: body
+ required: false
+ type: string
+ description: |
+ The reservation id for the server. This is an id that can
+ be useful in tracking groups of servers created with multiple
+ create, that will all have the same reservation_id.
+ By default, it appears in the response for administrative users only.
+ min_version: 2.75
server_root_device_name:
in: body
required: false
@@ -5823,6 +6484,14 @@ server_root_device_name:
The root device name for the instance
By default, it appears in the response for administrative users only.
min_version: 2.3
+server_root_device_name_update_rebuild:
+ in: body
+ required: false
+ type: string
+ description: |
+ The root device name for the instance
+ By default, it appears in the response for administrative users only.
+ min_version: 2.75
server_status:
description: |
The server status.
@@ -5849,6 +6518,77 @@ server_tags_create:
required: false
type: array
min_version: 2.52
+server_topology_nodes:
+ description: |
+ NUMA nodes information of a server.
+ in: body
+ required: true
+ type: array
+server_topology_nodes_cpu_pinning:
+ description: |
+    The mapping of server cores to host physical CPUs. For example::
+
+        cpu_pinning: { 0: 0, 1: 5}
+
+    This means vcpu 0 is mapped to physical CPU 0, and vcpu 1 is mapped to
+    physical CPU 5.
+
+ By default the ``cpu_pinning`` field is only visible to users with the
+ administrative role. You can change the default behavior via the policy
+ rule::
+
+ compute:server:topology:host:index
+ in: body
+ required: false
+ type: dict
+server_topology_nodes_cpu_siblings:
+ description: |
+    The mapping of host CPU thread siblings. For example::
+
+        siblings: [[0,1],[2,3]]
+
+    This means vcpu 0 and vcpu 1 belong to the same CPU core, while vcpu 2
+    and vcpu 3 belong to another CPU core.
+
+ By default the ``siblings`` field is only visible to users with the
+ administrative role. You can change the default behavior via the policy
+ rule::
+
+ compute:server:topology:host:index
+ in: body
+ required: false
+ type: list
+server_topology_nodes_host_node:
+ description: |
+    The host NUMA node the virtual NUMA node is mapped to.
+
+ By default the ``host_node`` field is only visible to users with the
+ administrator role. You can change the default behavior via the policy
+ rule::
+
+ compute:server:topology:host:index
+ in: body
+ required: false
+ type: integer
+server_topology_nodes_memory_mb:
+ description: |
+    The amount of memory assigned to this NUMA node in MiB.
+ in: body
+ required: false
+ type: integer
+server_topology_nodes_vcpu_set:
+ description: |
+    A list of IDs of the virtual CPUs assigned to this NUMA node.
+ in: body
+ required: false
+ type: list
+server_topology_pagesize_kb:
+ description: |
+    The page size of the server, in KiB. This field is ``null`` if the
+ page size information is not available.
+ in: body
+ required: true
+ type: integer
server_trusted_image_certificates_create_req:
description: |
A list of trusted certificate IDs, which are used during image
@@ -5904,6 +6644,14 @@ server_user_data:
The user_data the instance was created with.
By default, it appears in the response for administrative users only.
min_version: 2.3
+server_user_data_update:
+ in: body
+ required: false
+ type: string
+ description: |
+ The user_data the instance was created with.
+ By default, it appears in the response for administrative users only.
+ min_version: 2.75
server_uuid:
description: |
The UUID of the server instance to which the API dispatches the event. You must
@@ -5922,8 +6670,8 @@ servers_links:
description: |
Links to the next server. It is available when the number of servers exceeds
``limit`` parameter or ``[api]/max_limit`` in the configuration file.
- See `API Guide / Links and References
-    <http://docs.openstack.org/api-guide/compute/links_and_references.html>`_
+ See `Paginated collections
+    <https://docs.openstack.org/api-guide/compute/paginated_collections.html>`__
for more info.
in: body
type: array
@@ -6215,6 +6963,12 @@ tags:
required: true
type: array
min_version: 2.26
+tags_no_min:
+ description: |
+ A list of tags. The maximum count of tags in this list is 50.
+ in: body
+ required: true
+ type: array
tenant_id_body:
description: |
The UUID of the tenant in a multi-tenancy cloud.
@@ -6297,7 +7051,7 @@ total_local_gb_usage:
type: float
total_memory_mb_usage:
description: |
- Multiplying the server memory size (in MB) by hours the server exists,
+ Multiplying the server memory size (in MiB) by hours the server exists,
and then adding that all together for each server.
in: body
required: true
@@ -6465,7 +7219,7 @@ usage_links:
description: |
Links pertaining to usage. See `API Guide / Links and
References
-      <http://docs.openstack.org/api-guide/compute/links_and_references.html>`_
+      <https://docs.openstack.org/api-guide/compute/links_and_references.html>`_
for more info.
in: body
type: array
@@ -6505,6 +7259,20 @@ user_id:
in: body
required: true
type: string
+user_id_migration_2_80:
+ description: |
+ The ID of the user which initiated the server migration. The value
+ may be ``null`` for older migration records.
+ in: body
+ required: true
+ type: string
+ min_version: 2.80
+user_id_server_action:
+ description: |
+ The ID of the user which initiated the server action.
+ in: body
+ required: true
+ type: string
user_id_server_group:
description: |
The user ID who owns the server group.
@@ -6625,6 +7393,13 @@ volume:
in: body
required: true
type: object
+volume_attachment_id_resp:
+ description: |
+ The volume ID of the attachment.
+ in: body
+ required: true
+ type: string
+ max_version: 2.88
volume_id:
description: |
The source volume ID.
@@ -6639,9 +7414,20 @@ volume_id_resp:
type: string
volume_size:
description: |
- The size of the volume (in GB).
+ The size of the volume (in GiB).
This is integer value from range 1 to 2147483647
which can be requested as integer and string.
+ This parameter must be specified in the following cases:
+
+ - An image to volume case
+
+ * ``block_device_mapping_v2.source_type`` is ``image``
+ * ``block_device_mapping_v2.destination_type`` is ``volume``
+
+ - A blank to volume case
+
+ * ``block_device_mapping_v2.source_type`` is ``blank``
+ * ``block_device_mapping_v2.destination_type`` is ``volume``
in: body
required: false
type: integer
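As an illustration of the "image to volume" case above, the sketch below boots
a server from a new 20 GiB root volume created from an image; the endpoint,
token, and image UUID are placeholder assumptions.

.. code-block:: python

    # Hypothetical sketch: boot from volume, so volume_size must be given.
    import requests

    NOVA_URL = "http://controller:8774/v2.1"  # placeholder endpoint
    TOKEN = "gAAAAA..."                       # a valid Keystone token

    resp = requests.post(
        f"{NOVA_URL}/servers",
        headers={"X-Auth-Token": TOKEN},
        json={
            "server": {
                "name": "bfv-server",
                "flavorRef": "1",
                "block_device_mapping_v2": [
                    {
                        "boot_index": 0,
                        "uuid": "70a599e0-0000-0000-0000-000000000000",
                        "source_type": "image",        # image ...
                        "destination_type": "volume",  # ... to volume
                        "volume_size": 20,             # required in this case
                        "delete_on_termination": True,
                    }
                ],
            }
        },
    )
    print(resp.status_code, resp.json())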
@@ -6683,7 +7469,8 @@ volumeAttachment_post:
volumeAttachment_put:
description: |
A dictionary representation of a volume attachment containing the field
- ``volumeId`` which is the UUID of the replacement volume.
+ ``volumeId`` which is the UUID of the replacement volume, and other fields
+ to update in the attachment.
in: body
required: true
type: object
diff --git a/api-ref/source/request-ids.inc b/api-ref/source/request-ids.inc
index 76c0efafedb..4df4c40b9ca 100644
--- a/api-ref/source/request-ids.inc
+++ b/api-ref/source/request-ids.inc
@@ -8,7 +8,7 @@ Users can specify the global request ID in the request header.
Users can receive the local request ID in the response header.
For more details about Request IDs, please reference: `Faults
-<https://developer.openstack.org/api-guide/compute/faults.html>`_
+<https://docs.openstack.org/api-guide/compute/faults.html>`_
**Request**
diff --git a/api-ref/source/server-migrations.inc b/api-ref/source/server-migrations.inc
index f0d45f2d444..52e413470ff 100644
--- a/api-ref/source/server-migrations.inc
+++ b/api-ref/source/server-migrations.inc
@@ -53,10 +53,12 @@ Response
- status: migrate_status
- updated_at: updated
- uuid: migration_uuid
+ - user_id: user_id_migration_2_80
+ - project_id: project_id_migration_2_80
-**Example List Migrations (2.59)**
+**Example List Migrations (2.80)**
-.. literalinclude:: ../../doc/api_samples/server-migrations/v2.59/migrations-index.json
+.. literalinclude:: ../../doc/api_samples/server-migrations/v2.80/migrations-index.json
:language: javascript
Show Migration Details
@@ -107,10 +109,12 @@ Response
- status: migrate_status
- updated_at: updated
- uuid: migration_uuid
+ - user_id: user_id_migration_2_80
+ - project_id: project_id_migration_2_80
-**Example Show Migration Details (2.59)**
+**Example Show Migration Details (2.80)**
-.. literalinclude:: ../../doc/api_samples/server-migrations/v2.59/migrations-get.json
+.. literalinclude:: ../../doc/api_samples/server-migrations/v2.80/migrations-get.json
:language: javascript
Force Migration Complete Action (force_complete Action)
@@ -124,9 +128,11 @@ Specify the ``force_complete`` action in the request body.
.. note:: Microversion 2.22 or greater is required for this API.
-.. note:: Not all compute back ends support forcefully completing an
+.. note:: Not all `compute back ends`_ support forcefully completing an
in-progress live migration.
+.. _compute back ends: https://docs.openstack.org/nova/latest/user/support-matrix.html#operation_force_live_migration_to_complete
+
Policy defaults enable only users with the administrative role to perform
this operation. Cloud providers can change these permissions through the
``policy.json`` file.
@@ -148,9 +154,11 @@ to determine whether the request succeeded.
**Troubleshooting**
-If the server status remains ``ACTIVE`` for an inordinate amount of time, the
-request may have failed. Ensure you meet the preconditions and run the request
-again. If the request fails again, investigate the compute back end.
+If the server status remains ``MIGRATING`` for an inordinate amount of time,
+the request may have failed. Ensure you meet the preconditions and run the
+request again. If the request fails again, investigate the compute back end.
+More details can be found in the
+`admin guide `_.
Normal response codes: 202
@@ -187,9 +195,11 @@ Abort an in-progress live migration.
.. note:: With microversion 2.65 or greater, you can abort live migrations
also in ``queued`` and ``preparing`` status.
-.. note:: Not all compute back ends support aborting an in-progress live
+.. note:: Not all `compute back ends`__ support aborting an in-progress live
migration.
+.. __: https://docs.openstack.org/nova/latest/user/support-matrix.html#operation_abort_in_progress_live_migration
+
Policy defaults enable only users with the administrative role to perform
this operation. Cloud providers can change these permissions through the
``policy.json`` file.
@@ -215,7 +225,7 @@ using::
**Troubleshooting**
-If the server task_state remains ``migrating`` for an inordinate amount of
+If the server status remains ``MIGRATING`` for an inordinate amount of
time, the request may have failed. Ensure you meet the preconditions and run
the request again. If the request fails again, investigate the compute back
end.
diff --git a/api-ref/source/server-topology.inc b/api-ref/source/server-topology.inc
new file mode 100644
index 00000000000..014f713fa04
--- /dev/null
+++ b/api-ref/source/server-topology.inc
@@ -0,0 +1,52 @@
+.. -*- rst -*-
+
+=====================================
+Servers Topology (servers, topology)
+=====================================
+
+Shows the NUMA topology information for a server.
+
+Show Server Topology
+====================
+
+.. rest_method:: GET /servers/{server_id}/topology
+.. versionadded:: 2.78
+
+Shows NUMA topology information for a server.
+
+Policy defaults enable only users with the administrative role or the owners
+of the server to perform this operation. Cloud providers can change these
+permissions through the ``policy.json`` file.
+
+Normal response codes: 200
+
+Error response codes: unauthorized(401), notfound(404), forbidden(403)
+
+Request
+-------
+
+.. rest_parameters:: parameters.yaml
+
+ - server_id: server_id_path
+
+Response
+--------
+
+All response fields are listed below. If some information is not available or
+not allowed by policy, the corresponding key will not exist in the response.
+
+.. rest_parameters:: parameters.yaml
+
+ - nodes: server_topology_nodes
+ - nodes.cpu_pinning: server_topology_nodes_cpu_pinning
+ - nodes.vcpu_set: server_topology_nodes_vcpu_set
+ - nodes.siblings: server_topology_nodes_cpu_siblings
+ - nodes.memory_mb: server_topology_nodes_memory_mb
+ - nodes.host_node: server_topology_nodes_host_node
+ - pagesize_kb: server_topology_pagesize_kb
+
+**Example Server topology (v2.78)**
+
+.. literalinclude:: ../../doc/api_samples/os-server-topology/v2.78/servers-topology-resp.json
+ :language: javascript
+
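As a rough usage sketch, assuming a ``requests`` client with placeholder
endpoint, token, and server UUID, the topology could be fetched like this.

.. code-block:: python

    # Hypothetical sketch: fetch and print a server's NUMA topology.
    import requests

    NOVA_URL = "http://controller:8774/v2.1"  # placeholder endpoint
    TOKEN = "gAAAAA..."                       # a valid Keystone token
    SERVER_ID = "0b8a249a-0000-0000-0000-000000000000"  # placeholder

    resp = requests.get(
        f"{NOVA_URL}/servers/{SERVER_ID}/topology",
        headers={
            "X-Auth-Token": TOKEN,
            # this API was added in microversion 2.78
            "OpenStack-API-Version": "compute 2.78",
        },
    )
    for node in resp.json()["nodes"]:
        # host_node, cpu_pinning and siblings only appear if policy allows
        print(node.get("memory_mb"), node.get("vcpu_set"), node.get("host_node"))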
diff --git a/api-ref/source/servers-action-evacuate.inc b/api-ref/source/servers-action-evacuate.inc
index 35fe67eccbb..8ae3d22093c 100644
--- a/api-ref/source/servers-action-evacuate.inc
+++ b/api-ref/source/servers-action-evacuate.inc
@@ -11,6 +11,16 @@ Evacuates a server from a failed host to a new host.
- In the request body, if ``onSharedStorage`` is set, then do not set ``adminPass``.
- The target host should not be the same as the instance host.
+**Preconditions**
+
+- The failed host must be fenced and no longer running the original server.
+- The failed host must be reported as down or marked as forced down using
+ `Update Forced Down`_.
+
+Starting from API version 2.68, the ``force`` parameter is no longer accepted
+as this could not be meaningfully supported by servers with complex resource
+allocations.
+
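To illustrate the preconditions above, here is a sketch of the
fencing-then-evacuate flow at microversion 2.68, assuming a ``requests``
client and placeholder endpoint, token, service and server UUIDs.

.. code-block:: python

    # Hypothetical sketch: after fencing the failed host, mark its compute
    # service forced down and then evacuate the server. No "force" parameter
    # is accepted from microversion 2.68 onwards.
    import requests

    NOVA_URL = "http://controller:8774/v2.1"  # placeholder endpoint
    TOKEN = "gAAAAA..."                       # a valid Keystone token
    HEADERS = {"X-Auth-Token": TOKEN, "OpenStack-API-Version": "compute 2.68"}
    SERVICE_ID = "e81d66a4-ddd3-4aba-8a84-171d1cb4d339"  # failed host's service
    SERVER_ID = "0b8a249a-0000-0000-0000-000000000000"   # placeholder

    # 1. Tell nova the fenced host is down.
    requests.put(f"{NOVA_URL}/os-services/{SERVICE_ID}",
                 headers=HEADERS, json={"forced_down": True})

    # 2. Evacuate; omitting "host" lets the scheduler pick a target.
    resp = requests.post(f"{NOVA_URL}/servers/{SERVER_ID}/action",
                         headers=HEADERS, json={"evacuate": {}})
    print(resp.status_code)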
Normal response codes: 200
Error response codes: badRequest(400), unauthorized(401), forbidden(403),
diff --git a/api-ref/source/servers-action-remote-consoles.inc b/api-ref/source/servers-action-remote-consoles.inc
index 105a00dd10a..582365c2fce 100644
--- a/api-ref/source/servers-action-remote-consoles.inc
+++ b/api-ref/source/servers-action-remote-consoles.inc
@@ -11,7 +11,7 @@ Gets an `RDP `__ con
.. warning::
This action is deprecated in microversion 2.5 and superseded
- by the API `Server Remote Consoles`_ in microversion 2.6.
+ by the API `Server Consoles`_ in microversion 2.6.
The new API offers a unified API for different console types.
The only supported connect type is ``rdp-html5``. The ``type`` parameter should
@@ -64,7 +64,7 @@ Gets a serial console for a server.
.. warning::
This action is deprecated in microversion 2.5 and superseded
- by the API `Server Remote Consoles`_ in microversion 2.6.
+ by the API `Server Consoles`_ in microversion 2.6.
The new API offers a unified API for different console types.
Specify the ``os-getSerialConsole`` action in the request body.
@@ -117,7 +117,7 @@ Gets a SPICE console for a server.
.. warning::
This action is deprecated in microversion 2.5 and superseded
- by the API `Server Remote Consoles`_ in microversion 2.6.
+ by the API `Server Consoles`_ in microversion 2.6.
The new API offers a unified API for different console types.
Specify the ``os-getSPICEConsole`` action in the request body.
@@ -170,14 +170,11 @@ Gets a VNC console for a server.
.. warning::
This action is deprecated in microversion 2.5 and superseded
- by the API `Server Remote Consoles`_ in microversion 2.6.
+ by the API `Server Consoles`_ in microversion 2.6.
The new API offers a unified API for different console types.
Specify the ``os-getVNCConsole`` action in the request body.
-The supported connection types are ``novnc``, ``xvpvnc``. Such as connect
-with ``novnc``, set ``type`` parameter to ``novnc``.
-
Normal response codes: 200
Error response codes: badRequest(400), unauthorized(401), forbidden(403), itemNotFound(404),
diff --git a/api-ref/source/servers-action-shelve.inc b/api-ref/source/servers-action-shelve.inc
index b024031cdfc..08ca65daddb 100644
--- a/api-ref/source/servers-action-shelve.inc
+++ b/api-ref/source/servers-action-shelve.inc
@@ -138,15 +138,20 @@ If the server status does not change to ``ACTIVE``, the unshelve operation faile
Normal response codes: 202
-Error response codes: unauthorized(401), forbidden(403), itemNotFound(404), conflict(409)
+Error response codes: badRequest(400), unauthorized(401), forbidden(403), itemNotFound(404), conflict(409)
Request
-------
+.. note:: Since microversion 2.77, the allowed request body schemas are
+   {"unshelve": null} or {"unshelve": {"availability_zone": <string>}}.
+ A request body of {"unshelve": {}} is not allowed.
+
.. rest_parameters:: parameters.yaml
- server_id: server_id_path
- unshelve: unshelve
+ - availability_zone: availability_zone_unshelve
|
@@ -155,6 +160,11 @@ Request
.. literalinclude:: ../../doc/api_samples/os-shelve/os-unshelve.json
:language: javascript
+**Example Unshelve server (unshelve Action) (v2.77)**
+
+.. literalinclude:: ../../doc/api_samples/os-shelve/v2.77/os-unshelve.json
+ :language: javascript
+
Response
--------
diff --git a/api-ref/source/servers-actions.inc b/api-ref/source/servers-actions.inc
index 530b8447249..4be66ebafad 100644
--- a/api-ref/source/servers-actions.inc
+++ b/api-ref/source/servers-actions.inc
@@ -10,11 +10,9 @@ in the request body.
You can associate a fixed or floating IP address with a server,
or disassociate a fixed or floating IP address from a server.
-You can attach a volume to a server.
You can create an image from a server, create a backup of a server,
-evacuate a server from a failed host to a new host, and force-delete a
-server before deferred cleanup.
+and force-delete a server before deferred cleanup.
You can lock, pause, reboot, rebuild, rescue, resize, resume, confirm
the resize of, revert a pending resize for, shelve, shelf-offload,
unshelve, start, stop, unlock, unpause, and unrescue a server. You can
@@ -24,6 +22,7 @@ into a server since Mitaka release.
You can get an RDP, serial, SPICE, or VNC console for a server.
+
Add (Associate) Floating Ip (addFloatingIp Action) (DEPRECATED)
================================================================
@@ -40,7 +39,7 @@ A pool of floating IP addresses, configured by the cloud administrator,
is available in OpenStack Compute. The project quota defines the maximum
number of floating IP addresses that you can allocate to the project.
After you `create (allocate) a floating IPaddress
-<https://developer.openstack.org/api-ref/network/v2/#create-floating-ip>`__
+<https://docs.openstack.org/api-ref/network/v2/#create-floating-ip>`__
for a project, you can associate that address with the server. Specify
the ``addFloatingIp`` action in the request body.
@@ -164,7 +163,7 @@ Specify the ``confirmResize`` action in the request body.
After you make this request, you typically must keep polling the server
status to determine whether the request succeeded. A successfully
confirming resize operation shows a status of ``ACTIVE`` or ``SHUTOFF``
-and a migration_status of ``confirmed``. You can also see the resized
+and a migration status of ``confirmed``. You can also see the resized
server in the compute node that OpenStack Compute manages.
**Preconditions**
@@ -177,9 +176,20 @@ to confirm the server.
**Troubleshooting**
-If the server status remains ``RESIZED``, the request failed. Ensure you
+If the server status remains ``VERIFY_RESIZE``, the request failed. Ensure you
meet the preconditions and run the request again. If the request fails
-again, investigate the compute back end or ask your cloud provider.
+again, the server status should be ``ERROR`` with a migration status of
+``error``. Investigate the compute back end or ask your cloud provider.
+There are some options for trying to correct the server status:
+
+* If the server is running and networking works, a user with proper
+ authority could reset the status of the server to ``active`` using the
+ :ref:`os-resetState` API.
+* If the server is not running, you can try hard rebooting the server using
+ the :ref:`reboot` API.
+
+Note that the cloud provider may still need to cleanup any orphaned resources
+on the source hypervisor.
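A sketch of the two recovery options above, assuming a ``requests`` client and
placeholder endpoint, token, and server UUID; checking the actual server state
first is left to the operator.

.. code-block:: python

    # Hypothetical sketch: recover a server stuck after a failed resize
    # confirmation, using one of the two options described above.
    import requests

    NOVA_URL = "http://controller:8774/v2.1"  # placeholder endpoint
    TOKEN = "gAAAAA..."                       # a valid Keystone token
    SERVER_ID = "0b8a249a-0000-0000-0000-000000000000"  # placeholder
    ACTION_URL = f"{NOVA_URL}/servers/{SERVER_ID}/action"
    HEADERS = {"X-Auth-Token": TOKEN}

    server_running = True  # assumption: verify this before choosing an option

    if server_running:
        # Option 1: reset the stuck status back to active.
        resp = requests.post(ACTION_URL, headers=HEADERS,
                             json={"os-resetState": {"state": "active"}})
    else:
        # Option 2: hard reboot the server.
        resp = requests.post(ACTION_URL, headers=HEADERS,
                             json={"reboot": {"type": "HARD"}})
    print(resp.status_code)  # 202 on success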
Normal response codes: 204
@@ -285,9 +295,28 @@ image in the image back end that OpenStack Image service manages.
The server must exist.
You can only create a new image from the server when its status is ``ACTIVE``,
-``SHUTOFF``, ``PAUSED``, or ``SUSPENDED``.
+``SHUTOFF``, ``SUSPENDED`` or ``PAUSED``
+(``PAUSED`` is only supported for image-backed servers).
+
+The project must have sufficient volume snapshot quota in the block storage
+service when the server has attached volumes.
+If the project does not have sufficient volume snapshot quota,
+the API returns a 403 error.
+
+**Asynchronous Postconditions**
-The connection to the Image service is valid.
+A snapshot image will be created in the Image service.
+
+In the image-backed server case, volume snapshots of attached volumes will not
+be created.
+In the volume-backed server case,
+volume snapshots will be created for all volumes attached to the server and
+then those will be represented with a ``block_device_mapping`` image property
+in the resulting snapshot image in the Image service.
+If that snapshot image is used later to create a new server,
+it will result in a volume-backed server where the root volume is created
+from the snapshot of the original root volume. Volumes created from the
+snapshots of the other original volumes will also be attached to the server.
**Troubleshooting**
@@ -349,9 +378,31 @@ Locks a server.
Specify the ``lock`` action in the request body.
+Once this operation succeeds and the server is locked, most actions by
+non-admin users are no longer allowed on the server.
+See the "Lock, Unlock" item in `Server actions
+<https://docs.openstack.org/api-guide/compute/server_concepts.html#server-actions>`_
+for the restricted actions.
+Administrators, however, can still perform actions on the server even though
+it is locked. Note that from microversion 2.73 it is
+possible to specify a reason when locking the server.
+
+The `unlock action
+<https://docs.openstack.org/api-ref/compute/#unlock-server-unlock-action>`_
+will unlock a locked server so that additional actions can
+be performed on it by non-admin users.
+
+You can check whether a server is locked and its ``locked_reason``
+(if specified, from the 2.73 microversion) with the `List Servers Detailed API
+<https://docs.openstack.org/api-ref/compute/#list-servers-detailed>`_
+or
+the `Show Server Details API
+<https://docs.openstack.org/api-ref/compute/#show-server-details>`_.
+
Policy defaults enable only users with the administrative role or
the owner of the server to perform this operation. Cloud providers
can change these permissions through the ``policy.json`` file.
+Administrators can overwrite the owner's lock.
Normal response codes: 202
@@ -365,12 +416,18 @@ Request
- server_id: server_id_path
- lock: lock
+ - locked_reason: locked_reason_req
**Example Lock Server (lock Action)**
.. literalinclude:: ../../doc/api_samples/os-lock-server/lock-server.json
:language: javascript
+**Example Lock Server (lock Action) (v2.73)**
+
+.. literalinclude:: ../../doc/api_samples/os-lock-server/v2.73/lock-server-with-reason.json
+ :language: javascript
+
Response
--------
@@ -414,6 +471,8 @@ Response
If successful, this method does not return content in the response body.
+.. _reboot:
+
Reboot Server (reboot Action)
=============================
@@ -525,12 +584,18 @@ Request
- key_name: key_name_rebuild_req
- user_data: user_data_rebuild_req
- trusted_image_certificates: server_trusted_image_certificates_rebuild_req
+ - hostname: server_hostname_req
**Example Rebuild Server (rebuild Action) (v2.63)**
.. literalinclude:: ../../doc/api_samples/servers/v2.63/server-action-rebuild.json
:language: javascript
+**Example Rebuild Server (rebuild Action) (v2.90)**
+
+.. literalinclude:: ../../doc/api_samples/servers/v2.90/server-action-rebuild.json
+ :language: javascript
+
Response
--------
@@ -575,12 +640,37 @@ Response
- key_name: key_name_rebuild_resp
- user_data: user_data_rebuild_resp
- trusted_image_certificates: server_trusted_image_certificates_resp
-
-**Example Rebuild Server (rebuild Action) (v2.63)**
-
-.. literalinclude:: ../../doc/api_samples/servers/v2.63/server-action-rebuild-resp.json
+ - server_groups: server_groups_2_71
+ - locked_reason: locked_reason_resp
+ - config_drive: config_drive_resp_update_rebuild
+ - OS-EXT-AZ:availability_zone: OS-EXT-AZ:availability_zone_update_rebuild
+ - OS-EXT-SRV-ATTR:host: OS-EXT-SRV-ATTR:host_update_rebuild
+ - OS-EXT-SRV-ATTR:hypervisor_hostname: OS-EXT-SRV-ATTR:hypervisor_hostname_update_rebuild
+ - OS-EXT-SRV-ATTR:instance_name: OS-EXT-SRV-ATTR:instance_name_update_rebuild
+ - OS-EXT-STS:power_state: OS-EXT-STS:power_state_update_rebuild
+ - OS-EXT-STS:task_state: OS-EXT-STS:task_state_update_rebuild
+ - OS-EXT-STS:vm_state: OS-EXT-STS:vm_state_update_rebuild
+ - OS-EXT-SRV-ATTR:hostname: server_hostname_update_rebuild
+ - OS-EXT-SRV-ATTR:reservation_id: server_reservation_id_update_rebuild
+ - OS-EXT-SRV-ATTR:launch_index: server_launch_index_update_rebuild
+ - OS-EXT-SRV-ATTR:kernel_id: server_kernel_id_update_rebuild
+ - OS-EXT-SRV-ATTR:ramdisk_id: server_ramdisk_id_update_rebuild
+ - OS-EXT-SRV-ATTR:root_device_name: server_root_device_name_update_rebuild
+ - os-extended-volumes:volumes_attached: os-extended-volumes:volumes_attached_update_rebuild
+ - os-extended-volumes:volumes_attached.id: os-extended-volumes:volumes_attached.id_update_rebuild
+ - os-extended-volumes:volumes_attached.delete_on_termination: os-extended-volumes:volumes_attached.delete_on_termination_update_rebuild
+ - OS-SRV-USG:launched_at: OS-SRV-USG:launched_at_update_rebuild
+ - OS-SRV-USG:terminated_at: OS-SRV-USG:terminated_at_update_rebuild
+ - security_groups: security_groups_obj_update_rebuild
+ - security_group.name: name_update_rebuild
+ - host_status: host_status_update_rebuild
+
+**Example Rebuild Server (rebuild Action) (v2.75)**
+
+.. literalinclude:: ../../doc/api_samples/servers/v2.75/server-action-rebuild-resp.json
:language: javascript
+
Remove (Disassociate) Floating Ip (removeFloatingIp Action) (DEPRECATED)
=========================================================================
@@ -710,6 +800,7 @@ Response
.. literalinclude:: ../../doc/api_samples/os-rescue/server-rescue.json
:language: javascript
+
Resize Server (resize Action)
=============================
@@ -719,12 +810,6 @@ Resizes a server.
Specify the ``resize`` action in the request body.
-A successfully resized server shows a ``VERIFY_RESIZE`` status,
-``RESIZED`` VM status, and ``finished`` migration status. If you set the
-``resize_confirm_window`` option of the Compute service to an integer value,
-the Compute service automatically confirms the resize operation after
-the set interval in seconds.
-
**Preconditions**
You can only resize a server when its status is ``ACTIVE`` or ``SHUTOFF``.
@@ -732,6 +817,18 @@ You can only resize a server when its status is ``ACTIVE`` or ``SHUTOFF``.
If the server is locked, you must have administrator privileges
to resize the server.
+**Asynchronous Postconditions**
+
+A successfully resized server shows a ``VERIFY_RESIZE`` status and ``finished``
+migration status. If the cloud has configured the `resize_confirm_window`_
+option of the Compute service to a positive value, the Compute service
+automatically confirms the resize operation after the configured interval.
+
+.. _resize_confirm_window: https://docs.openstack.org/nova/latest/configuration/config.html#DEFAULT.resize_confirm_window
+
+.. note:: There is a `known limitation <https://bugs.launchpad.net/nova/+bug/1558880>`__
+ that ephemeral disks are not resized.
+
Normal response codes: 202
Error response codes: badRequest(400), unauthorized(401), forbidden(403),
diff --git a/api-ref/source/servers-admin-action.inc b/api-ref/source/servers-admin-action.inc
index e86e3e63f16..03a40d38ce5 100644
--- a/api-ref/source/servers-admin-action.inc
+++ b/api-ref/source/servers-admin-action.inc
@@ -8,7 +8,8 @@ Enables administrators to perform an action on a server. Specify the
action in the request body.
You can inject network information into, migrate, live-migrate,
-reset networking on, and reset the state of a server.
+reset networking on, reset the state of a server,
+and evacuate a server from a failed host to a new host.
Inject Network Information (injectNetworkInfo Action)
@@ -67,12 +68,12 @@ this parameter, the scheduler chooses a host.
**Asynchronous Postconditions**
-The server goes to a ``VERIFY_RESIZE`` status, ``RESIZED`` VM status,
-and ``finished`` migration status after a successful cold migration
-and then must be confirmed or reverted. If you set the
-``resize_confirm_window`` option of the Compute service to a positive integer
-value, the Compute service automatically confirms the migrate operation
-after the set interval in seconds.
+A successfully migrated server shows a ``VERIFY_RESIZE`` status and ``finished``
+migration status. If the cloud has configured the `resize_confirm_window`_
+option of the Compute service to a positive value, the Compute service
+automatically confirms the migrate operation after the configured interval.
+
+.. _resize_confirm_window: https://docs.openstack.org/nova/latest/configuration/config.html#DEFAULT.resize_confirm_window
Policy defaults enable only users with the administrative role to
perform this operation. Cloud providers can change these permissions
@@ -136,6 +137,10 @@ Nova responds immediately, and no pre-live-migration checks are returned.
The instance will not immediately change state to ``ERROR``, if a failure of
the live-migration checks occurs.
+Starting from API version 2.68, the ``force`` parameter is no longer accepted
+as this could not be meaningfully supported by servers with complex resource
+allocations.
+
Normal response codes: 202
Error response codes: badRequest(400), unauthorized(401), forbidden(403)
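
As a sketch of the post-2.68 request shape (host name, server ID, and
endpoint are placeholder assumptions), a live migration simply names a target
host, or passes ``null`` to let the scheduler pick one, with no ``force``
field::

    import requests

    HEADERS = {
        "X-Auth-Token": "TOKEN",
        "Content-Type": "application/json",
        # Opt in to microversion 2.68, where 'force' is no longer accepted.
        "X-OpenStack-Nova-API-Version": "2.68",
    }
    body = {"os-migrateLive": {"host": "target-host",
                               "block_migration": "auto"}}
    resp = requests.post(
        "http://openstack.example.com/v2.1/servers/"
        "9168b536-cd40-4630-b43f-b259807c6e87/action",
        headers=HEADERS, json=body)
    assert resp.status_code == 202
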
@@ -165,17 +170,19 @@ Response
If successful, this method does not return content in the response body.
-Reset Networking On A Server (resetNetwork Action)
-==================================================
+Reset Networking On A Server (resetNetwork Action) (DEPRECATED)
+===============================================================
.. rest_method:: POST /servers/{server_id}/action
Resets networking on a server.
-.. note::
+.. warning::
- Only the XenServer driver implements this feature and only if the guest
- has the XenAPI agent in the targeted server.
+    This action was only supported by the XenAPI virt driver, which was
+    deprecated in the 20.0.0 (Train) release and removed in the 22.0.0
+    (Victoria) release. The action itself was removed in the 23.0.0 (Wallaby)
+    release and should be avoided in new applications.
Specify the ``resetNetwork`` action in the request body.
@@ -186,7 +193,7 @@ through the ``policy.json`` file.
Normal response codes: 202
Error response codes: unauthorized(401), forbidden(403), itemNotFound(404),
-conflict(409)
+conflict(409), gone(410)
Request
-------
@@ -206,6 +213,7 @@ Response
If successful, this method does not return content in the response body.
+.. _os-resetState:
Reset Server State (os-resetState Action)
=========================================
diff --git a/api-ref/source/servers-remote-consoles.inc b/api-ref/source/servers-remote-consoles.inc
index 7bcf96b17c0..c8515d3315b 100644
--- a/api-ref/source/servers-remote-consoles.inc
+++ b/api-ref/source/servers-remote-consoles.inc
@@ -1,13 +1,13 @@
.. -*- rst -*-
-======================
-Server Remote Consoles
-======================
+=================
+ Server Consoles
+=================
-Create server remote console.
+Manage server consoles.
-Create Remote Console
-=====================
+Create Console
+==============
.. rest_method:: POST /servers/{server_id}/remote-consoles
@@ -17,9 +17,7 @@ The API provides a unified request for creating a remote console. The user can
get a URL to connect the console from this API. The URL includes the token
which is used to get permission to access the console. Servers may support
different console protocols. To return a remote console using a specific
-protocol, such as RDP, set the ``protocol`` parameter to ``rdp``. For the same
-protocol, there may be different connection types such as ``vnc protocol and
-novnc type`` or ``vnc protocol and xvpvnc type``.
+protocol, such as RDP, set the ``protocol`` parameter to ``rdp``.
Normal response codes: 200
@@ -56,3 +54,45 @@ Response
.. literalinclude:: ../../doc/api_samples/os-remote-consoles/v2.6/create-vnc-console-resp.json
:language: javascript
+
+
+Show Console Connection Information
+===================================
+
+.. rest_method:: GET /os-console-auth-tokens/{console_token}
+
+Given the console authentication token for a server, shows the related
+connection information.
+
+This method used to be available only for the ``rdp-html5`` console type before
+microversion 2.31. Starting from microversion 2.31 it's available for all
+console types.
+
+Normal response codes: 200
+
+Error response codes: badRequest(400), unauthorized(401), forbidden(403),
+itemNotFound(404)
+
+Request
+-------
+
+.. rest_parameters:: parameters.yaml
+
+ - console_token: console_token
+
+
+Response
+--------
+
+.. rest_parameters:: parameters.yaml
+
+ - console: console
+ - instance_uuid: instance_id_body
+ - host: console_host
+ - port: port_number
+ - internal_access_path: internal_access_path
+
+**Example Show Console Authentication Token**
+
+.. literalinclude:: ../../doc/api_samples/os-console-auth-tokens/get-console-connect-info-get-resp.json
+ :language: javascript
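
A minimal sketch of this call with ``requests`` (the endpoint, token value,
and console token are placeholder assumptions); the microversion header opts
in to 2.31 so that the lookup works for all console types::

    import requests

    HEADERS = {
        "X-Auth-Token": "TOKEN",
        "X-OpenStack-Nova-API-Version": "2.31",
    }
    console_token = "b60bcfc3-5fd4-4d21-986c-e83379107819"  # placeholder
    resp = requests.get(
        "http://openstack.example.com/v2.1/os-console-auth-tokens/"
        f"{console_token}",
        headers=HEADERS)
    info = resp.json()["console"]
    print(info["host"], info["port"], info["internal_access_path"])
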
diff --git a/api-ref/source/servers.inc b/api-ref/source/servers.inc
index 8883f9af0ac..547a71e9146 100644
--- a/api-ref/source/servers.inc
+++ b/api-ref/source/servers.inc
@@ -99,8 +99,10 @@ List Servers
.. rest_method:: GET /servers
-Lists IDs, names, and links for all servers.
+Lists IDs, names, and links for servers.
+By default, the servers are filtered using the project ID associated
+with the authenticated request.
Servers contain a status attribute that indicates the current server
state. You can filter on the server status when you complete a list
@@ -137,13 +139,12 @@ body. The possible server status values are:
- ``SOFT_DELETED``. The server is marked as deleted but the disk
images are still available to restore.
- ``SUSPENDED``. The server is suspended, either by request or
- necessity. This status appears for only the XenServer/XCP, KVM, and
- ESXi hypervisors. Administrative users can suspend an instance if it
- is infrequently used or to perform system maintenance. When you
- suspend an instance, its VM state is stored on disk, all memory is
- written to disk, and the virtual machine is stopped. Suspending an
- instance is similar to placing a device in hibernation; memory and
- vCPUs become available to create other instances.
+ necessity. When you suspend a server, its state is stored
+ on disk, all memory is written to disk, and the server is stopped.
+  Suspending a server is similar to placing a device in hibernation; its
+  occupied resources are not freed but rather kept for when the server is
+  resumed. If a server is infrequently used and its occupied resources need
+  to be freed to create other servers, it should be shelved.
- ``UNKNOWN``. The state of the server is unknown. Contact your cloud
provider.
- ``VERIFY_RESIZE``. System is awaiting confirmation that the server
@@ -154,9 +155,10 @@ There is whitelist for valid filter keys. Any filter key other than from
whitelist will be silently ignored.
- For non-admin users, whitelist is different from admin users whitelist.
- Valid whitelist for non-admin users includes
+ The valid whitelist can be configured using the
+ ``os_compute_api:servers:allow_all_filters`` policy rule. By default,
+ the valid whitelist for non-admin users includes
- - ``all_tenants``
- ``changes-since``
- ``flavor``
- ``image``
@@ -169,11 +171,31 @@ whitelist will be silently ignored.
- ``status``
- ``tags`` (New in version 2.26)
- ``tags-any`` (New in version 2.26)
-
+ - ``changes-before`` (New in version 2.66)
+ - ``locked`` (New in version 2.73)
+ - ``availability_zone`` (New in version 2.83)
+ - ``config_drive`` (New in version 2.83)
+ - ``key_name`` (New in version 2.83)
+ - ``created_at`` (New in version 2.83)
+ - ``launched_at`` (New in version 2.83)
+ - ``terminated_at`` (New in version 2.83)
+ - ``power_state`` (New in version 2.83)
+ - ``task_state`` (New in version 2.83)
+ - ``vm_state`` (New in version 2.83)
+ - ``progress`` (New in version 2.83)
+ - ``user_id`` (New in version 2.83)
- For admin user, whitelist includes all filter keys mentioned in
:ref:`list-server-request` Section.
+.. note:: Starting with microversion 2.69 if server details cannot be loaded
+ due to a transient condition in the deployment like infrastructure failure,
+ the response body for those unavailable servers will be missing keys. See
+ `handling down cells
+   <https://docs.openstack.org/api-guide/compute/down_cells.html>`__
+ section of the Compute API guide for more information on the keys that
+ would be returned in the partial constructs.
+
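
To make the filtering behaviour concrete, a small sketch (endpoint and token
are placeholder assumptions) that combines several whitelisted filter keys;
any key outside the whitelist would be silently ignored for non-admin users::

    import requests

    HEADERS = {
        "X-Auth-Token": "TOKEN",
        # 2.83 enables the newest filters listed above for non-admins.
        "X-OpenStack-Nova-API-Version": "2.83",
    }
    params = {"status": "ACTIVE", "availability_zone": "nova",
              "locked": "false"}
    resp = requests.get("http://openstack.example.com/v2.1/servers",
                        headers=HEADERS, params=params)
    for server in resp.json()["servers"]:
        print(server["id"], server["name"])
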
Normal response codes: 200
Error response codes: badRequest(400), unauthorized(401),
@@ -230,6 +252,8 @@ Request
- not-tags-any: not_tags_any_query
- tags: tags_query
- tags-any: tags_any_query
+ - changes-before: changes_before_server
+ - locked: locked_query_server
Response
--------
@@ -247,6 +271,15 @@ Response
.. literalinclude:: ../../doc/api_samples/servers/servers-list-resp.json
:language: javascript
+**Example List Servers (2.69)**
+
+This is a sample response for the servers from the non-responsive part of the
+deployment. The responses for the available server records will be normal
+without any missing keys.
+
+.. literalinclude:: ../../doc/api_samples/servers/v2.69/servers-list-resp.json
+ :language: javascript
+
Create Server
=============
@@ -271,13 +304,13 @@ When you create a server, the response shows only the server ID, its
links, and the admin password. You can get additional attributes
through subsequent ``GET`` requests on the server.
-Include the ``block-device-mapping-v2`` parameter in the create
+Include the ``block_device_mapping_v2`` parameter in the create
request body to boot a server from a volume.
Include the ``key_name`` parameter in the create request body to add a
keypair to the server when you create it. To create a keypair, make a
`create keypair
-<https://developer.openstack.org/api-ref/compute#create-or-import-keypair>`__
+<https://docs.openstack.org/api-ref/compute/#create-or-import-keypair>`__
request.
.. note:: Starting with microversion 2.37 the ``networks`` field is required.
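
To tie the two paragraphs above together, a minimal boot-from-volume sketch
(endpoint, token, names, and UUIDs are placeholder assumptions): the server
boots from an existing volume via ``block_device_mapping_v2``, and because the
request is made at a microversion of 2.37 or later, the ``networks`` field is
supplied explicitly::

    import requests

    body = {
        "server": {
            "name": "bfv-server",
            "flavorRef": "1",
            "networks": "none",  # required starting with microversion 2.37
            "block_device_mapping_v2": [{
                "boot_index": 0,
                "uuid": "70a599e0-31e7-49b7-b260-868f441e862b",
                "source_type": "volume",
                "destination_type": "volume",
            }],
        }
    }
    resp = requests.post(
        "http://openstack.example.com/v2.1/servers",
        headers={"X-Auth-Token": "TOKEN",
                 "X-OpenStack-Nova-API-Version": "2.63",
                 "Content-Type": "application/json"},
        json=body)
    assert resp.status_code == 202
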
@@ -335,7 +368,6 @@ Request
.. rest_parameters:: parameters.yaml
-
- server: server
- flavorRef: flavorRef
- name: server_name
@@ -361,6 +393,7 @@ Request
- block_device_mapping_v2.uuid: block_device_uuid
- block_device_mapping_v2.volume_size: volume_size
- block_device_mapping_v2.tag: device_tag_bdm
+ - block_device_mapping_v2.volume_type: device_volume_type
- config_drive: config_drive
- imageRef: imageRef
- key_name: key_name
@@ -370,8 +403,11 @@ Request
- security_groups: security_groups
- user_data: user_data
- description: server_description
+ - hostname: server_hostname_req
- tags: server_tags_create
- trusted_image_certificates: server_trusted_image_certificates_create_req
+ - host: server_host_create
+ - hypervisor_hostname: server_hypervisor_hostname_create
- os:scheduler_hints: os:scheduler_hints
- os:scheduler_hints.build_near_host_ip: os:scheduler_hints_build_near_host_ip
- os:scheduler_hints.cidr: os:scheduler_hints_cidr
@@ -402,6 +438,16 @@ Request
.. literalinclude:: ../../doc/api_samples/servers/v2.63/server-create-req.json
:language: javascript
+**Example Create Server With Host and Hypervisor Hostname (v2.74)**
+
+.. literalinclude:: ../../doc/api_samples/servers/v2.74/server-create-req-with-host-and-node.json
+ :language: javascript
+
+**Example Create Server With Hostname (v2.90)**
+
+.. literalinclude:: ../../doc/api_samples/servers/v2.90/server-create-req.json
+ :language: javascript
+
Response
--------
@@ -483,7 +529,7 @@ List Servers Detailed
.. rest_method:: GET /servers/detail
-For each server, shows server details including configuration drive,
+For each server, shows server details including config drive,
extended status, and server usage information.
The extended status information appears in the OS-EXT-STS:vm_state,
@@ -492,12 +538,16 @@ OS-EXT-STS:power_state, and OS-EXT-STS:task_state attributes.
The server usage information appears in the OS-SRV-USG:launched_at and
OS-SRV-USG:terminated_at attributes.
-To hide addresses information for instances in a certain state, set
-the osapi_hide_server_address_states configuration option. Set this
-option to a valid VM state in the nova.conf configuration file.
-
HostId is unique per account and is not globally unique.
+.. note:: Starting with microversion 2.69 if server details cannot be loaded
+ due to a transient condition in the deployment like infrastructure failure,
+ the response body for those unavailable servers will be missing keys. See
+ `handling down cells
+   <https://docs.openstack.org/api-guide/compute/down_cells.html>`__
+ section of the Compute API guide for more information on the keys that
+ would be returned in the partial constructs.
+
Normal response codes: 200
Error response codes: badRequest(400), unauthorized(401),
@@ -552,6 +602,8 @@ Request
- not-tags-any: not_tags_any_query
- tags: tags_query
- tags-any: tags_any_query
+ - changes-before: changes_before_server
+ - locked: locked_query_server
Response
--------
@@ -586,8 +638,15 @@ Response
- OS-DCF:diskConfig: disk_config
- OS-EXT-AZ:availability_zone: OS-EXT-AZ:availability_zone
- OS-EXT-SRV-ATTR:host: OS-EXT-SRV-ATTR:host
+ - OS-EXT-SRV-ATTR:hostname: server_hostname
- OS-EXT-SRV-ATTR:hypervisor_hostname: OS-EXT-SRV-ATTR:hypervisor_hostname
- OS-EXT-SRV-ATTR:instance_name: OS-EXT-SRV-ATTR:instance_name
+ - OS-EXT-SRV-ATTR:kernel_id: server_kernel_id
+ - OS-EXT-SRV-ATTR:launch_index: server_launch_index
+ - OS-EXT-SRV-ATTR:ramdisk_id: server_ramdisk_id
+ - OS-EXT-SRV-ATTR:reservation_id: server_reservation_id
+ - OS-EXT-SRV-ATTR:root_device_name: server_root_device_name
+ - OS-EXT-SRV-ATTR:user_data: server_user_data
- OS-EXT-STS:power_state: OS-EXT-STS:power_state
- OS-EXT-STS:task_state: OS-EXT-STS:task_state
- OS-EXT-STS:vm_state: OS-EXT-STS:vm_state
@@ -596,8 +655,6 @@ Response
- os-extended-volumes:volumes_attached.delete_on_termination: os-extended-volumes:volumes_attached.delete_on_termination
- OS-SRV-USG:launched_at: OS-SRV-USG:launched_at
- OS-SRV-USG:terminated_at: OS-SRV-USG:terminated_at
- - security_groups: security_groups_obj
- - security_group.name: name
- status: server_status
- tenant_id: tenant_id_body
- updated: updated
@@ -608,23 +665,28 @@ Response
- fault.message: fault_message
- fault.details: fault_details
- progress: progress
+ - security_groups: security_groups_obj_optional
+ - security_group.name: name
- servers_links: servers_links
- - OS-EXT-SRV-ATTR:hostname: server_hostname
- - OS-EXT-SRV-ATTR:reservation_id: server_reservation_id
- - OS-EXT-SRV-ATTR:launch_index: server_launch_index
- - OS-EXT-SRV-ATTR:kernel_id: server_kernel_id
- - OS-EXT-SRV-ATTR:ramdisk_id: server_ramdisk_id
- - OS-EXT-SRV-ATTR:root_device_name: server_root_device_name
- - OS-EXT-SRV-ATTR:user_data: server_user_data
- locked: locked
- host_status: host_status
- description: server_description_resp
- tags: tags
- trusted_image_certificates: server_trusted_image_certificates_resp
+ - locked_reason: locked_reason_resp
-**Example List Servers Detailed (2.63)**
+**Example List Servers Detailed (2.73)**
-.. literalinclude:: /../../doc/api_samples/servers/v2.63/servers-details-resp.json
+.. literalinclude:: /../../doc/api_samples/servers/v2.73/servers-details-resp.json
+ :language: javascript
+
+**Example List Servers Detailed (2.69)**
+
+This is a sample response for the servers from the non-responsive part of the
+deployment. The responses for the available server records will be normal
+without any missing keys.
+
+.. literalinclude:: ../../doc/api_samples/servers/v2.69/servers-details-resp.json
:language: javascript
@@ -641,14 +703,20 @@ The extended status information appears in the ``OS-EXT-STS:vm_state``, ``OS-EXT
The server usage information appears in the ``OS-SRV-USG:launched_at`` and ``OS-SRV-USG:terminated_at`` attributes.
-To hide ``addresses`` information for instances in a certain state, set the ``osapi_hide_server_address_states`` configuration option. Set this option to a valid VM state in the ``nova.conf`` configuration file.
-
HostId is unique per account and is not globally unique.
**Preconditions**
The server must exist.
+.. note:: Starting with microversion 2.69 if the server detail cannot be loaded
+ due to a transient condition in the deployment like infrastructure failure,
+ the response body for the unavailable server will be missing keys. See
+ `handling down cells
+   <https://docs.openstack.org/api-guide/compute/down_cells.html>`__
+ section of the Compute API guide for more information on the keys that
+ would be returned in the partial constructs.
+
Normal response codes: 200
Error response codes: unauthorized(401), forbidden(403),
@@ -694,8 +762,15 @@ Response
- OS-DCF:diskConfig: disk_config
- OS-EXT-AZ:availability_zone: OS-EXT-AZ:availability_zone
- OS-EXT-SRV-ATTR:host: OS-EXT-SRV-ATTR:host
+ - OS-EXT-SRV-ATTR:hostname: server_hostname
- OS-EXT-SRV-ATTR:hypervisor_hostname: OS-EXT-SRV-ATTR:hypervisor_hostname
- OS-EXT-SRV-ATTR:instance_name: OS-EXT-SRV-ATTR:instance_name
+ - OS-EXT-SRV-ATTR:kernel_id: server_kernel_id
+ - OS-EXT-SRV-ATTR:launch_index: server_launch_index
+ - OS-EXT-SRV-ATTR:ramdisk_id: server_ramdisk_id
+ - OS-EXT-SRV-ATTR:reservation_id: server_reservation_id
+ - OS-EXT-SRV-ATTR:root_device_name: server_root_device_name
+ - OS-EXT-SRV-ATTR:user_data: server_user_data
- OS-EXT-STS:power_state: OS-EXT-STS:power_state
- OS-EXT-STS:task_state: OS-EXT-STS:task_state
- OS-EXT-STS:vm_state: OS-EXT-STS:vm_state
@@ -704,8 +779,6 @@ Response
- os-extended-volumes:volumes_attached.delete_on_termination: os-extended-volumes:volumes_attached.delete_on_termination
- OS-SRV-USG:launched_at: OS-SRV-USG:launched_at
- OS-SRV-USG:terminated_at: OS-SRV-USG:terminated_at
- - security_groups: security_groups_obj
- - security_group.name: name
- status: server_status
- tenant_id: tenant_id_body
- updated: updated
@@ -716,22 +789,28 @@ Response
- fault.message: fault_message
- fault.details: fault_details
- progress: progress
- - OS-EXT-SRV-ATTR:hostname: server_hostname
- - OS-EXT-SRV-ATTR:reservation_id: server_reservation_id
- - OS-EXT-SRV-ATTR:launch_index: server_launch_index
- - OS-EXT-SRV-ATTR:kernel_id: server_kernel_id
- - OS-EXT-SRV-ATTR:ramdisk_id: server_ramdisk_id
- - OS-EXT-SRV-ATTR:root_device_name: server_root_device_name
- - OS-EXT-SRV-ATTR:user_data: server_user_data
+ - security_groups: security_groups_obj_optional
+ - security_group.name: name
- locked: locked
- host_status: host_status
- description: server_description_resp
- tags: tags
- trusted_image_certificates: server_trusted_image_certificates_resp
+ - server_groups: server_groups_2_71
+ - locked_reason: locked_reason_resp
-**Example Show Server Details (2.63)**
+**Example Show Server Details (2.73)**
-.. literalinclude:: ../../doc/api_samples/servers/v2.63/server-get-resp.json
+.. literalinclude:: ../../doc/api_samples/servers/v2.73/server-get-resp.json
+ :language: javascript
+
+**Example Show Server Details (2.69)**
+
+This is a sample response for a server from the non-responsive part of the
+deployment. The responses for available server records will be normal
+without any missing keys.
+
+.. literalinclude:: ../../doc/api_samples/servers/v2.69/server-get-resp.json
:language: javascript
Update Server
@@ -756,11 +835,14 @@ Request
- accessIPv4: accessIPv4_in
- accessIPv6: accessIPv6_in
- name: server_name_optional
+ - hostname: server_hostname_req
- OS-DCF:diskConfig: OS-DCF:diskConfig
- description: server_description
-.. note:: You can specify parameters to update independently.
- e.g. ``name`` only, ``description`` only, ``name`` and ``description``, etc.
+.. note::
+
+ You can specify parameters to update independently.
+ e.g. ``name`` only, ``description`` only, ``name`` and ``description``, etc.
**Example Update Server (2.63)**
@@ -810,10 +892,36 @@ Response
- description: server_description_resp
- tags: tags
- trusted_image_certificates: server_trusted_image_certificates_resp
-
-**Example Update Server (2.63)**
-
-.. literalinclude:: ../../doc/api_samples/servers/v2.63/server-update-resp.json
+ - server_groups: server_groups_2_71
+ - locked_reason: locked_reason_resp
+ - config_drive: config_drive_resp_update_rebuild
+ - OS-EXT-AZ:availability_zone: OS-EXT-AZ:availability_zone_update_rebuild
+ - OS-EXT-SRV-ATTR:host: OS-EXT-SRV-ATTR:host_update_rebuild
+ - OS-EXT-SRV-ATTR:hostname: server_hostname_update_rebuild
+ - OS-EXT-SRV-ATTR:hypervisor_hostname: OS-EXT-SRV-ATTR:hypervisor_hostname_update_rebuild
+ - OS-EXT-SRV-ATTR:instance_name: OS-EXT-SRV-ATTR:instance_name_update_rebuild
+ - OS-EXT-SRV-ATTR:kernel_id: server_kernel_id_update_rebuild
+ - OS-EXT-SRV-ATTR:launch_index: server_launch_index_update_rebuild
+ - OS-EXT-SRV-ATTR:ramdisk_id: server_ramdisk_id_update_rebuild
+ - OS-EXT-SRV-ATTR:reservation_id: server_reservation_id_update_rebuild
+ - OS-EXT-SRV-ATTR:root_device_name: server_root_device_name_update_rebuild
+ - OS-EXT-SRV-ATTR:user_data: server_user_data_update
+ - OS-EXT-STS:power_state: OS-EXT-STS:power_state_update_rebuild
+ - OS-EXT-STS:task_state: OS-EXT-STS:task_state_update_rebuild
+ - OS-EXT-STS:vm_state: OS-EXT-STS:vm_state_update_rebuild
+ - os-extended-volumes:volumes_attached: os-extended-volumes:volumes_attached_update_rebuild
+ - os-extended-volumes:volumes_attached.id: os-extended-volumes:volumes_attached.id_update_rebuild
+ - os-extended-volumes:volumes_attached.delete_on_termination: os-extended-volumes:volumes_attached.delete_on_termination_update_rebuild
+ - OS-SRV-USG:launched_at: OS-SRV-USG:launched_at_update_rebuild
+ - OS-SRV-USG:terminated_at: OS-SRV-USG:terminated_at_update_rebuild
+ - security_groups: security_groups_obj_update_rebuild
+ - security_group.name: name_update_rebuild
+ - host_status: host_status_update_rebuild
+ - key_name: key_name_resp_update
+
+**Example Update Server (2.75)**
+
+.. literalinclude:: ../../doc/api_samples/servers/v2.75/server-update-resp.json
:language: javascript
Delete Server
diff --git a/api-ref/source/versions.inc b/api-ref/source/versions.inc
index 7af60604f25..f8636d3b40c 100644
--- a/api-ref/source/versions.inc
+++ b/api-ref/source/versions.inc
@@ -14,7 +14,7 @@ supports versioning. There are two kinds of versions in Nova.
For more details about Microversions, please reference:
`Microversions
-<https://developer.openstack.org/api-guide/compute/microversions.html>`_
+<https://docs.openstack.org/api-guide/compute/microversions.html>`_
.. note:: The maximum microversion supported by each release varies.
Please reference:
@@ -66,7 +66,7 @@ v2.1 API is lower than listed below.
Show Details of Specific API Version
====================================
-.. rest_method:: GET /{api_version}
+.. rest_method:: GET /{api_version}/
This gets the details of a specific API at its root. Nearly all this
information exists at the API root, so this is mostly a redundant
@@ -102,7 +102,7 @@ Response
Response Example
----------------
-This is an example of a ``GET /v2.1`` on a relatively current server.
+This is an example of a ``GET /v2.1/`` on a relatively current server.
.. literalinclude:: /../../doc/api_samples/versions/v21-version-get-resp.json
:language: javascript
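
A short sketch of that request (the endpoint and token are placeholder
assumptions); the root document advertises both the legacy version id and the
minimum/maximum microversions, so no microversion header is needed::

    import requests

    resp = requests.get("http://openstack.example.com/v2.1/",
                        headers={"X-Auth-Token": "TOKEN"})
    version = resp.json()["version"]
    # e.g. 'v2.1', '2.1', and the highest microversion the cloud supports
    print(version["id"], version["min_version"], version["version"])
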
diff --git a/babel.cfg b/babel.cfg
deleted file mode 100644
index 15cd6cb76b9..00000000000
--- a/babel.cfg
+++ /dev/null
@@ -1,2 +0,0 @@
-[python: **.py]
-
diff --git a/bindep.txt b/bindep.txt
index 4621c7fad69..3a4d7bef806 100644
--- a/bindep.txt
+++ b/bindep.txt
@@ -2,13 +2,19 @@
# see https://docs.openstack.org/infra/bindep/ for additional information.
build-essential [platform:dpkg test]
+# fonts-freefont-otf is needed for pdf docs builds with the 'xelatex' engine
+fonts-freefont-otf [pdf-docs]
gcc [platform:rpm test]
# gettext and graphviz are needed by doc builds only. For transition,
# have them in both doc and test.
# TODO(jaegerandi): Remove test once infra scripts are updated.
gettext [doc test]
graphviz [doc test]
+# librsvg2 is needed for sphinxcontrib-svg2pdfconverter in docs builds.
+librsvg2-tools [doc platform:rpm]
+librsvg2-bin [doc platform:dpkg]
language-pack-en [platform:ubuntu]
+latexmk [pdf-docs]
libffi-dev [platform:dpkg test]
libffi-devel [platform:rpm test]
libmysqlclient-dev [platform:dpkg]
@@ -18,10 +24,15 @@ libxml2-dev [platform:dpkg test]
libxslt-devel [platform:rpm test]
libxslt1-dev [platform:dpkg test]
locales [platform:debian]
-mysql [platform:rpm]
+mysql [platform:rpm !platform:redhat]
mysql-client [platform:dpkg]
-mysql-devel [platform:rpm test]
-mysql-server
+mysql-devel [platform:rpm !platform:redhat test]
+mysql-server [!platform:redhat]
+mariadb-devel [platform:rpm platform:redhat test]
+mariadb-server [platform:rpm platform:redhat]
+openssh-client [platform:dpkg]
+openssh-clients [platform:rpm]
+openssl
pkg-config [platform:dpkg test]
pkgconfig [platform:rpm test]
postgresql
@@ -29,11 +40,18 @@ postgresql-client [platform:dpkg]
postgresql-devel [platform:rpm test]
postgresql-server [platform:rpm]
python-dev [platform:dpkg test]
-python-devel [platform:rpm test]
-python3-all [platform:dpkg !platform:ubuntu-precise]
-python3-all-dev [platform:dpkg !platform:ubuntu-precise]
-python3-devel [platform:fedora]
-python34-devel [platform:centos]
+python3-all [platform:dpkg]
+python3-all-dev [platform:dpkg]
+python3 [platform:rpm test]
+python3-devel [platform:rpm test]
sqlite-devel [platform:rpm test]
+texlive [pdf-docs]
+texlive-latex-recommended [pdf-docs]
+texlive-xetex [pdf-docs]
libpcre3-dev [platform:dpkg test]
pcre-devel [platform:rpm test]
+# Nova uses lsscsi via os-brick. Due to bindep usage in devstack and
+# elsewhere, we add it here to make sure it is picked up and available at
+# runtime and in unit tests. The net result is that lsscsi will be
+# installed for any nova installation.
+lsscsi
diff --git a/concourse_unit_test_task b/concourse_unit_test_task
new file mode 100644
index 00000000000..1ee9526f87b
--- /dev/null
+++ b/concourse_unit_test_task
@@ -0,0 +1,8 @@
+export DEBIAN_FRONTEND=noninteractive && \
+apt-get update && \
+apt-get install -y build-essential python3-pip python3-dev git libpcre++-dev gettext libpq-dev && \
+pip install -U pip && \
+pip install tox && \
+cd source && \
+export UPPER_CONSTRAINTS_FILE=https://raw.githubusercontent.com/sapcc/requirements/stable/xena-m3/upper-constraints.txt && \
+tox -e pep8,py
diff --git a/contrib/profile_caching_scheduler.sh b/contrib/profile_caching_scheduler.sh
deleted file mode 100755
index df38ab12bfe..00000000000
--- a/contrib/profile_caching_scheduler.sh
+++ /dev/null
@@ -1,40 +0,0 @@
-#!/bin/bash
-# Copyright (c) 2014 Rackspace Hosting
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-#
-# This runs a unit test that uses pycallgraph
-# to profile the select_destinations call
-# in the CachingScheduler
-#
-# For this script to work please run:
-# python setup.py develop
-# pip install -r requirements.txt
-# pip install -r test-requirements.txt
-# pip install pycallgraph
-# export EVENTLET_NO_GREENDNS='yes'
-#
-BASEDIR=$(dirname $0)
-TEST=$BASEDIR/../nova/tests/scheduler/test_caching_scheduler.py
-echo
-echo "Running this unit test file as a python script:"
-echo $TEST
-
-python $TEST
-
-RESULTDIR=$(pwd)
-echo
-echo "For profiler result see: "
-echo $RESULTDIR/scheduler.png
-echo
diff --git a/custom-requirements.txt b/custom-requirements.txt
new file mode 100644
index 00000000000..c3732972506
--- /dev/null
+++ b/custom-requirements.txt
@@ -0,0 +1,11 @@
+dumb-init
+python-memcached
+pymemcache
+mitmproxy
+python-ironicclient
+
+-e git+https://github.com/sapcc/raven-python.git@ccloud#egg=raven
+-e git+https://github.com/sapcc/openstack-watcher-middleware.git#egg=watcher-middleware
+-e git+https://github.com/sapcc/openstack-audit-middleware.git#egg=audit-middleware
+-e git+https://github.com/sapcc/python-agentliveness.git#egg=agentliveness
+-e git+https://github.com/sapcc/oslo.vmware.git@stable/xena-m3#egg=oslo.vmware
diff --git a/devstack/nova-multi-cell-exclude-list.txt b/devstack/nova-multi-cell-exclude-list.txt
new file mode 100644
index 00000000000..a61229c9064
--- /dev/null
+++ b/devstack/nova-multi-cell-exclude-list.txt
@@ -0,0 +1,12 @@
+# --exclude-list contents for the nova-multi-cell job defined in .zuul.yaml
+# See: https://stestr.readthedocs.io/en/latest/MANUAL.html#test-selection
+
+# Exclude tempest.scenario.test_network tests since they are slow and
+# only test advanced neutron features, unrelated to multi-cell testing.
+^tempest.scenario.test_network
+
+# Also exclude resize and migrate tests with qos ports as qos is currently
+# not supported in the cross-cell resize case. See
+# https://bugs.launchpad.net/nova/+bug/1907511 for details
+test_migrate_with_qos_min_bw_allocation
+test_resize_with_qos_min_bw_allocation
diff --git a/devstack/tempest-dsvm-caching-scheduler-rc b/devstack/tempest-dsvm-caching-scheduler-rc
deleted file mode 100644
index cc09af6b82d..00000000000
--- a/devstack/tempest-dsvm-caching-scheduler-rc
+++ /dev/null
@@ -1,30 +0,0 @@
-#
-# This script is executed in the OpenStack CI nova-caching-scheduler job.
-# It's used to configure which tempest tests actually get run. You can find
-# the CI job configuration under playbooks/legacy/nova-caching-scheduler/.
-#
-
-# Construct a regex to use when limiting scope of tempest
-# to avoid features unsupported by Nova's CachingScheduler support.
-
-# When adding entries to the regex, add a comment explaining why
-# since this list should not grow.
-
-r="^(?!.*"
-# exclude the slow tag
-r="$r(?:.*\[.*\bslow\b.*\])"
-
-# NOTE(mriedem): ServersAdminTestJSON.test_create_server_with_scheduling_hint
-# is skipped because it relies on the SameHostFilter which relies on the
-# HostState object which might be stale when that filter runs.
-# tempest.api.compute.admin.test_servers.ServersAdminTestJSON.test_create_server_with_scheduling_hint
-r="$r|(?:.*id\-fdcd9b33\-0903\-4e00\-a1f7\-b5f6543068d6.*)"
-# NOTE(mriedem): AggregatesAdminTestJSON.test_aggregate_add_host_create_server_with_az
-# is skipped because it creates an aggregate and adds a host to it, then
-# creates a server in that aggregate but fails to schedule because the caching
-# scheduler hasn't updated the host's aggregates view yet.
-# tempest.api.compute.admin.test_aggregates.AggregatesAdminTestJSON.test_aggregate_add_host_create_server_with_az
-r="$r|(?:.*id\-96be03c7\-570d\-409c\-90f8\-e4db3c646996.*)"
-r="$r).*$"
-
-export DEVSTACK_GATE_TEMPEST_REGEX="$r"
diff --git a/devstack/tempest-dsvm-cells-rc b/devstack/tempest-dsvm-cells-rc
deleted file mode 100644
index fadcc621118..00000000000
--- a/devstack/tempest-dsvm-cells-rc
+++ /dev/null
@@ -1,120 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-#
-# This script is executed in the OpenStack CI *tempest-dsvm-cells job.
-# It's used to configure which tempest tests actually get run. You can find
-# the CI job configuration here:
-#
-# http://git.openstack.org/cgit/openstack-infra/project-config/tree/jenkins/jobs/devstack-gate.yaml
-#
-# NOTE(sdague): tempest (because of testr) only supports and additive
-# regex for specifying test selection. As such this is a series of
-# negative assertions ?: for strings.
-#
-# Being a regex, an unescaped '.' matches any character, so those
-# should be escaped. There is no need to specify .* at the end of a
-# pattern, as it's handled by the final match.
-
-# Test idempotent ids are used for specific tests because
-# these are unchanged if the test name changes.
-
-# Construct a regex to use when limiting scope of tempest
-# to avoid features unsupported by Nova Cells.
-r="^(?!.*"
-
-# skip security group tests
-r="$r(?:tempest\.api\.compute\.security_groups)"
-
-# skip aggregates tests
-r="$r|(?:tempest\.api\.compute\.admin\.test_aggregates)"
-r="$r|(?:tempest\.scenario\.test_aggregates_basic_ops)"
-
-# skip availability zone tests
-r="$r|(?:(tempest\.api\.compute\.)(servers\.|admin\.)(test_availability_zone*))"
-
-# exclude the slow tag
-r="$r|(?:.*\[.*\bslow\b.*\])"
-
-# skip current regressions; when adding new entries to this list, add the bug
-# reference with it since this list should shrink
-
-# NOTE(mriedem): Resize tests are skipped in devstack until custom flavors
-# in devstack used in Tempest runs are synced to the cells database.
-# NOTE(mriedem): Rescue tests are skipped in devstack. They rely on floating
-# IPs and security groups, and rescue might not work with cells v1 anyway due
-# to synchronization issues.
-
-# tempest.api.compute.admin.test_networks.NetworksTest.test_get_network)"
-r="$r|(?:.*id\-d206d211\-8912\-486f\-86e2\-a9d090d1f416.*)"
-# tempest.api.compute.admin.test_networks.NetworksTest.test_list_all_networks)"
-r="$r|(?:.*id\-df3d1046\-6fa5\-4b2c\-ad0c\-cfa46a351cb9.*)"
-# tempest.api.compute.servers.test_create_server.ServersTestJSON.test_create_server_with_scheduler_hint_group
-r="$r|(?:.*id\-ed20d3fb\-9d1f\-4329\-b160\-543fbd5d9811.*)"
-# tempest.api.compute.servers.test_virtual_interfaces.VirtualInterfacesTestJSON.test_list_virtual_interfaces
-r="$r|(?:.*id\-96c4e2ef\-5e4d\-4d7f\-87f5\-fed6dca18016.*)"
-# tempest.api.compute.test_networks.ComputeNetworksTest.test_list_networks
-r="$r|(?:.*id\-3fe07175\-312e\-49a5\-a623\-5f52eeada4c2.*)"
-# tempest.scenario.test_minimum_basic.TestMinimumBasicScenario.test_minimum_basic_scenario
-r="$r|(?:.*id\-bdbb5441\-9204\-419d\-a225\-b4fdbfb1a1a8.*)"
-# tempest.scenario.test_encrypted_cinder_volumes.TestEncryptedCinderVolumes.test_encrypted_cinder_volumes_cryptsetup
-r="$r|(?:.*id\-cbc752ed\-b716\-4717\-910f\-956cce965722.*)"
-# tempest.scenario.test_encrypted_cinder_volumes.TestEncryptedCinderVolumes.test_encrypted_cinder_volumes_luks
-r="$r|(?:.*id\-79165fb4\-5534\-4b9d\-8429\-97ccffb8f86e.*)"
-# tempest.scenario.test_server_basic_ops.TestServerBasicOps.test_server_basicops
-r="$r|(?:.*id\-7fff3fb3\-91d8\-4fd0\-bd7d\-0204f1f180ba.*)"
-# tempest.scenario.test_snapshot_pattern.TestSnapshotPattern.test_snapshot_pattern
-r="$r|(?:.*id\-608e604b\-1d63\-4a82\-8e3e\-91bc665c90b4.*)"
-# tempest.api.compute.admin.test_hosts.HostsAdminTestJSON.test_show_host_detail
-r="$r|(?:.*id\-38adbb12\-aee2\-4498\-8aec\-329c72423aa4.*)"
-# tempest.api.compute.test_tenant_networks.ComputeTenantNetworksTest.test_list_show_tenant_networks
-r="$r|(?:.*id\-edfea98e\-bbe3\-4c7a\-9739\-87b986baff26.*)"
-# https://bugs.launchpad.net/nova/+bug/1489581
-r="$r|(?:tempest\.scenario\.test_volume_boot_pattern\.)"
-# https://bugs.launchpad.net/nova/+bug/1466696 - Cells: Race between instance 'unlock' and 'stop' can cause 'stop' to fail
-# tempest.api.compute.servers.test_server_actions.ServerActionsTestJSON.test_lock_unlock_server
-r="$r|(?:.*id\-80a8094c\-211e\-440a\-ab88\-9e59d556c7ee.*)"
-# scheduler hints apparently don't work in devstack cells
-# tempest.scenario.test_server_multinode.TestServerMultinode.test_schedule_to_all_nodes
-r="$r|(?:.*id\-9cecbe35\-b9d4\-48da\-a37e\-7ce70aa43d30.*)"
-# test_stamp_pattern uses security groups which aren't supported in cells v1
-# tempest.scenario.test_stamp_pattern.TestStampPattern.test_stamp_pattern
-r="$r|(?:.*id\-10fd234a\-515c\-41e5\-b092\-8323060598c5.*)"
-# Bug 1709985: rebuild randomly times out, probably due to sync issues
-# tempest.api.compute.admin.test_servers.ServersAdminTestJSON.test_rebuild_server_in_error_state
-r="$r|(?:.*id\-682cb127\-e5bb\-4f53\-87ce\-cb9003604442.*)"
-# tempest.api.compute.servers.test_disk_config.ServerDiskConfigTestJSON.test_rebuild_server_with_auto_disk_config
-r="$r|(?:.*id\-9c9fae77\-4feb\-402f\-8450\-bf1c8b609713.*)"
-# tempest.api.compute.servers.test_disk_config.ServerDiskConfigTestJSON.test_rebuild_server_with_manual_disk_config
-r="$r|(?:.*id\-bef56b09\-2e8c\-4883\-a370\-4950812f430e.*)"
-# tempest.api.compute.servers.test_server_actions.ServerActionsTestJSON.test_rebuild_server
-r="$r|(?:.*id\-aaa6cdf3\-55a7\-461a\-add9\-1c8596b9a07c.*)"
-# tempest.api.compute.servers.test_servers.ServerShowV247Test.test_update_rebuild_list_server
-r="$r|(?:.*id\-8de397c2\-57d0\-4b90\-aa30\-e5d668f21a8b.*)"
-# tempest.api.compute.servers.test_servers_microversions.ServerShowV254Test.test_rebuild_server
-r="$r|(?:.*id\-09170a98\-4940\-4637\-add7\-1a35121f1a5a.*)"
-# tempest.api.compute.servers.test_servers_microversions.ServerShowV257Test.test_rebuild_server
-r="$r|(?:.*id\-803df848\-080a\-4261\-8f11\-b020cd9b6f60.*)"
-# tempest.api.compute.servers.test_server_actions.ServerActionsTestJSON.test_rebuild_server_in_stop_state
-r="$r|(?:.*id\-30449a88\-5aff\-4f9b\-9866\-6ee9b17f906d.*)"
-# tempest.api.compute.servers.test_servers.ServerShowV263Test.test_show_update_rebuild_list_server
-r="$r|(?:.*id\-71b8e3d5\-11d2\-494f\-b917\-b094a4afed3c.*)"
-# NOTE(mriedem): cells v1 api doesn't route os-server-external-events
-# tempest.api.volume.test_volumes_extend.VolumesExtendAttachedTest.test_extend_attached_volume
-r="$r|(?:.*id\-301f5a30\-1c6f\-4ea0\-be1a\-91fd28d44354.*)"
-r="$r).*$"
-
-export DEVSTACK_GATE_TEMPEST_REGEX="$r"
-
-# Don't run the cells v1 job with ssh validation since it uses floating IPs
-# by default which cells v1 doesn't support.
-export DEVSTACK_LOCAL_CONFIG="TEMPEST_RUN_VALIDATION=False"
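
The rc files removed above all build their test selection the same way: a
single pattern whose body is one large negative lookahead, so anything
matching an excluded alternative is rejected and everything else passes. A
small sketch of the technique in Python (the test names are illustrative)::

    import re

    # Assemble the exclusion regex the way the rc files do.
    r = "^(?!.*"
    r += r"(?:.*\[.*\bslow\b.*\])"                       # exclude the slow tag
    r += r"|(?:tempest\.api\.compute\.security_groups)"  # skip these tests
    r += ").*$"
    pattern = re.compile(r)

    tests = [
        "tempest.api.compute.servers.test_a.T.test_x [slow]",
        "tempest.api.compute.security_groups.test_b.T.test_y",
        "tempest.api.compute.servers.test_c.T.test_z",
    ]
    # Only the last test survives both exclusions.
    print([t for t in tests if pattern.match(t)])
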
diff --git a/devstack/tempest-dsvm-lvm-rc b/devstack/tempest-dsvm-lvm-rc
deleted file mode 100644
index b4c5643b15b..00000000000
--- a/devstack/tempest-dsvm-lvm-rc
+++ /dev/null
@@ -1,62 +0,0 @@
-# Copyright 2016 IBM Corp.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-#
-# This script is executed in the OpenStack CI *tempest-dsvm-lvm job.
-# It's used to configure which tempest tests actually get run. You can find
-# the CI job configuration here:
-#
-# http://git.openstack.org/cgit/openstack-infra/project-config/tree/jenkins/jobs/lvm.yaml
-#
-
-# Construct a regex to use when limiting scope of tempest
-# to avoid features unsupported by Nova's LVM support.
-
-# Note that several tests are disabled by the use of tempest
-# feature toggles in devstack/lib/tempest for an lvm config,
-# so this regex is not entirely representative of what's excluded.
-
-# When adding entries to the regex, add a comment explaining why
-# since this list should not grow.
-
-r="^(?!.*"
-r="$r(?:.*\[.*\bslow\b.*\])"
-
-# Only run compute API tests. The ! here looks confusing but it's to negate
-# the ! at the beginning of the regex since the rest of this is meant to be
-# a backlist.
-r="$r|(?!.*api.compute.*)"
-
-# NOTE(mriedem): resize of non-volume-backed lvm instances does not yet work
-# tempest.api.compute.admin.test_migrations.MigrationsAdminTest.test_list_migrations_in_flavor_resize_situation
-r="$r|(?:.*id\-1b512062\-8093\-438e\-b47a\-37d2f597cd64.*)"
-# tempest.api.compute.admin.test_migrations.MigrationsAdminTest.test_resize_server_revert_deleted_flavor
-r="$r|(?:.*id\-33f1fec3\-ba18\-4470\-8e4e\-1d888e7c3593.*)"
-# tempest.api.compute.servers.test_delete_server.DeleteServersTestJSON.test_delete_server_while_in_verify_resize_state
-r="$r|(?:.*id\-ab0c38b4\-cdd8\-49d3\-9b92\-0cb898723c01.*)"
-# tempest.api.compute.servers.test_disk_config.ServerDiskConfigTestJSON.test_resize_server_from_auto_to_manual
-r="$r|(?:.*id\-693d16f3\-556c\-489a\-8bac\-3d0ca2490bad.*)"
-# tempest.api.compute.servers.test_disk_config.ServerDiskConfigTestJSON.test_resize_server_from_manual_to_auto
-r="$r|(?:.*id\-414e7e93\-45b5\-44bc\-8e03\-55159c6bfc97.*)"
-# tempest.api.compute.servers.test_server_actions.ServerActionsTestJSON.test_resize_server_confirm
-r="$r|(?:.*id\-1499262a\-9328\-4eda\-9068\-db1ac57498d2.*)"
-# tempest.api.compute.servers.test_server_actions.ServerActionsTestJSON.test_resize_server_confirm_from_stopped
-r="$r|(?:.*id\-138b131d\-66df\-48c9\-a171\-64f45eb92962.*)"
-# tempest.api.compute.servers.test_server_actions.ServerActionsTestJSON.test_resize_server_revert
-r="$r|(?:.*id\-c03aab19\-adb1\-44f5\-917d\-c419577e9e68.*)"
-# tempest.api.compute.servers.test_server_actions.ServerActionsTestJSON.test_resize_server_revert_with_volume_attached
-r="$r|(?:.*id\-fbbf075f\-a812\-4022\-bc5c\-ccb8047eef12.*)"
-r="$r).*$"
-
-export DEVSTACK_GATE_TEMPEST_REGEX="$r"
diff --git a/devstack/tempest-dsvm-tempest-xen-rc b/devstack/tempest-dsvm-tempest-xen-rc
deleted file mode 100644
index 669c7054674..00000000000
--- a/devstack/tempest-dsvm-tempest-xen-rc
+++ /dev/null
@@ -1,40 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-#
-# This script is executed in the Xen Project OpenStack CI dsvm-tempest-xen job.
-# It's used to configure which tempest tests actually get run. You can find
-# the CI job configuration here:
-#
-# https://xenbits.xen.org/gitweb/?p=openstack/ci-loop-config.git;a=blob;f=jenkins/jobs/jobs.yaml;hb=HEAD
-#
-
-# When adding entries to the regex, add a comment explaining why
-# since this list should not grow.
-
-# exclude the slow tag
-r="\[.*\bslow\b.*\]"
-
-# volume_swap fail
-# https://bugs.launchpad.net/nova/+bug/1676499
-r="$r|tempest\.api\.compute\.admin\.test_volume_swap\.TestVolumeSwap\.test_volume_swap"
-
-# Because paused guest can not be snapshot
-# https://bugs.launchpad.net/nova/+bug/1675787
-r="$r|tempest\.api\.compute\.images\.test_images\.ImagesTestJSON\.test_create_image_from_paused_server"
-
-# Cannot boot from encrypted volume
-# https://bugs.launchpad.net/nova/+bug/1702897
-r="$r|tempest\.scenario\.test_volume_boot_pattern\.TestVolumeBootPattern\.test_boot_server_from_encrypted_volume_luks"
-
-r="^(?!.*(?:$r))(?:^tempest\.(?:api|scenario|thirdparty))"
-export DEVSTACK_GATE_TEMPEST_REGEX="$r"
diff --git a/doc/README.rst b/doc/README.rst
index 88b6c1d6ba1..fbb88995d18 100644
--- a/doc/README.rst
+++ b/doc/README.rst
@@ -8,7 +8,7 @@ Contributor developer docs are built to:
https://docs.openstack.org/nova/latest/
API guide docs are built to:
-https://developer.openstack.org/api-guide/compute/
+https://docs.openstack.org/api-guide/compute/
For more details, see the "Building the Documentation" section of
doc/source/contributor/development-environment.rst.
diff --git a/doc/api_samples/flavor-extra-specs/flavor-extra-specs-create-req.json b/doc/api_samples/flavor-extra-specs/flavor-extra-specs-create-req.json
index 63fc8738b03..856fc38c01f 100644
--- a/doc/api_samples/flavor-extra-specs/flavor-extra-specs-create-req.json
+++ b/doc/api_samples/flavor-extra-specs/flavor-extra-specs-create-req.json
@@ -1,6 +1,6 @@
{
"extra_specs": {
- "key1": "value1",
- "key2": "value2"
+ "hw:cpu_policy": "shared",
+ "hw:numa_nodes": "1"
}
-}
\ No newline at end of file
+}
diff --git a/doc/api_samples/flavor-extra-specs/flavor-extra-specs-create-resp.json b/doc/api_samples/flavor-extra-specs/flavor-extra-specs-create-resp.json
index 63fc8738b03..856fc38c01f 100644
--- a/doc/api_samples/flavor-extra-specs/flavor-extra-specs-create-resp.json
+++ b/doc/api_samples/flavor-extra-specs/flavor-extra-specs-create-resp.json
@@ -1,6 +1,6 @@
{
"extra_specs": {
- "key1": "value1",
- "key2": "value2"
+ "hw:cpu_policy": "shared",
+ "hw:numa_nodes": "1"
}
-}
\ No newline at end of file
+}
diff --git a/doc/api_samples/flavor-extra-specs/flavor-extra-specs-get-resp.json b/doc/api_samples/flavor-extra-specs/flavor-extra-specs-get-resp.json
index e71755fe675..02284618e52 100644
--- a/doc/api_samples/flavor-extra-specs/flavor-extra-specs-get-resp.json
+++ b/doc/api_samples/flavor-extra-specs/flavor-extra-specs-get-resp.json
@@ -1,3 +1,3 @@
{
- "key1": "value1"
-}
\ No newline at end of file
+ "hw:numa_nodes": "1"
+}
diff --git a/doc/api_samples/flavor-extra-specs/flavor-extra-specs-list-resp.json b/doc/api_samples/flavor-extra-specs/flavor-extra-specs-list-resp.json
index 63fc8738b03..856fc38c01f 100644
--- a/doc/api_samples/flavor-extra-specs/flavor-extra-specs-list-resp.json
+++ b/doc/api_samples/flavor-extra-specs/flavor-extra-specs-list-resp.json
@@ -1,6 +1,6 @@
{
"extra_specs": {
- "key1": "value1",
- "key2": "value2"
+ "hw:cpu_policy": "shared",
+ "hw:numa_nodes": "1"
}
-}
\ No newline at end of file
+}
diff --git a/doc/api_samples/flavor-extra-specs/flavor-extra-specs-update-req.json b/doc/api_samples/flavor-extra-specs/flavor-extra-specs-update-req.json
index a40d79e320c..eca615335a8 100644
--- a/doc/api_samples/flavor-extra-specs/flavor-extra-specs-update-req.json
+++ b/doc/api_samples/flavor-extra-specs/flavor-extra-specs-update-req.json
@@ -1,3 +1,3 @@
{
- "key1": "new_value1"
-}
\ No newline at end of file
+ "hw:numa_nodes": "2"
+}
diff --git a/doc/api_samples/flavor-extra-specs/flavor-extra-specs-update-resp.json b/doc/api_samples/flavor-extra-specs/flavor-extra-specs-update-resp.json
index a40d79e320c..eca615335a8 100644
--- a/doc/api_samples/flavor-extra-specs/flavor-extra-specs-update-resp.json
+++ b/doc/api_samples/flavor-extra-specs/flavor-extra-specs-update-resp.json
@@ -1,3 +1,3 @@
{
- "key1": "new_value1"
-}
\ No newline at end of file
+ "hw:numa_nodes": "2"
+}
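
The updated samples above use real scheduler-relevant keys rather than the old
``key1``/``key2`` placeholders. For reference, a sketch of creating those
extra specs through the API (endpoint, token, and flavor ID are placeholder
assumptions)::

    import requests

    resp = requests.post(
        "http://openstack.example.com/v2.1/flavors/7/os-extra_specs",
        headers={"X-Auth-Token": "TOKEN",
                 "Content-Type": "application/json"},
        json={"extra_specs": {"hw:cpu_policy": "shared",
                              "hw:numa_nodes": "1"}})
    assert resp.status_code == 200
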
diff --git a/doc/api_samples/flavor-manage/v2.75/flavor-create-post-req.json b/doc/api_samples/flavor-manage/v2.75/flavor-create-post-req.json
new file mode 100644
index 00000000000..0d9926d7202
--- /dev/null
+++ b/doc/api_samples/flavor-manage/v2.75/flavor-create-post-req.json
@@ -0,0 +1,11 @@
+{
+ "flavor": {
+ "name": "test_flavor",
+ "ram": 1024,
+ "vcpus": 2,
+ "disk": 10,
+ "id": "10",
+ "rxtx_factor": 2.0,
+ "description": "test description"
+ }
+}
diff --git a/doc/api_samples/flavor-manage/v2.75/flavor-create-post-resp.json b/doc/api_samples/flavor-manage/v2.75/flavor-create-post-resp.json
new file mode 100644
index 00000000000..49dfd0c082a
--- /dev/null
+++ b/doc/api_samples/flavor-manage/v2.75/flavor-create-post-resp.json
@@ -0,0 +1,26 @@
+{
+ "flavor": {
+ "OS-FLV-DISABLED:disabled": false,
+ "disk": 10,
+ "OS-FLV-EXT-DATA:ephemeral": 0,
+ "os-flavor-access:is_public": true,
+ "id": "10",
+ "links": [
+ {
+ "href": "http://openstack.example.com/v2/6f70656e737461636b20342065766572/flavors/10",
+ "rel": "self"
+ },
+ {
+ "href": "http://openstack.example.com/6f70656e737461636b20342065766572/flavors/10",
+ "rel": "bookmark"
+ }
+ ],
+ "name": "test_flavor",
+ "ram": 1024,
+ "swap": 0,
+ "rxtx_factor": 2.0,
+ "vcpus": 2,
+ "description": "test description",
+ "extra_specs": {}
+ }
+}
diff --git a/doc/api_samples/flavor-manage/v2.75/flavor-update-req.json b/doc/api_samples/flavor-manage/v2.75/flavor-update-req.json
new file mode 100644
index 00000000000..93c8e1e8ab2
--- /dev/null
+++ b/doc/api_samples/flavor-manage/v2.75/flavor-update-req.json
@@ -0,0 +1,5 @@
+{
+ "flavor": {
+ "description": "updated description"
+ }
+}
diff --git a/doc/api_samples/flavor-manage/v2.75/flavor-update-resp.json b/doc/api_samples/flavor-manage/v2.75/flavor-update-resp.json
new file mode 100644
index 00000000000..4e92b10582c
--- /dev/null
+++ b/doc/api_samples/flavor-manage/v2.75/flavor-update-resp.json
@@ -0,0 +1,26 @@
+{
+ "flavor": {
+ "OS-FLV-DISABLED:disabled": false,
+ "disk": 1,
+ "OS-FLV-EXT-DATA:ephemeral": 0,
+ "os-flavor-access:is_public": true,
+ "id": "1",
+ "links": [
+ {
+ "href": "http://openstack.example.com/v2.1/6f70656e737461636b20342065766572/flavors/1",
+ "rel": "self"
+ },
+ {
+ "href": "http://openstack.example.com/6f70656e737461636b20342065766572/flavors/1",
+ "rel": "bookmark"
+ }
+ ],
+ "name": "m1.tiny",
+ "ram": 512,
+ "swap": 0,
+ "vcpus": 1,
+ "rxtx_factor": 1.0,
+ "description": "updated description",
+ "extra_specs": {}
+ }
+}
diff --git a/doc/api_samples/flavors/v2.61/flavor-get-resp.json b/doc/api_samples/flavors/v2.61/flavor-get-resp.json
index 124110adb7b..324b7711b86 100644
--- a/doc/api_samples/flavors/v2.61/flavor-get-resp.json
+++ b/doc/api_samples/flavors/v2.61/flavor-get-resp.json
@@ -22,8 +22,8 @@
"rxtx_factor": 1.0,
"description": "test description",
"extra_specs": {
- "key1": "value1",
- "key2": "value2"
+ "hw:cpu_policy": "shared",
+ "hw:numa_nodes": "1"
}
}
}
diff --git a/doc/api_samples/flavors/v2.61/flavors-detail-resp.json b/doc/api_samples/flavors/v2.61/flavors-detail-resp.json
index f615998043f..d4efe491f25 100644
--- a/doc/api_samples/flavors/v2.61/flavors-detail-resp.json
+++ b/doc/api_samples/flavors/v2.61/flavors-detail-resp.json
@@ -143,8 +143,7 @@
"rxtx_factor": 1.0,
"description": null,
"extra_specs": {
- "hw:mem_page_size": "2048",
- "hw:cpu_policy": "dedicated"
+ "hw:numa_nodes": "1"
}
},
{
@@ -170,8 +169,8 @@
"rxtx_factor": 1.0,
"description": "test description",
"extra_specs": {
- "key1": "value1",
- "key2": "value2"
+ "hw:cpu_policy": "shared",
+ "hw:numa_nodes": "1"
}
}
]
diff --git a/doc/api_samples/flavors/v2.75/flavor-get-resp.json b/doc/api_samples/flavors/v2.75/flavor-get-resp.json
new file mode 100644
index 00000000000..1d3c709b722
--- /dev/null
+++ b/doc/api_samples/flavors/v2.75/flavor-get-resp.json
@@ -0,0 +1,29 @@
+{
+ "flavor": {
+ "OS-FLV-DISABLED:disabled": false,
+ "disk": 20,
+ "OS-FLV-EXT-DATA:ephemeral": 0,
+ "os-flavor-access:is_public": true,
+ "id": "7",
+ "links": [
+ {
+ "href": "http://openstack.example.com/v2/6f70656e737461636b20342065766572/flavors/7",
+ "rel": "self"
+ },
+ {
+ "href": "http://openstack.example.com/6f70656e737461636b20342065766572/flavors/7",
+ "rel": "bookmark"
+ }
+ ],
+ "name": "m1.small.description",
+ "ram": 2048,
+ "swap": 0,
+ "vcpus": 1,
+ "rxtx_factor": 1.0,
+ "description": "test description",
+ "extra_specs": {
+ "hw:cpu_policy": "shared",
+ "hw:numa_nodes": "1"
+ }
+ }
+}
diff --git a/doc/api_samples/flavors/v2.75/flavors-detail-resp.json b/doc/api_samples/flavors/v2.75/flavors-detail-resp.json
new file mode 100644
index 00000000000..35eac681e76
--- /dev/null
+++ b/doc/api_samples/flavors/v2.75/flavors-detail-resp.json
@@ -0,0 +1,177 @@
+{
+ "flavors": [
+ {
+ "OS-FLV-DISABLED:disabled": false,
+ "disk": 1,
+ "OS-FLV-EXT-DATA:ephemeral": 0,
+ "os-flavor-access:is_public": true,
+ "id": "1",
+ "links": [
+ {
+ "href": "http://openstack.example.com/v2/6f70656e737461636b20342065766572/flavors/1",
+ "rel": "self"
+ },
+ {
+ "href": "http://openstack.example.com/6f70656e737461636b20342065766572/flavors/1",
+ "rel": "bookmark"
+ }
+ ],
+ "name": "m1.tiny",
+ "ram": 512,
+ "swap": 0,
+ "vcpus": 1,
+ "rxtx_factor": 1.0,
+ "description": null,
+ "extra_specs": {}
+ },
+ {
+ "OS-FLV-DISABLED:disabled": false,
+ "disk": 20,
+ "OS-FLV-EXT-DATA:ephemeral": 0,
+ "os-flavor-access:is_public": true,
+ "id": "2",
+ "links": [
+ {
+ "href": "http://openstack.example.com/v2/6f70656e737461636b20342065766572/flavors/2",
+ "rel": "self"
+ },
+ {
+ "href": "http://openstack.example.com/6f70656e737461636b20342065766572/flavors/2",
+ "rel": "bookmark"
+ }
+ ],
+ "name": "m1.small",
+ "ram": 2048,
+ "swap": 0,
+ "vcpus": 1,
+ "rxtx_factor": 1.0,
+ "description": null,
+ "extra_specs": {}
+ },
+ {
+ "OS-FLV-DISABLED:disabled": false,
+ "disk": 40,
+ "OS-FLV-EXT-DATA:ephemeral": 0,
+ "os-flavor-access:is_public": true,
+ "id": "3",
+ "links": [
+ {
+ "href": "http://openstack.example.com/v2/6f70656e737461636b20342065766572/flavors/3",
+ "rel": "self"
+ },
+ {
+ "href": "http://openstack.example.com/6f70656e737461636b20342065766572/flavors/3",
+ "rel": "bookmark"
+ }
+ ],
+ "name": "m1.medium",
+ "ram": 4096,
+ "swap": 0,
+ "vcpus": 2,
+ "rxtx_factor": 1.0,
+ "description": null,
+ "extra_specs": {}
+ },
+ {
+ "OS-FLV-DISABLED:disabled": false,
+ "disk": 80,
+ "OS-FLV-EXT-DATA:ephemeral": 0,
+ "os-flavor-access:is_public": true,
+ "id": "4",
+ "links": [
+ {
+ "href": "http://openstack.example.com/v2/6f70656e737461636b20342065766572/flavors/4",
+ "rel": "self"
+ },
+ {
+ "href": "http://openstack.example.com/6f70656e737461636b20342065766572/flavors/4",
+ "rel": "bookmark"
+ }
+ ],
+ "name": "m1.large",
+ "ram": 8192,
+ "swap": 0,
+ "vcpus": 4,
+ "rxtx_factor": 1.0,
+ "description": null,
+ "extra_specs": {}
+ },
+ {
+ "OS-FLV-DISABLED:disabled": false,
+ "disk": 160,
+ "OS-FLV-EXT-DATA:ephemeral": 0,
+ "os-flavor-access:is_public": true,
+ "id": "5",
+ "links": [
+ {
+ "href": "http://openstack.example.com/v2/6f70656e737461636b20342065766572/flavors/5",
+ "rel": "self"
+ },
+ {
+ "href": "http://openstack.example.com/6f70656e737461636b20342065766572/flavors/5",
+ "rel": "bookmark"
+ }
+ ],
+ "name": "m1.xlarge",
+ "ram": 16384,
+ "swap": 0,
+ "vcpus": 8,
+ "rxtx_factor": 1.0,
+ "description": null,
+ "extra_specs": {}
+ },
+ {
+ "OS-FLV-DISABLED:disabled": false,
+ "disk": 1,
+ "OS-FLV-EXT-DATA:ephemeral": 0,
+ "os-flavor-access:is_public": true,
+ "id": "6",
+ "links": [
+ {
+ "href": "http://openstack.example.com/v2/6f70656e737461636b20342065766572/flavors/6",
+ "rel": "self"
+ },
+ {
+ "href": "http://openstack.example.com/6f70656e737461636b20342065766572/flavors/6",
+ "rel": "bookmark"
+ }
+ ],
+ "name": "m1.tiny.specs",
+ "ram": 512,
+ "swap": 0,
+ "vcpus": 1,
+ "rxtx_factor": 1.0,
+ "description": null,
+ "extra_specs": {
+ "hw:numa_nodes": "1"
+ }
+ },
+ {
+ "OS-FLV-DISABLED:disabled": false,
+ "disk": 20,
+ "OS-FLV-EXT-DATA:ephemeral": 0,
+ "os-flavor-access:is_public": true,
+ "id": "7",
+ "links": [
+ {
+ "href": "http://openstack.example.com/v2/6f70656e737461636b20342065766572/flavors/7",
+ "rel": "self"
+ },
+ {
+ "href": "http://openstack.example.com/6f70656e737461636b20342065766572/flavors/7",
+ "rel": "bookmark"
+ }
+ ],
+ "name": "m1.small.description",
+ "ram": 2048,
+ "swap": 0,
+ "vcpus": 1,
+ "rxtx_factor": 1.0,
+ "description": "test description",
+ "extra_specs": {
+ "hw:cpu_policy": "shared",
+ "hw:numa_nodes": "1"
+ }
+ }
+ ]
+}
diff --git a/doc/api_samples/flavors/v2.75/flavors-list-resp.json b/doc/api_samples/flavors/v2.75/flavors-list-resp.json
new file mode 100644
index 00000000000..f368ed5c66f
--- /dev/null
+++ b/doc/api_samples/flavors/v2.75/flavors-list-resp.json
@@ -0,0 +1,109 @@
+{
+ "flavors": [
+ {
+ "id": "1",
+ "links": [
+ {
+ "href": "http://openstack.example.com/v2/6f70656e737461636b20342065766572/flavors/1",
+ "rel": "self"
+ },
+ {
+ "href": "http://openstack.example.com/6f70656e737461636b20342065766572/flavors/1",
+ "rel": "bookmark"
+ }
+ ],
+ "name": "m1.tiny",
+ "description": null
+ },
+ {
+ "id": "2",
+ "links": [
+ {
+ "href": "http://openstack.example.com/v2/6f70656e737461636b20342065766572/flavors/2",
+ "rel": "self"
+ },
+ {
+ "href": "http://openstack.example.com/6f70656e737461636b20342065766572/flavors/2",
+ "rel": "bookmark"
+ }
+ ],
+ "name": "m1.small",
+ "description": null
+ },
+ {
+ "id": "3",
+ "links": [
+ {
+ "href": "http://openstack.example.com/v2/6f70656e737461636b20342065766572/flavors/3",
+ "rel": "self"
+ },
+ {
+ "href": "http://openstack.example.com/6f70656e737461636b20342065766572/flavors/3",
+ "rel": "bookmark"
+ }
+ ],
+ "name": "m1.medium",
+ "description": null
+ },
+ {
+ "id": "4",
+ "links": [
+ {
+ "href": "http://openstack.example.com/v2/6f70656e737461636b20342065766572/flavors/4",
+ "rel": "self"
+ },
+ {
+ "href": "http://openstack.example.com/6f70656e737461636b20342065766572/flavors/4",
+ "rel": "bookmark"
+ }
+ ],
+ "name": "m1.large",
+ "description": null
+ },
+ {
+ "id": "5",
+ "links": [
+ {
+ "href": "http://openstack.example.com/v2/6f70656e737461636b20342065766572/flavors/5",
+ "rel": "self"
+ },
+ {
+ "href": "http://openstack.example.com/6f70656e737461636b20342065766572/flavors/5",
+ "rel": "bookmark"
+ }
+ ],
+ "name": "m1.xlarge",
+ "description": null
+ },
+ {
+ "id": "6",
+ "links": [
+ {
+ "href": "http://openstack.example.com/v2/6f70656e737461636b20342065766572/flavors/6",
+ "rel": "self"
+ },
+ {
+ "href": "http://openstack.example.com/6f70656e737461636b20342065766572/flavors/6",
+ "rel": "bookmark"
+ }
+ ],
+ "name": "m1.tiny.specs",
+ "description": null
+ },
+ {
+ "id": "7",
+ "links": [
+ {
+ "href": "http://openstack.example.com/v2/6f70656e737461636b20342065766572/flavors/7",
+ "rel": "self"
+ },
+ {
+ "href": "http://openstack.example.com/6f70656e737461636b20342065766572/flavors/7",
+ "rel": "bookmark"
+ }
+ ],
+ "name": "m1.small.description",
+ "description": "test description"
+ }
+ ]
+}
diff --git a/doc/api_samples/images/images-details-get-resp.json b/doc/api_samples/images/images-details-get-resp.json
index dfe6ca16faa..034c35f0c08 100644
--- a/doc/api_samples/images/images-details-get-resp.json
+++ b/doc/api_samples/images/images-details-get-resp.json
@@ -207,6 +207,7 @@
}
],
"metadata": {
+ "architecture": "x86_64",
"kernel_id": "nokernel",
"ramdisk_id": "nokernel"
},
diff --git a/doc/api_samples/limits/limit-get-resp.json b/doc/api_samples/limits/limit-get-resp.json
index 28309af04c6..f97939d2216 100644
--- a/doc/api_samples/limits/limit-get-resp.json
+++ b/doc/api_samples/limits/limit-get-resp.json
@@ -4,11 +4,11 @@
"maxImageMeta": 128,
"maxPersonality": 5,
"maxPersonalitySize": 10240,
- "maxSecurityGroupRules": 20,
- "maxSecurityGroups": 10,
+ "maxSecurityGroupRules": -1,
+ "maxSecurityGroups": -1,
"maxServerMeta": 128,
"maxTotalCores": 20,
- "maxTotalFloatingIps": 10,
+ "maxTotalFloatingIps": -1,
"maxTotalInstances": 10,
"maxTotalKeypairs": 100,
"maxTotalRAMSize": 51200,
diff --git a/doc/api_samples/os-aggregates/v2.81/aggregate-add-host-post-req.json b/doc/api_samples/os-aggregates/v2.81/aggregate-add-host-post-req.json
new file mode 100644
index 00000000000..4e6bdfef3f4
--- /dev/null
+++ b/doc/api_samples/os-aggregates/v2.81/aggregate-add-host-post-req.json
@@ -0,0 +1,5 @@
+{
+ "add_host": {
+ "host": "compute"
+ }
+}
\ No newline at end of file
diff --git a/doc/api_samples/os-aggregates/v2.81/aggregate-images-post-req.json b/doc/api_samples/os-aggregates/v2.81/aggregate-images-post-req.json
new file mode 100644
index 00000000000..8894e97f069
--- /dev/null
+++ b/doc/api_samples/os-aggregates/v2.81/aggregate-images-post-req.json
@@ -0,0 +1,6 @@
+{
+ "cache":
+ [
+ {"id": "70a599e0-31e7-49b7-b260-868f441e862b"}
+ ]
+}
\ No newline at end of file
diff --git a/doc/api_samples/os-aggregates/v2.81/aggregate-metadata-post-req.json b/doc/api_samples/os-aggregates/v2.81/aggregate-metadata-post-req.json
new file mode 100644
index 00000000000..7331e06a8c0
--- /dev/null
+++ b/doc/api_samples/os-aggregates/v2.81/aggregate-metadata-post-req.json
@@ -0,0 +1,9 @@
+{
+ "set_metadata":
+ {
+ "metadata":
+ {
+ "key": "value"
+ }
+ }
+}
\ No newline at end of file
diff --git a/doc/api_samples/os-aggregates/v2.81/aggregate-post-req.json b/doc/api_samples/os-aggregates/v2.81/aggregate-post-req.json
new file mode 100644
index 00000000000..624fe0c6291
--- /dev/null
+++ b/doc/api_samples/os-aggregates/v2.81/aggregate-post-req.json
@@ -0,0 +1,7 @@
+{
+ "aggregate":
+ {
+ "name": "name",
+ "availability_zone": "london"
+ }
+}
\ No newline at end of file
diff --git a/doc/api_samples/os-aggregates/v2.81/aggregate-post-resp.json b/doc/api_samples/os-aggregates/v2.81/aggregate-post-resp.json
new file mode 100644
index 00000000000..2e399d9c6c4
--- /dev/null
+++ b/doc/api_samples/os-aggregates/v2.81/aggregate-post-resp.json
@@ -0,0 +1,12 @@
+{
+ "aggregate": {
+ "availability_zone": "london",
+ "created_at": "2019-10-08T15:15:27.988513",
+ "deleted": false,
+ "deleted_at": null,
+ "id": 1,
+ "name": "name",
+ "updated_at": null,
+ "uuid": "a25e34a2-4fc1-4876-82d0-cf930fa04b82"
+ }
+}
\ No newline at end of file
diff --git a/doc/api_samples/os-aggregates/v2.81/aggregate-remove-host-post-req.json b/doc/api_samples/os-aggregates/v2.81/aggregate-remove-host-post-req.json
new file mode 100644
index 00000000000..e42b053009e
--- /dev/null
+++ b/doc/api_samples/os-aggregates/v2.81/aggregate-remove-host-post-req.json
@@ -0,0 +1,5 @@
+{
+ "remove_host": {
+ "host": "compute"
+ }
+}
\ No newline at end of file
diff --git a/doc/api_samples/os-aggregates/v2.81/aggregate-update-post-req.json b/doc/api_samples/os-aggregates/v2.81/aggregate-update-post-req.json
new file mode 100644
index 00000000000..0af1a37a4d9
--- /dev/null
+++ b/doc/api_samples/os-aggregates/v2.81/aggregate-update-post-req.json
@@ -0,0 +1,7 @@
+{
+ "aggregate":
+ {
+ "name": "newname",
+ "availability_zone": "nova2"
+ }
+}
\ No newline at end of file
diff --git a/doc/api_samples/os-aggregates/v2.81/aggregate-update-post-resp.json b/doc/api_samples/os-aggregates/v2.81/aggregate-update-post-resp.json
new file mode 100644
index 00000000000..350128a1a55
--- /dev/null
+++ b/doc/api_samples/os-aggregates/v2.81/aggregate-update-post-resp.json
@@ -0,0 +1,16 @@
+{
+ "aggregate": {
+ "availability_zone": "nova2",
+ "created_at": "2019-10-11T14:19:00.718841",
+ "deleted": false,
+ "deleted_at": null,
+ "hosts": [],
+ "id": 1,
+ "metadata": {
+ "availability_zone": "nova2"
+ },
+ "name": "newname",
+ "updated_at": "2019-10-11T14:19:00.785838",
+ "uuid": "4e7fa22f-f6cf-4e81-a5c7-6dc485815f81"
+ }
+}
\ No newline at end of file
diff --git a/doc/api_samples/os-aggregates/v2.81/aggregates-add-host-post-resp.json b/doc/api_samples/os-aggregates/v2.81/aggregates-add-host-post-resp.json
new file mode 100644
index 00000000000..decbc8d365d
--- /dev/null
+++ b/doc/api_samples/os-aggregates/v2.81/aggregates-add-host-post-resp.json
@@ -0,0 +1,18 @@
+{
+ "aggregate": {
+ "availability_zone": "london",
+ "created_at": "2019-10-11T14:19:05.250053",
+ "deleted": false,
+ "deleted_at": null,
+ "hosts": [
+ "compute"
+ ],
+ "id": 1,
+ "metadata": {
+ "availability_zone": "london"
+ },
+ "name": "name",
+ "updated_at": null,
+ "uuid": "47832b50-a192-4900-affe-8f7fdf2d7f22"
+ }
+}
\ No newline at end of file
diff --git a/doc/api_samples/os-aggregates/v2.81/aggregates-get-resp.json b/doc/api_samples/os-aggregates/v2.81/aggregates-get-resp.json
new file mode 100644
index 00000000000..7d978bdf275
--- /dev/null
+++ b/doc/api_samples/os-aggregates/v2.81/aggregates-get-resp.json
@@ -0,0 +1,16 @@
+{
+ "aggregate": {
+ "availability_zone": "london",
+ "created_at": "2019-10-11T14:19:07.366577",
+ "deleted": false,
+ "deleted_at": null,
+ "hosts": [],
+ "id": 1,
+ "metadata": {
+ "availability_zone": "london"
+ },
+ "name": "name",
+ "updated_at": null,
+ "uuid": "7c5ff84a-c901-4733-adf8-06875e265080"
+ }
+}
\ No newline at end of file
diff --git a/doc/api_samples/os-aggregates/v2.81/aggregates-list-get-resp.json b/doc/api_samples/os-aggregates/v2.81/aggregates-list-get-resp.json
new file mode 100644
index 00000000000..e1b5f11539a
--- /dev/null
+++ b/doc/api_samples/os-aggregates/v2.81/aggregates-list-get-resp.json
@@ -0,0 +1,20 @@
+{
+ "aggregates": [
+ {
+ "availability_zone": "london",
+ "created_at": "2019-10-11T14:19:07.386637",
+ "deleted": false,
+ "deleted_at": null,
+ "hosts": [
+ "compute"
+ ],
+ "id": 1,
+ "metadata": {
+ "availability_zone": "london"
+ },
+ "name": "name",
+ "updated_at": null,
+ "uuid": "070cb72c-f463-4f72-9c61-2c0556eb8c07"
+ }
+ ]
+}
\ No newline at end of file
diff --git a/doc/api_samples/os-aggregates/v2.81/aggregates-metadata-post-resp.json b/doc/api_samples/os-aggregates/v2.81/aggregates-metadata-post-resp.json
new file mode 100644
index 00000000000..f0860dad8ec
--- /dev/null
+++ b/doc/api_samples/os-aggregates/v2.81/aggregates-metadata-post-resp.json
@@ -0,0 +1,17 @@
+{
+ "aggregate": {
+ "availability_zone": "london",
+ "created_at": "2019-10-11T14:19:03.103465",
+ "deleted": false,
+ "deleted_at": null,
+ "hosts": [],
+ "id": 1,
+ "metadata": {
+ "availability_zone": "london",
+ "key": "value"
+ },
+ "name": "name",
+ "updated_at": "2019-10-11T14:19:03.169058",
+ "uuid": "0843db7c-f161-446d-84c8-d936320da2e8"
+ }
+}
\ No newline at end of file
diff --git a/doc/api_samples/os-aggregates/v2.81/aggregates-remove-host-post-resp.json b/doc/api_samples/os-aggregates/v2.81/aggregates-remove-host-post-resp.json
new file mode 100644
index 00000000000..b9b5bdefcde
--- /dev/null
+++ b/doc/api_samples/os-aggregates/v2.81/aggregates-remove-host-post-resp.json
@@ -0,0 +1,16 @@
+{
+ "aggregate": {
+ "availability_zone": "london",
+ "created_at": "2019-10-11T14:19:05.250053",
+ "deleted": false,
+ "deleted_at": null,
+ "hosts": [],
+ "id": 1,
+ "metadata": {
+ "availability_zone": "london"
+ },
+ "name": "name",
+ "updated_at": null,
+ "uuid": "47832b50-a192-4900-affe-8f7fdf2d7f22"
+ }
+}
\ No newline at end of file
diff --git a/doc/api_samples/os-attach-interfaces/v2.70/attach-interfaces-create-net_id-req.json b/doc/api_samples/os-attach-interfaces/v2.70/attach-interfaces-create-net_id-req.json
new file mode 100644
index 00000000000..d64d7fbc3e0
--- /dev/null
+++ b/doc/api_samples/os-attach-interfaces/v2.70/attach-interfaces-create-net_id-req.json
@@ -0,0 +1,11 @@
+{
+ "interfaceAttachment": {
+ "fixed_ips": [
+ {
+ "ip_address": "192.168.1.3"
+ }
+ ],
+ "net_id": "3cb9bc59-5699-4588-a4b1-b87f96708bc6",
+ "tag": "public"
+ }
+}
\ No newline at end of file
diff --git a/doc/api_samples/os-attach-interfaces/v2.70/attach-interfaces-create-req.json b/doc/api_samples/os-attach-interfaces/v2.70/attach-interfaces-create-req.json
new file mode 100644
index 00000000000..4e7285d0e98
--- /dev/null
+++ b/doc/api_samples/os-attach-interfaces/v2.70/attach-interfaces-create-req.json
@@ -0,0 +1,6 @@
+{
+ "interfaceAttachment": {
+ "port_id": "ce531f90-199f-48c0-816c-13e38010b442",
+ "tag": "public"
+ }
+}
\ No newline at end of file
diff --git a/doc/api_samples/os-attach-interfaces/v2.70/attach-interfaces-create-resp.json b/doc/api_samples/os-attach-interfaces/v2.70/attach-interfaces-create-resp.json
new file mode 100644
index 00000000000..0b0cf34a912
--- /dev/null
+++ b/doc/api_samples/os-attach-interfaces/v2.70/attach-interfaces-create-resp.json
@@ -0,0 +1,15 @@
+{
+ "interfaceAttachment": {
+ "fixed_ips": [
+ {
+ "ip_address": "192.168.1.3",
+ "subnet_id": "f8a6e8f8-c2ec-497c-9f23-da9616de54ef"
+ }
+ ],
+ "mac_addr": "fa:16:3e:4c:2c:30",
+ "net_id": "3cb9bc59-5699-4588-a4b1-b87f96708bc6",
+ "port_id": "ce531f90-199f-48c0-816c-13e38010b442",
+ "port_state": "ACTIVE",
+ "tag": "public"
+ }
+}
\ No newline at end of file
diff --git a/doc/api_samples/os-attach-interfaces/v2.70/attach-interfaces-list-resp.json b/doc/api_samples/os-attach-interfaces/v2.70/attach-interfaces-list-resp.json
new file mode 100644
index 00000000000..61de503fb4c
--- /dev/null
+++ b/doc/api_samples/os-attach-interfaces/v2.70/attach-interfaces-list-resp.json
@@ -0,0 +1,17 @@
+{
+ "interfaceAttachments": [
+ {
+ "fixed_ips": [
+ {
+ "ip_address": "192.168.1.3",
+ "subnet_id": "f8a6e8f8-c2ec-497c-9f23-da9616de54ef"
+ }
+ ],
+ "mac_addr": "fa:16:3e:4c:2c:30",
+ "net_id": "3cb9bc59-5699-4588-a4b1-b87f96708bc6",
+ "port_id": "ce531f90-199f-48c0-816c-13e38010b442",
+ "port_state": "ACTIVE",
+ "tag": "public"
+ }
+ ]
+}
\ No newline at end of file
diff --git a/doc/api_samples/os-attach-interfaces/v2.70/attach-interfaces-show-resp.json b/doc/api_samples/os-attach-interfaces/v2.70/attach-interfaces-show-resp.json
new file mode 100644
index 00000000000..0b0cf34a912
--- /dev/null
+++ b/doc/api_samples/os-attach-interfaces/v2.70/attach-interfaces-show-resp.json
@@ -0,0 +1,15 @@
+{
+ "interfaceAttachment": {
+ "fixed_ips": [
+ {
+ "ip_address": "192.168.1.3",
+ "subnet_id": "f8a6e8f8-c2ec-497c-9f23-da9616de54ef"
+ }
+ ],
+ "mac_addr": "fa:16:3e:4c:2c:30",
+ "net_id": "3cb9bc59-5699-4588-a4b1-b87f96708bc6",
+ "port_id": "ce531f90-199f-48c0-816c-13e38010b442",
+ "port_state": "ACTIVE",
+ "tag": "public"
+ }
+}
\ No newline at end of file
diff --git a/doc/api_samples/os-availability-zone/availability-zone-detail-resp.json b/doc/api_samples/os-availability-zone/availability-zone-detail-resp.json
index 5612a310b34..22181387783 100644
--- a/doc/api_samples/os-availability-zone/availability-zone-detail-resp.json
+++ b/doc/api_samples/os-availability-zone/availability-zone-detail-resp.json
@@ -9,20 +9,6 @@
"updated_at": null
}
},
- "consoleauth": {
- "nova-consoleauth": {
- "active": true,
- "available": true,
- "updated_at": null
- }
- },
- "network": {
- "nova-network": {
- "active": true,
- "available": true,
- "updated_at": null
- }
- },
"scheduler": {
"nova-scheduler": {
"active": true,
diff --git a/doc/api_samples/os-availability-zone/availability-zone-post-req.json b/doc/api_samples/os-availability-zone/availability-zone-post-req.json
deleted file mode 100644
index e19960f6be3..00000000000
--- a/doc/api_samples/os-availability-zone/availability-zone-post-req.json
+++ /dev/null
@@ -1,17 +0,0 @@
-{
- "server" : {
- "name" : "new-server-test",
- "imageRef" : "70a599e0-31e7-49b7-b260-868f441e862b",
- "flavorRef" : "1",
- "metadata" : {
- "My Server Name" : "Apache1"
- },
- "availability_zone": "nova",
- "personality" : [
- {
- "path" : "/etc/banner.txt",
- "contents" : "ICAgICAgDQoiQSBjbG91ZCBkb2VzIG5vdCBrbm93IHdoeSBpdCBtb3ZlcyBpbiBqdXN0IHN1Y2ggYSBkaXJlY3Rpb24gYW5kIGF0IHN1Y2ggYSBzcGVlZC4uLkl0IGZlZWxzIGFuIGltcHVsc2lvbi4uLnRoaXMgaXMgdGhlIHBsYWNlIHRvIGdvIG5vdy4gQnV0IHRoZSBza3kga25vd3MgdGhlIHJlYXNvbnMgYW5kIHRoZSBwYXR0ZXJucyBiZWhpbmQgYWxsIGNsb3VkcywgYW5kIHlvdSB3aWxsIGtub3csIHRvbywgd2hlbiB5b3UgbGlmdCB5b3Vyc2VsZiBoaWdoIGVub3VnaCB0byBzZWUgYmV5b25kIGhvcml6b25zLiINCg0KLVJpY2hhcmQgQmFjaA=="
- }
- ]
- }
-}
diff --git a/doc/api_samples/os-availability-zone/availability-zone-post-resp.json b/doc/api_samples/os-availability-zone/availability-zone-post-resp.json
deleted file mode 100644
index a13b8b9a5b3..00000000000
--- a/doc/api_samples/os-availability-zone/availability-zone-post-resp.json
+++ /dev/null
@@ -1,22 +0,0 @@
-{
- "server": {
- "adminPass": "k4pKvTfcA4gY",
- "id": "3e45fa2a-5204-466f-a684-c2a8e1c82d7f",
- "links": [
- {
- "href": "http://openstack.example.com/v2/6f70656e737461636b20342065766572/servers/3e45fa2a-5204-466f-a684-c2a8e1c82d7f",
- "rel": "self"
- },
- {
- "href": "http://openstack.example.com/6f70656e737461636b20342065766572/servers/3e45fa2a-5204-466f-a684-c2a8e1c82d7f",
- "rel": "bookmark"
- }
- ],
- "OS-DCF:diskConfig": "AUTO",
- "security_groups": [
- {
- "name": "default"
- }
- ]
- }
-}
\ No newline at end of file
diff --git a/doc/api_samples/os-cells/cells-list-empty-resp.json b/doc/api_samples/os-cells/cells-list-empty-resp.json
deleted file mode 100644
index 5325a4e855e..00000000000
--- a/doc/api_samples/os-cells/cells-list-empty-resp.json
+++ /dev/null
@@ -1,3 +0,0 @@
-{
- "cells": []
-}
\ No newline at end of file
diff --git a/doc/api_samples/os-cloudpipe/cloud-pipe-get-resp.json b/doc/api_samples/os-cloudpipe/cloud-pipe-get-resp.json
index caaa22774ad..e6bc92a6268 100644
--- a/doc/api_samples/os-cloudpipe/cloud-pipe-get-resp.json
+++ b/doc/api_samples/os-cloudpipe/cloud-pipe-get-resp.json
@@ -3,7 +3,7 @@
{
"created_at": "2012-11-27T17:18:01Z",
"instance_id": "27deecdb-baa3-4a26-9c82-32994b815b01",
- "internal_ip": "192.168.0.3",
+ "internal_ip": "192.168.1.30",
"project_id": "fa1765bd-a352-49c7-a6b7-8ee108a3cb0c",
"public_ip": "127.0.0.1",
"public_port": 22,
diff --git a/doc/api_samples/os-evacuate/v2.68/server-evacuate-find-host-req.json b/doc/api_samples/os-evacuate/v2.68/server-evacuate-find-host-req.json
new file mode 100644
index 00000000000..bb3a11d9799
--- /dev/null
+++ b/doc/api_samples/os-evacuate/v2.68/server-evacuate-find-host-req.json
@@ -0,0 +1,5 @@
+{
+ "evacuate": {
+ "adminPass": "MySecretPass"
+ }
+}
\ No newline at end of file
diff --git a/doc/api_samples/os-evacuate/v2.68/server-evacuate-req.json b/doc/api_samples/os-evacuate/v2.68/server-evacuate-req.json
new file mode 100644
index 00000000000..f67555075aa
--- /dev/null
+++ b/doc/api_samples/os-evacuate/v2.68/server-evacuate-req.json
@@ -0,0 +1,6 @@
+{
+ "evacuate": {
+ "host": "testHost",
+ "adminPass": "MySecretPass"
+ }
+}
\ No newline at end of file
diff --git a/doc/api_samples/os-floating-ips/floating-ips-create-req.json b/doc/api_samples/os-floating-ips/floating-ips-create-req.json
index 511b009bede..4bba875a5c4 100644
--- a/doc/api_samples/os-floating-ips/floating-ips-create-req.json
+++ b/doc/api_samples/os-floating-ips/floating-ips-create-req.json
@@ -1,3 +1,3 @@
{
- "pool": "nova"
-}
\ No newline at end of file
+ "pool": "public"
+}
diff --git a/doc/api_samples/os-floating-ips/floating-ips-create-resp.json b/doc/api_samples/os-floating-ips/floating-ips-create-resp.json
index fe161a7dd12..33c2c350b86 100644
--- a/doc/api_samples/os-floating-ips/floating-ips-create-resp.json
+++ b/doc/api_samples/os-floating-ips/floating-ips-create-resp.json
@@ -1,9 +1,9 @@
{
"floating_ip": {
"fixed_ip": null,
- "id": 1,
+ "id": "8baeddb4-45e2-4c36-8cb7-d79439a5f67c",
"instance_id": null,
- "ip": "10.10.10.1",
- "pool": "nova"
+ "ip": "172.24.4.17",
+ "pool": "public"
}
-}
\ No newline at end of file
+}
diff --git a/doc/api_samples/os-floating-ips/floating-ips-get-resp.json b/doc/api_samples/os-floating-ips/floating-ips-get-resp.json
index fe161a7dd12..33c2c350b86 100644
--- a/doc/api_samples/os-floating-ips/floating-ips-get-resp.json
+++ b/doc/api_samples/os-floating-ips/floating-ips-get-resp.json
@@ -1,9 +1,9 @@
{
"floating_ip": {
"fixed_ip": null,
- "id": 1,
+ "id": "8baeddb4-45e2-4c36-8cb7-d79439a5f67c",
"instance_id": null,
- "ip": "10.10.10.1",
- "pool": "nova"
+ "ip": "172.24.4.17",
+ "pool": "public"
}
-}
\ No newline at end of file
+}
diff --git a/doc/api_samples/os-floating-ips/floating-ips-list-empty-resp.json b/doc/api_samples/os-floating-ips/floating-ips-list-empty-resp.json
index 121dbd084e8..12f118da50d 100644
--- a/doc/api_samples/os-floating-ips/floating-ips-list-empty-resp.json
+++ b/doc/api_samples/os-floating-ips/floating-ips-list-empty-resp.json
@@ -1,3 +1,3 @@
{
"floating_ips": []
-}
\ No newline at end of file
+}
diff --git a/doc/api_samples/os-floating-ips/floating-ips-list-resp.json b/doc/api_samples/os-floating-ips/floating-ips-list-resp.json
index 4d58e0676a9..8585c4c7f9d 100644
--- a/doc/api_samples/os-floating-ips/floating-ips-list-resp.json
+++ b/doc/api_samples/os-floating-ips/floating-ips-list-resp.json
@@ -2,17 +2,17 @@
"floating_ips": [
{
"fixed_ip": null,
- "id": 1,
+ "id": "8baeddb4-45e2-4c36-8cb7-d79439a5f67c",
"instance_id": null,
- "ip": "10.10.10.1",
- "pool": "nova"
+ "ip": "172.24.4.17",
+ "pool": "public"
},
{
"fixed_ip": null,
- "id": 2,
+ "id": "05ef7490-745a-4af9-98e5-610dc97493c4",
"instance_id": null,
- "ip": "10.10.10.2",
- "pool": "nova"
+ "ip": "172.24.4.78",
+ "pool": "public"
}
]
-}
\ No newline at end of file
+}
diff --git a/doc/api_samples/os-hide-server-addresses/server-get-resp.json b/doc/api_samples/os-hide-server-addresses/server-get-resp.json
deleted file mode 100644
index e72c78649df..00000000000
--- a/doc/api_samples/os-hide-server-addresses/server-get-resp.json
+++ /dev/null
@@ -1,68 +0,0 @@
-{
- "server": {
- "accessIPv4": "1.2.3.4",
- "accessIPv6": "80fe::",
- "addresses": {},
- "created": "2013-09-24T14:39:00Z",
- "flavor": {
- "id": "1",
- "links": [
- {
- "href": "http://openstack.example.com/6f70656e737461636b20342065766572/flavors/1",
- "rel": "bookmark"
- }
- ]
- },
- "hostId": "d0635823e9162b22b90ff103f0c30f129bacf6ffb72f4d6fde87e738",
- "id": "4bdee8c7-507f-40f2-8429-d301edd3791b",
- "image": {
- "id": "70a599e0-31e7-49b7-b260-868f441e862b",
- "links": [
- {
- "href": "http://openstack.example.com/6f70656e737461636b20342065766572/images/70a599e0-31e7-49b7-b260-868f441e862b",
- "rel": "bookmark"
- }
- ]
- },
- "key_name": null,
- "links": [
- {
- "href": "http://openstack.example.com/v2/6f70656e737461636b20342065766572/servers/4bdee8c7-507f-40f2-8429-d301edd3791b",
- "rel": "self"
- },
- {
- "href": "http://openstack.example.com/6f70656e737461636b20342065766572/servers/4bdee8c7-507f-40f2-8429-d301edd3791b",
- "rel": "bookmark"
- }
- ],
- "metadata": {
- "My Server Name": "Apache1"
- },
- "name": "new-server-test",
- "config_drive": "",
- "OS-DCF:diskConfig": "AUTO",
- "OS-EXT-AZ:availability_zone": "nova",
- "OS-EXT-SRV-ATTR:host": "b8b357f7100d4391828f2177c922ef93",
- "OS-EXT-SRV-ATTR:hypervisor_hostname": "fake-mini",
- "OS-EXT-SRV-ATTR:instance_name": "instance-00000001",
- "OS-EXT-STS:power_state": 1,
- "OS-EXT-STS:task_state": null,
- "OS-EXT-STS:vm_state": "active",
- "os-extended-volumes:volumes_attached": [
- {"id": "volume_id1"},
- {"id": "volume_id2"}
- ],
- "OS-SRV-USG:launched_at": "2013-09-23T13:37:00.880302",
- "OS-SRV-USG:terminated_at": null,
- "progress": 0,
- "security_groups": [
- {
- "name": "default"
- }
- ],
- "status": "ACTIVE",
- "tenant_id": "6f70656e737461636b20342065766572",
- "updated": "2013-09-24T14:39:01Z",
- "user_id": "fake"
- }
-}
diff --git a/doc/api_samples/os-hide-server-addresses/servers-details-resp.json b/doc/api_samples/os-hide-server-addresses/servers-details-resp.json
deleted file mode 100644
index cfbf90ee893..00000000000
--- a/doc/api_samples/os-hide-server-addresses/servers-details-resp.json
+++ /dev/null
@@ -1,76 +0,0 @@
-{
- "servers": [
- {
- "accessIPv4": "1.2.3.4",
- "accessIPv6": "80fe::",
- "addresses": {},
- "created": "2013-09-24T14:44:01Z",
- "flavor": {
- "id": "1",
- "links": [
- {
- "href": "http://openstack.example.com/6f70656e737461636b20342065766572/flavors/1",
- "rel": "bookmark"
- }
- ]
- },
- "hostId": "a4fa72ae8741e5e18fb062c15657b8f689b8da2837b734c61fc9eedd",
- "id": "a747eac1-e3ed-446c-935a-c2a2853f919c",
- "image": {
- "id": "70a599e0-31e7-49b7-b260-868f441e862b",
- "links": [
- {
- "href": "http://openstack.example.com/6f70656e737461636b20342065766572/images/70a599e0-31e7-49b7-b260-868f441e862b",
- "rel": "bookmark"
- }
- ]
- },
- "key_name": null,
- "links": [
- {
- "href": "http://openstack.example.com/v2/6f70656e737461636b20342065766572/servers/a747eac1-e3ed-446c-935a-c2a2853f919c",
- "rel": "self"
- },
- {
- "href": "http://openstack.example.com/6f70656e737461636b20342065766572/servers/a747eac1-e3ed-446c-935a-c2a2853f919c",
- "rel": "bookmark"
- }
- ],
- "metadata": {
- "My Server Name": "Apache1"
- },
- "name": "new-server-test",
- "config_drive": "",
- "OS-DCF:diskConfig": "AUTO",
- "OS-EXT-AZ:availability_zone": "nova",
- "OS-EXT-SRV-ATTR:host": "c3f14e9812ad496baf92ccfb3c61e15f",
- "OS-EXT-SRV-ATTR:hypervisor_hostname": "fake-mini",
- "OS-EXT-SRV-ATTR:instance_name": "instance-00000001",
- "OS-EXT-STS:power_state": 1,
- "OS-EXT-STS:task_state": null,
- "OS-EXT-STS:vm_state": "active",
- "os-extended-volumes:volumes_attached": [
- {"id": "volume_id1"},
- {"id": "volume_id2"}
- ],
- "OS-SRV-USG:launched_at": "2013-09-23T13:53:12.774549",
- "OS-SRV-USG:terminated_at": null,
- "progress": 0,
- "security_groups": [
- {
- "name": "default"
- }
- ],
- "status": "ACTIVE",
- "tenant_id": "6f70656e737461636b20342065766572",
- "updated": "2013-09-24T14:44:01Z",
- "user_id": "fake"
- }
- ],
- "servers_links": [
- {
- "href": "http://openstack.example.com/v2.1/6f70656e737461636b20342065766572/servers/detail?limit=1&marker=a747eac1-e3ed-446c-935a-c2a2853f919c",
- "rel": "next"
- }
- ]
-}
diff --git a/doc/api_samples/os-hide-server-addresses/servers-list-resp.json b/doc/api_samples/os-hide-server-addresses/servers-list-resp.json
deleted file mode 100644
index 9481378c2ab..00000000000
--- a/doc/api_samples/os-hide-server-addresses/servers-list-resp.json
+++ /dev/null
@@ -1,24 +0,0 @@
-{
- "servers": [
- {
- "id": "b2a7068b-8aed-41a4-aa74-af8feb984bae",
- "links": [
- {
- "href": "http://openstack.example.com/v2/6f70656e737461636b20342065766572/servers/b2a7068b-8aed-41a4-aa74-af8feb984bae",
- "rel": "self"
- },
- {
- "href": "http://openstack.example.com/6f70656e737461636b20342065766572/servers/b2a7068b-8aed-41a4-aa74-af8feb984bae",
- "rel": "bookmark"
- }
- ],
- "name": "new-server-test"
- }
- ],
- "servers_links": [
- {
- "href": "http://openstack.example.com/v2.1/6f70656e737461636b20342065766572/servers?limit=1&marker=b2a7068b-8aed-41a4-aa74-af8feb984bae",
- "rel": "next"
- }
- ]
-}
\ No newline at end of file
diff --git a/doc/api_samples/os-hosts/hosts-list-resp.json b/doc/api_samples/os-hosts/hosts-list-resp.json
index 1413b3aabdc..cd6b8d0c6a2 100644
--- a/doc/api_samples/os-hosts/hosts-list-resp.json
+++ b/doc/api_samples/os-hosts/hosts-list-resp.json
@@ -10,16 +10,6 @@
"service": "compute",
"zone": "nova"
},
- {
- "host_name": "e73ec0bd35c64de4a1adfa8b8969a1f6",
- "service": "consoleauth",
- "zone": "internal"
- },
- {
- "host_name": "396a8a0a234f476eb05fb9fbc5802ba7",
- "service": "network",
- "zone": "internal"
- },
{
"host_name": "abffda96592c4eacaf4111c28fddee17",
"service": "scheduler",
diff --git a/doc/api_samples/os-hypervisors/v2.33/hypervisors-detail-resp.json b/doc/api_samples/os-hypervisors/v2.33/hypervisors-detail-resp.json
index 267e9d50998..2da7f09f4e5 100644
--- a/doc/api_samples/os-hypervisors/v2.33/hypervisors-detail-resp.json
+++ b/doc/api_samples/os-hypervisors/v2.33/hypervisors-detail-resp.json
@@ -33,7 +33,7 @@
"running_vms": 0,
"service": {
"host": "host1",
- "id": 7,
+ "id": 6,
"disabled_reason": null
},
"vcpus": 2,
@@ -42,7 +42,7 @@
],
"hypervisors_links": [
{
- "href": "http://openstack.example.com/v2.1/6f70656e737461636b20342065766572/hypervisors/detail?limit=1&marker=2",
+ "href": "http://openstack.example.com/v2.1/6f70656e737461636b20342065766572/os-hypervisors/detail?limit=1&marker=2",
"rel": "next"
}
]
diff --git a/doc/api_samples/os-hypervisors/v2.33/hypervisors-list-resp.json b/doc/api_samples/os-hypervisors/v2.33/hypervisors-list-resp.json
index 9a5771df022..bb531ace7a5 100644
--- a/doc/api_samples/os-hypervisors/v2.33/hypervisors-list-resp.json
+++ b/doc/api_samples/os-hypervisors/v2.33/hypervisors-list-resp.json
@@ -9,7 +9,7 @@
],
"hypervisors_links": [
{
- "href": "http://openstack.example.com/v2.1/6f70656e737461636b20342065766572/hypervisors?limit=1&marker=2",
+ "href": "http://openstack.example.com/v2.1/6f70656e737461636b20342065766572/os-hypervisors?limit=1&marker=2",
"rel": "next"
}
]
diff --git a/doc/api_samples/os-hypervisors/v2.53/hypervisors-detail-resp.json b/doc/api_samples/os-hypervisors/v2.53/hypervisors-detail-resp.json
index a2172f69a22..ed62b8cd476 100644
--- a/doc/api_samples/os-hypervisors/v2.53/hypervisors-detail-resp.json
+++ b/doc/api_samples/os-hypervisors/v2.53/hypervisors-detail-resp.json
@@ -42,7 +42,7 @@
],
"hypervisors_links": [
{
- "href": "http://openstack.example.com/v2.1/6f70656e737461636b20342065766572/hypervisors/detail?limit=1&marker=1bb62a04-c576-402c-8147-9e89757a09e3",
+ "href": "http://openstack.example.com/v2.1/6f70656e737461636b20342065766572/os-hypervisors/detail?limit=1&marker=1bb62a04-c576-402c-8147-9e89757a09e3",
"rel": "next"
}
]
diff --git a/doc/api_samples/os-hypervisors/v2.53/hypervisors-list-resp.json b/doc/api_samples/os-hypervisors/v2.53/hypervisors-list-resp.json
index ec10b4a106e..2171311a16c 100644
--- a/doc/api_samples/os-hypervisors/v2.53/hypervisors-list-resp.json
+++ b/doc/api_samples/os-hypervisors/v2.53/hypervisors-list-resp.json
@@ -1,7 +1,7 @@
{
"hypervisors": [
{
- "hypervisor_hostname": "fake-mini",
+ "hypervisor_hostname": "host2",
"id": "1bb62a04-c576-402c-8147-9e89757a09e3",
"state": "up",
"status": "enabled"
@@ -9,7 +9,7 @@
],
"hypervisors_links": [
{
- "href": "http://openstack.example.com/v2.1/6f70656e737461636b20342065766572/hypervisors?limit=1&marker=1bb62a04-c576-402c-8147-9e89757a09e3",
+ "href": "http://openstack.example.com/v2.1/6f70656e737461636b20342065766572/os-hypervisors?limit=1&marker=1bb62a04-c576-402c-8147-9e89757a09e3",
"rel": "next"
}
]
diff --git a/doc/api_samples/os-hypervisors/v2.88/hypervisors-detail-resp.json b/doc/api_samples/os-hypervisors/v2.88/hypervisors-detail-resp.json
new file mode 100644
index 00000000000..a009a125ffc
--- /dev/null
+++ b/doc/api_samples/os-hypervisors/v2.88/hypervisors-detail-resp.json
@@ -0,0 +1,25 @@
+{
+ "hypervisors": [
+ {
+ "host_ip": "192.168.1.135",
+ "hypervisor_hostname": "host2",
+ "hypervisor_type": "fake",
+ "hypervisor_version": 1000,
+ "id": "f6d28711-9c10-470e-8b31-c03f498b0032",
+ "service": {
+ "disabled_reason": null,
+ "host": "host2",
+ "id": "21bbb5fb-ec98-48b3-89cf-c94402c55611"
+ },
+ "state": "up",
+ "status": "enabled",
+ "uptime": null
+ }
+ ],
+ "hypervisors_links": [
+ {
+ "href": "http://openstack.example.com/v2.1/6f70656e737461636b20342065766572/os-hypervisors/detail?limit=1&marker=f6d28711-9c10-470e-8b31-c03f498b0032",
+ "rel": "next"
+ }
+ ]
+}
diff --git a/doc/api_samples/os-hypervisors/v2.88/hypervisors-detail-with-servers-resp.json b/doc/api_samples/os-hypervisors/v2.88/hypervisors-detail-with-servers-resp.json
new file mode 100644
index 00000000000..26526b18cc2
--- /dev/null
+++ b/doc/api_samples/os-hypervisors/v2.88/hypervisors-detail-with-servers-resp.json
@@ -0,0 +1,29 @@
+{
+ "hypervisors": [
+ {
+ "host_ip": "192.168.1.135",
+ "hypervisor_hostname": "fake-mini",
+ "hypervisor_type": "fake",
+ "hypervisor_version": 1000,
+ "id": "28b0e607-d58a-4602-a511-efe18024f4d5",
+ "servers": [
+ {
+ "name": "test_server1",
+ "uuid": "aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa"
+ },
+ {
+ "name": "test_server2",
+ "uuid": "bbbbbbbb-bbbb-bbbb-bbbb-bbbbbbbbbbbb"
+ }
+ ],
+ "service": {
+ "disabled_reason": null,
+ "host": "compute",
+ "id": "40e769a5-7489-4cf3-be46-f6bd3e4e3c25"
+ },
+ "state": "up",
+ "status": "enabled",
+ "uptime": null
+ }
+ ]
+}
diff --git a/doc/api_samples/os-hypervisors/v2.88/hypervisors-list-resp.json b/doc/api_samples/os-hypervisors/v2.88/hypervisors-list-resp.json
new file mode 100644
index 00000000000..7c042bf8fa6
--- /dev/null
+++ b/doc/api_samples/os-hypervisors/v2.88/hypervisors-list-resp.json
@@ -0,0 +1,16 @@
+{
+ "hypervisors": [
+ {
+ "hypervisor_hostname": "host2",
+ "id": "bfb90ba3-e13e-4413-90ff-5cdbfea727e2",
+ "state": "up",
+ "status": "enabled"
+ }
+ ],
+ "hypervisors_links": [
+ {
+ "href": "http://openstack.example.com/v2.1/6f70656e737461636b20342065766572/os-hypervisors?limit=1&marker=bfb90ba3-e13e-4413-90ff-5cdbfea727e2",
+ "rel": "next"
+ }
+ ]
+}
\ No newline at end of file
diff --git a/doc/api_samples/os-hypervisors/v2.88/hypervisors-search-resp.json b/doc/api_samples/os-hypervisors/v2.88/hypervisors-search-resp.json
new file mode 100644
index 00000000000..6190e428bd7
--- /dev/null
+++ b/doc/api_samples/os-hypervisors/v2.88/hypervisors-search-resp.json
@@ -0,0 +1,10 @@
+{
+ "hypervisors": [
+ {
+ "hypervisor_hostname": "fake-mini",
+ "id": "6b7876c5-9ae7-4fa7-a5c8-28c796d17381",
+ "state": "up",
+ "status": "enabled"
+ }
+ ]
+}
\ No newline at end of file
diff --git a/doc/api_samples/os-hypervisors/v2.88/hypervisors-show-resp.json b/doc/api_samples/os-hypervisors/v2.88/hypervisors-show-resp.json
new file mode 100644
index 00000000000..d1e566d6eb6
--- /dev/null
+++ b/doc/api_samples/os-hypervisors/v2.88/hypervisors-show-resp.json
@@ -0,0 +1,17 @@
+{
+ "hypervisor": {
+ "host_ip": "192.168.1.135",
+ "hypervisor_hostname": "fake-mini",
+ "hypervisor_type": "fake",
+ "hypervisor_version": 1000,
+ "id": "f79c1cce-9972-44c6-aa30-1d9e6526ce37",
+ "service": {
+ "disabled_reason": null,
+ "host": "compute",
+ "id": "7e6b27b8-f563-4c21-baa4-a40d579ed8c4"
+ },
+ "state": "up",
+ "status": "enabled",
+ "uptime": null
+ }
+}
diff --git a/doc/api_samples/os-hypervisors/v2.88/hypervisors-show-with-servers-resp.json b/doc/api_samples/os-hypervisors/v2.88/hypervisors-show-with-servers-resp.json
new file mode 100644
index 00000000000..0196b9ca5ea
--- /dev/null
+++ b/doc/api_samples/os-hypervisors/v2.88/hypervisors-show-with-servers-resp.json
@@ -0,0 +1,27 @@
+{
+ "hypervisor": {
+ "host_ip": "192.168.1.135",
+ "hypervisor_hostname": "fake-mini",
+ "hypervisor_type": "fake",
+ "hypervisor_version": 1000,
+ "id": "a68a56ab-9c42-47c0-9309-879e4a6dbe86",
+ "servers": [
+ {
+ "name": "test_server1",
+ "uuid": "aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa"
+ },
+ {
+ "name": "test_server2",
+ "uuid": "bbbbbbbb-bbbb-bbbb-bbbb-bbbbbbbbbbbb"
+ }
+ ],
+ "service": {
+ "disabled_reason": null,
+ "host": "compute",
+ "id": "8495059a-a079-4ab4-ad6f-cf45b81c877d"
+ },
+ "state": "up",
+ "status": "enabled",
+ "uptime": null
+ }
+}
diff --git a/doc/api_samples/os-hypervisors/v2.88/hypervisors-with-servers-resp.json b/doc/api_samples/os-hypervisors/v2.88/hypervisors-with-servers-resp.json
new file mode 100644
index 00000000000..abaea1ffd4e
--- /dev/null
+++ b/doc/api_samples/os-hypervisors/v2.88/hypervisors-with-servers-resp.json
@@ -0,0 +1,20 @@
+{
+ "hypervisors": [
+ {
+ "hypervisor_hostname": "fake-mini",
+ "id": "39b0c938-8e2f-49da-bb52-e85c78d4ff2a",
+ "servers": [
+ {
+ "name": "test_server1",
+ "uuid": "aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa"
+ },
+ {
+ "name": "test_server2",
+ "uuid": "bbbbbbbb-bbbb-bbbb-bbbb-bbbbbbbbbbbb"
+ }
+ ],
+ "state": "up",
+ "status": "enabled"
+ }
+ ]
+}
\ No newline at end of file
diff --git a/doc/api_samples/os-instance-actions/v2.58/instance-actions-list-with-timestamp-filter.json b/doc/api_samples/os-instance-actions/v2.58/instance-actions-list-with-changes-since.json
similarity index 100%
rename from doc/api_samples/os-instance-actions/v2.58/instance-actions-list-with-timestamp-filter.json
rename to doc/api_samples/os-instance-actions/v2.58/instance-actions-list-with-changes-since.json
diff --git a/doc/api_samples/os-instance-actions/v2.62/instance-actions-list-with-timestamp-filter.json b/doc/api_samples/os-instance-actions/v2.62/instance-actions-list-with-changes-since.json
similarity index 100%
rename from doc/api_samples/os-instance-actions/v2.62/instance-actions-list-with-timestamp-filter.json
rename to doc/api_samples/os-instance-actions/v2.62/instance-actions-list-with-changes-since.json
diff --git a/doc/api_samples/os-instance-actions/v2.66/instance-action-get-non-admin-resp.json b/doc/api_samples/os-instance-actions/v2.66/instance-action-get-non-admin-resp.json
new file mode 100644
index 00000000000..115604d6ac8
--- /dev/null
+++ b/doc/api_samples/os-instance-actions/v2.66/instance-action-get-non-admin-resp.json
@@ -0,0 +1,21 @@
+{
+ "instanceAction": {
+ "action": "stop",
+ "events": [
+ {
+ "event": "compute_stop_instance",
+ "finish_time": "2018-04-25T01:26:34.784165",
+ "hostId": "2091634baaccdc4c5a1d57069c833e402921df696b7f970791b12ec6",
+ "result": "Success",
+ "start_time": "2018-04-25T01:26:34.612020"
+ }
+ ],
+ "instance_uuid": "79edaa44-ad4f-4af7-b994-154518c2b927",
+ "message": null,
+ "project_id": "6f70656e737461636b20342065766572",
+ "request_id": "req-8eb28d4a-db6c-4337-bab8-ce154e9c620e",
+ "start_time": "2018-04-25T01:26:34.388280",
+ "updated_at": "2018-04-25T01:26:34.784165",
+ "user_id": "fake"
+ }
+}
\ No newline at end of file
diff --git a/doc/api_samples/os-instance-actions/v2.66/instance-action-get-resp.json b/doc/api_samples/os-instance-actions/v2.66/instance-action-get-resp.json
new file mode 100644
index 00000000000..57ae490f023
--- /dev/null
+++ b/doc/api_samples/os-instance-actions/v2.66/instance-action-get-resp.json
@@ -0,0 +1,23 @@
+{
+ "instanceAction": {
+ "action": "stop",
+ "events": [
+ {
+ "event": "compute_stop_instance",
+ "finish_time": "2018-04-25T01:26:36.790544",
+ "host": "compute",
+ "hostId": "2091634baaccdc4c5a1d57069c833e402921df696b7f970791b12ec6",
+ "result": "Success",
+ "start_time": "2018-04-25T01:26:36.539271",
+ "traceback": null
+ }
+ ],
+ "instance_uuid": "4bf3473b-d550-4b65-9409-292d44ab14a2",
+ "message": null,
+ "project_id": "6f70656e737461636b20342065766572",
+ "request_id": "req-0d819d5c-1527-4669-bdf0-ffad31b5105b",
+ "start_time": "2018-04-25T01:26:36.341290",
+ "updated_at": "2018-04-25T01:26:36.790544",
+ "user_id": "admin"
+ }
+}
\ No newline at end of file
diff --git a/doc/api_samples/os-instance-actions/v2.66/instance-actions-list-resp.json b/doc/api_samples/os-instance-actions/v2.66/instance-actions-list-resp.json
new file mode 100644
index 00000000000..0b2254126b1
--- /dev/null
+++ b/doc/api_samples/os-instance-actions/v2.66/instance-actions-list-resp.json
@@ -0,0 +1,24 @@
+{
+ "instanceActions": [
+ {
+ "action": "stop",
+ "instance_uuid": "15835b6f-1e14-4cfa-9f66-1abea1a1c0d5",
+ "message": null,
+ "project_id": "6f70656e737461636b20342065766572",
+ "request_id": "req-f04d4b92-6241-42da-b82d-2cedb225c58d",
+ "start_time": "2018-04-25T01:26:36.036697",
+ "updated_at": "2018-04-25T01:26:36.525308",
+ "user_id": "admin"
+ },
+ {
+ "action": "create",
+ "instance_uuid": "15835b6f-1e14-4cfa-9f66-1abea1a1c0d5",
+ "message": null,
+ "project_id": "6f70656e737461636b20342065766572",
+ "request_id": "req-d8790618-9bbf-4df0-8af8-fc9e24de29c0",
+ "start_time": "2018-04-25T01:26:33.692125",
+ "updated_at": "2018-04-25T01:26:35.993821",
+ "user_id": "admin"
+ }
+ ]
+}
\ No newline at end of file
diff --git a/doc/api_samples/os-instance-actions/v2.66/instance-actions-list-with-changes-before.json b/doc/api_samples/os-instance-actions/v2.66/instance-actions-list-with-changes-before.json
new file mode 100644
index 00000000000..28c58384e70
--- /dev/null
+++ b/doc/api_samples/os-instance-actions/v2.66/instance-actions-list-with-changes-before.json
@@ -0,0 +1,24 @@
+{
+ "instanceActions": [
+ {
+ "action": "stop",
+ "instance_uuid": "2150964c-30fe-4214-9547-8822375aa7d0",
+ "message": null,
+ "project_id": "6f70656e737461636b20342065766572",
+ "request_id": "req-0c3b2079-0a44-474d-a5b2-7466d4b4c642",
+ "start_time": "2018-04-25T01:26:29.594237",
+ "updated_at": "2018-04-25T01:26:30.065061",
+ "user_id": "admin"
+ },
+ {
+ "action": "create",
+ "instance_uuid": "15835b6f-1e14-4cfa-9f66-1abea1a1c0d5",
+ "message": null,
+ "project_id": "6f70656e737461636b20342065766572",
+ "request_id": "req-d8790618-9bbf-4df0-8af8-fc9e24de29c0",
+ "start_time": "2018-04-25T01:26:33.692125",
+ "updated_at": "2018-04-25T01:26:35.993821",
+ "user_id": "admin"
+ }
+ ]
+}
diff --git a/doc/api_samples/os-instance-actions/v2.66/instance-actions-list-with-changes-since.json b/doc/api_samples/os-instance-actions/v2.66/instance-actions-list-with-changes-since.json
new file mode 100644
index 00000000000..346c93af7a9
--- /dev/null
+++ b/doc/api_samples/os-instance-actions/v2.66/instance-actions-list-with-changes-since.json
@@ -0,0 +1,14 @@
+{
+ "instanceActions": [
+ {
+ "action": "stop",
+ "instance_uuid": "2150964c-30fe-4214-9547-8822375aa7d0",
+ "message": null,
+ "project_id": "6f70656e737461636b20342065766572",
+ "request_id": "req-0c3b2079-0a44-474d-a5b2-7466d4b4c642",
+ "start_time": "2018-04-25T01:26:29.594237",
+ "updated_at": "2018-04-25T01:26:30.065061",
+ "user_id": "admin"
+ }
+ ]
+}
\ No newline at end of file
diff --git a/doc/api_samples/os-instance-actions/v2.66/instance-actions-list-with-limit-resp.json b/doc/api_samples/os-instance-actions/v2.66/instance-actions-list-with-limit-resp.json
new file mode 100644
index 00000000000..7126a9f2820
--- /dev/null
+++ b/doc/api_samples/os-instance-actions/v2.66/instance-actions-list-with-limit-resp.json
@@ -0,0 +1,20 @@
+{
+ "instanceActions": [
+ {
+ "action": "stop",
+ "instance_uuid": "ca3d3be5-1a40-427f-9515-f5e181f479d0",
+ "message": null,
+ "project_id": "6f70656e737461636b20342065766572",
+ "request_id": "req-4dbefbb7-d743-4d42-b0a1-a79cbe256138",
+ "start_time": "2018-04-25T01:26:28.909887",
+ "updated_at": "2018-04-25T01:26:29.400606",
+ "user_id": "admin"
+ }
+ ],
+ "links": [
+ {
+ "href": "http://openstack.example.com/v2.1/6f70656e737461636b20342065766572/servers/ca3d3be5-1a40-427f-9515-f5e181f479d0/os-instance-actions?limit=1&marker=req-4dbefbb7-d743-4d42-b0a1-a79cbe256138",
+ "rel": "next"
+ }
+ ]
+}
\ No newline at end of file
diff --git a/doc/api_samples/os-instance-actions/v2.66/instance-actions-list-with-marker-resp.json b/doc/api_samples/os-instance-actions/v2.66/instance-actions-list-with-marker-resp.json
new file mode 100644
index 00000000000..3f6921cb795
--- /dev/null
+++ b/doc/api_samples/os-instance-actions/v2.66/instance-actions-list-with-marker-resp.json
@@ -0,0 +1,14 @@
+{
+ "instanceActions": [
+ {
+ "action": "create",
+ "instance_uuid": "9bde1fd5-8435-45c5-afc1-bedd0605275b",
+ "message": null,
+ "project_id": "6f70656e737461636b20342065766572",
+ "request_id": "req-4510fb10-447f-4572-a64d-c2324547d86c",
+ "start_time": "2018-04-25T01:26:33.710291",
+ "updated_at": "2018-04-25T01:26:35.374936",
+ "user_id": "fake"
+ }
+ ]
+}
\ No newline at end of file
diff --git a/doc/api_samples/os-instance-actions/v2.84/instance-action-get-non-admin-resp.json b/doc/api_samples/os-instance-actions/v2.84/instance-action-get-non-admin-resp.json
new file mode 100644
index 00000000000..115604d6ac8
--- /dev/null
+++ b/doc/api_samples/os-instance-actions/v2.84/instance-action-get-non-admin-resp.json
@@ -0,0 +1,21 @@
+{
+ "instanceAction": {
+ "action": "stop",
+ "events": [
+ {
+ "event": "compute_stop_instance",
+ "finish_time": "2018-04-25T01:26:34.784165",
+ "hostId": "2091634baaccdc4c5a1d57069c833e402921df696b7f970791b12ec6",
+ "result": "Success",
+ "start_time": "2018-04-25T01:26:34.612020"
+ }
+ ],
+ "instance_uuid": "79edaa44-ad4f-4af7-b994-154518c2b927",
+ "message": null,
+ "project_id": "6f70656e737461636b20342065766572",
+ "request_id": "req-8eb28d4a-db6c-4337-bab8-ce154e9c620e",
+ "start_time": "2018-04-25T01:26:34.388280",
+ "updated_at": "2018-04-25T01:26:34.784165",
+ "user_id": "fake"
+ }
+}
\ No newline at end of file
diff --git a/doc/api_samples/os-instance-actions/v2.84/instance-action-get-resp.json b/doc/api_samples/os-instance-actions/v2.84/instance-action-get-resp.json
new file mode 100644
index 00000000000..3285f39ef6a
--- /dev/null
+++ b/doc/api_samples/os-instance-actions/v2.84/instance-action-get-resp.json
@@ -0,0 +1,24 @@
+{
+ "instanceAction": {
+ "action": "stop",
+ "events": [
+ {
+ "event": "compute_stop_instance",
+ "finish_time": "2018-04-25T01:26:36.790544",
+ "host": "compute",
+ "hostId": "2091634baaccdc4c5a1d57069c833e402921df696b7f970791b12ec6",
+ "result": "Success",
+ "start_time": "2018-04-25T01:26:36.539271",
+ "traceback": null,
+ "details": null
+ }
+ ],
+ "instance_uuid": "4bf3473b-d550-4b65-9409-292d44ab14a2",
+ "message": null,
+ "project_id": "6f70656e737461636b20342065766572",
+ "request_id": "req-0d819d5c-1527-4669-bdf0-ffad31b5105b",
+ "start_time": "2018-04-25T01:26:36.341290",
+ "updated_at": "2018-04-25T01:26:36.790544",
+ "user_id": "admin"
+ }
+}
diff --git a/doc/api_samples/os-instance-actions/v2.84/instance-actions-list-resp.json b/doc/api_samples/os-instance-actions/v2.84/instance-actions-list-resp.json
new file mode 100644
index 00000000000..0b2254126b1
--- /dev/null
+++ b/doc/api_samples/os-instance-actions/v2.84/instance-actions-list-resp.json
@@ -0,0 +1,24 @@
+{
+ "instanceActions": [
+ {
+ "action": "stop",
+ "instance_uuid": "15835b6f-1e14-4cfa-9f66-1abea1a1c0d5",
+ "message": null,
+ "project_id": "6f70656e737461636b20342065766572",
+ "request_id": "req-f04d4b92-6241-42da-b82d-2cedb225c58d",
+ "start_time": "2018-04-25T01:26:36.036697",
+ "updated_at": "2018-04-25T01:26:36.525308",
+ "user_id": "admin"
+ },
+ {
+ "action": "create",
+ "instance_uuid": "15835b6f-1e14-4cfa-9f66-1abea1a1c0d5",
+ "message": null,
+ "project_id": "6f70656e737461636b20342065766572",
+ "request_id": "req-d8790618-9bbf-4df0-8af8-fc9e24de29c0",
+ "start_time": "2018-04-25T01:26:33.692125",
+ "updated_at": "2018-04-25T01:26:35.993821",
+ "user_id": "admin"
+ }
+ ]
+}
\ No newline at end of file
diff --git a/doc/api_samples/os-instance-actions/v2.84/instance-actions-list-with-changes-before.json b/doc/api_samples/os-instance-actions/v2.84/instance-actions-list-with-changes-before.json
new file mode 100644
index 00000000000..28c58384e70
--- /dev/null
+++ b/doc/api_samples/os-instance-actions/v2.84/instance-actions-list-with-changes-before.json
@@ -0,0 +1,24 @@
+{
+ "instanceActions": [
+ {
+ "action": "stop",
+ "instance_uuid": "2150964c-30fe-4214-9547-8822375aa7d0",
+ "message": null,
+ "project_id": "6f70656e737461636b20342065766572",
+ "request_id": "req-0c3b2079-0a44-474d-a5b2-7466d4b4c642",
+ "start_time": "2018-04-25T01:26:29.594237",
+ "updated_at": "2018-04-25T01:26:30.065061",
+ "user_id": "admin"
+ },
+ {
+ "action": "create",
+ "instance_uuid": "15835b6f-1e14-4cfa-9f66-1abea1a1c0d5",
+ "message": null,
+ "project_id": "6f70656e737461636b20342065766572",
+ "request_id": "req-d8790618-9bbf-4df0-8af8-fc9e24de29c0",
+ "start_time": "2018-04-25T01:26:33.692125",
+ "updated_at": "2018-04-25T01:26:35.993821",
+ "user_id": "admin"
+ }
+ ]
+}
diff --git a/doc/api_samples/os-instance-actions/v2.84/instance-actions-list-with-changes-since.json b/doc/api_samples/os-instance-actions/v2.84/instance-actions-list-with-changes-since.json
new file mode 100644
index 00000000000..346c93af7a9
--- /dev/null
+++ b/doc/api_samples/os-instance-actions/v2.84/instance-actions-list-with-changes-since.json
@@ -0,0 +1,14 @@
+{
+ "instanceActions": [
+ {
+ "action": "stop",
+ "instance_uuid": "2150964c-30fe-4214-9547-8822375aa7d0",
+ "message": null,
+ "project_id": "6f70656e737461636b20342065766572",
+ "request_id": "req-0c3b2079-0a44-474d-a5b2-7466d4b4c642",
+ "start_time": "2018-04-25T01:26:29.594237",
+ "updated_at": "2018-04-25T01:26:30.065061",
+ "user_id": "admin"
+ }
+ ]
+}
\ No newline at end of file
diff --git a/doc/api_samples/os-instance-actions/v2.84/instance-actions-list-with-limit-resp.json b/doc/api_samples/os-instance-actions/v2.84/instance-actions-list-with-limit-resp.json
new file mode 100644
index 00000000000..7126a9f2820
--- /dev/null
+++ b/doc/api_samples/os-instance-actions/v2.84/instance-actions-list-with-limit-resp.json
@@ -0,0 +1,20 @@
+{
+ "instanceActions": [
+ {
+ "action": "stop",
+ "instance_uuid": "ca3d3be5-1a40-427f-9515-f5e181f479d0",
+ "message": null,
+ "project_id": "6f70656e737461636b20342065766572",
+ "request_id": "req-4dbefbb7-d743-4d42-b0a1-a79cbe256138",
+ "start_time": "2018-04-25T01:26:28.909887",
+ "updated_at": "2018-04-25T01:26:29.400606",
+ "user_id": "admin"
+ }
+ ],
+ "links": [
+ {
+ "href": "http://openstack.example.com/v2.1/6f70656e737461636b20342065766572/servers/ca3d3be5-1a40-427f-9515-f5e181f479d0/os-instance-actions?limit=1&marker=req-4dbefbb7-d743-4d42-b0a1-a79cbe256138",
+ "rel": "next"
+ }
+ ]
+}
\ No newline at end of file
diff --git a/doc/api_samples/os-instance-actions/v2.84/instance-actions-list-with-marker-resp.json b/doc/api_samples/os-instance-actions/v2.84/instance-actions-list-with-marker-resp.json
new file mode 100644
index 00000000000..3f6921cb795
--- /dev/null
+++ b/doc/api_samples/os-instance-actions/v2.84/instance-actions-list-with-marker-resp.json
@@ -0,0 +1,14 @@
+{
+ "instanceActions": [
+ {
+ "action": "create",
+ "instance_uuid": "9bde1fd5-8435-45c5-afc1-bedd0605275b",
+ "message": null,
+ "project_id": "6f70656e737461636b20342065766572",
+ "request_id": "req-4510fb10-447f-4572-a64d-c2324547d86c",
+ "start_time": "2018-04-25T01:26:33.710291",
+ "updated_at": "2018-04-25T01:26:35.374936",
+ "user_id": "fake"
+ }
+ ]
+}
\ No newline at end of file
diff --git a/doc/api_samples/keypairs/keypairs-get-resp.json b/doc/api_samples/os-keypairs/keypairs-get-resp.json
similarity index 100%
rename from doc/api_samples/keypairs/keypairs-get-resp.json
rename to doc/api_samples/os-keypairs/keypairs-get-resp.json
diff --git a/doc/api_samples/keypairs/keypairs-import-post-req.json b/doc/api_samples/os-keypairs/keypairs-import-post-req.json
similarity index 100%
rename from doc/api_samples/keypairs/keypairs-import-post-req.json
rename to doc/api_samples/os-keypairs/keypairs-import-post-req.json
diff --git a/doc/api_samples/keypairs/keypairs-import-post-resp.json b/doc/api_samples/os-keypairs/keypairs-import-post-resp.json
similarity index 100%
rename from doc/api_samples/keypairs/keypairs-import-post-resp.json
rename to doc/api_samples/os-keypairs/keypairs-import-post-resp.json
diff --git a/doc/api_samples/keypairs/keypairs-list-resp.json b/doc/api_samples/os-keypairs/keypairs-list-resp.json
similarity index 100%
rename from doc/api_samples/keypairs/keypairs-list-resp.json
rename to doc/api_samples/os-keypairs/keypairs-list-resp.json
diff --git a/doc/api_samples/keypairs/keypairs-post-req.json b/doc/api_samples/os-keypairs/keypairs-post-req.json
similarity index 100%
rename from doc/api_samples/keypairs/keypairs-post-req.json
rename to doc/api_samples/os-keypairs/keypairs-post-req.json
diff --git a/doc/api_samples/keypairs/keypairs-post-resp.json b/doc/api_samples/os-keypairs/keypairs-post-resp.json
similarity index 100%
rename from doc/api_samples/keypairs/keypairs-post-resp.json
rename to doc/api_samples/os-keypairs/keypairs-post-resp.json
diff --git a/doc/api_samples/keypairs/v2.10/keypairs-get-resp.json b/doc/api_samples/os-keypairs/v2.10/keypairs-get-resp.json
similarity index 100%
rename from doc/api_samples/keypairs/v2.10/keypairs-get-resp.json
rename to doc/api_samples/os-keypairs/v2.10/keypairs-get-resp.json
diff --git a/doc/api_samples/keypairs/v2.10/keypairs-import-post-req.json b/doc/api_samples/os-keypairs/v2.10/keypairs-import-post-req.json
similarity index 100%
rename from doc/api_samples/keypairs/v2.10/keypairs-import-post-req.json
rename to doc/api_samples/os-keypairs/v2.10/keypairs-import-post-req.json
diff --git a/doc/api_samples/keypairs/v2.10/keypairs-import-post-resp.json b/doc/api_samples/os-keypairs/v2.10/keypairs-import-post-resp.json
similarity index 100%
rename from doc/api_samples/keypairs/v2.10/keypairs-import-post-resp.json
rename to doc/api_samples/os-keypairs/v2.10/keypairs-import-post-resp.json
diff --git a/doc/api_samples/keypairs/v2.10/keypairs-list-resp.json b/doc/api_samples/os-keypairs/v2.10/keypairs-list-resp.json
similarity index 100%
rename from doc/api_samples/keypairs/v2.10/keypairs-list-resp.json
rename to doc/api_samples/os-keypairs/v2.10/keypairs-list-resp.json
diff --git a/doc/api_samples/keypairs/v2.10/keypairs-post-req.json b/doc/api_samples/os-keypairs/v2.10/keypairs-post-req.json
similarity index 100%
rename from doc/api_samples/keypairs/v2.10/keypairs-post-req.json
rename to doc/api_samples/os-keypairs/v2.10/keypairs-post-req.json
diff --git a/doc/api_samples/keypairs/v2.10/keypairs-post-resp.json b/doc/api_samples/os-keypairs/v2.10/keypairs-post-resp.json
similarity index 100%
rename from doc/api_samples/keypairs/v2.10/keypairs-post-resp.json
rename to doc/api_samples/os-keypairs/v2.10/keypairs-post-resp.json
diff --git a/doc/api_samples/keypairs/v2.2/keypairs-get-resp.json b/doc/api_samples/os-keypairs/v2.2/keypairs-get-resp.json
similarity index 100%
rename from doc/api_samples/keypairs/v2.2/keypairs-get-resp.json
rename to doc/api_samples/os-keypairs/v2.2/keypairs-get-resp.json
diff --git a/doc/api_samples/keypairs/v2.2/keypairs-import-post-req.json b/doc/api_samples/os-keypairs/v2.2/keypairs-import-post-req.json
similarity index 100%
rename from doc/api_samples/keypairs/v2.2/keypairs-import-post-req.json
rename to doc/api_samples/os-keypairs/v2.2/keypairs-import-post-req.json
diff --git a/doc/api_samples/keypairs/v2.2/keypairs-import-post-resp.json b/doc/api_samples/os-keypairs/v2.2/keypairs-import-post-resp.json
similarity index 100%
rename from doc/api_samples/keypairs/v2.2/keypairs-import-post-resp.json
rename to doc/api_samples/os-keypairs/v2.2/keypairs-import-post-resp.json
diff --git a/doc/api_samples/keypairs/v2.2/keypairs-list-resp.json b/doc/api_samples/os-keypairs/v2.2/keypairs-list-resp.json
similarity index 100%
rename from doc/api_samples/keypairs/v2.2/keypairs-list-resp.json
rename to doc/api_samples/os-keypairs/v2.2/keypairs-list-resp.json
diff --git a/doc/api_samples/keypairs/v2.2/keypairs-post-req.json b/doc/api_samples/os-keypairs/v2.2/keypairs-post-req.json
similarity index 100%
rename from doc/api_samples/keypairs/v2.2/keypairs-post-req.json
rename to doc/api_samples/os-keypairs/v2.2/keypairs-post-req.json
diff --git a/doc/api_samples/keypairs/v2.2/keypairs-post-resp.json b/doc/api_samples/os-keypairs/v2.2/keypairs-post-resp.json
similarity index 100%
rename from doc/api_samples/keypairs/v2.2/keypairs-post-resp.json
rename to doc/api_samples/os-keypairs/v2.2/keypairs-post-resp.json
diff --git a/doc/api_samples/keypairs/v2.35/keypairs-list-resp.json b/doc/api_samples/os-keypairs/v2.35/keypairs-list-resp.json
similarity index 89%
rename from doc/api_samples/keypairs/v2.35/keypairs-list-resp.json
rename to doc/api_samples/os-keypairs/v2.35/keypairs-list-resp.json
index 69c8ec4f143..786a0b6ce2f 100644
--- a/doc/api_samples/keypairs/v2.35/keypairs-list-resp.json
+++ b/doc/api_samples/os-keypairs/v2.35/keypairs-list-resp.json
@@ -11,8 +11,8 @@
],
"keypairs_links": [
{
- "href": "http://openstack.example.com/v2.1/6f70656e737461636b20342065766572/keypairs?limit=1&marker=keypair-5d935425-31d5-48a7-a0f1-e76e9813f2c3",
+ "href": "http://openstack.example.com/v2.1/6f70656e737461636b20342065766572/os-keypairs?limit=1&marker=keypair-5d935425-31d5-48a7-a0f1-e76e9813f2c3",
"rel": "next"
}
]
-}
\ No newline at end of file
+}
diff --git a/doc/api_samples/keypairs/v2.35/keypairs-list-user1-resp.json b/doc/api_samples/os-keypairs/v2.35/keypairs-list-user1-resp.json
similarity index 100%
rename from doc/api_samples/keypairs/v2.35/keypairs-list-user1-resp.json
rename to doc/api_samples/os-keypairs/v2.35/keypairs-list-user1-resp.json
diff --git a/doc/api_samples/keypairs/v2.35/keypairs-list-user2-resp.json b/doc/api_samples/os-keypairs/v2.35/keypairs-list-user2-resp.json
similarity index 88%
rename from doc/api_samples/keypairs/v2.35/keypairs-list-user2-resp.json
rename to doc/api_samples/os-keypairs/v2.35/keypairs-list-user2-resp.json
index 3c75f9ef621..e9a5e9318b6 100644
--- a/doc/api_samples/keypairs/v2.35/keypairs-list-user2-resp.json
+++ b/doc/api_samples/os-keypairs/v2.35/keypairs-list-user2-resp.json
@@ -11,8 +11,8 @@
],
"keypairs_links": [
{
- "href": "http://openstack.example.com/v2.1/6f70656e737461636b20342065766572/keypairs?limit=1&marker=keypair-5d935425-31d5-48a7-a0f1-e76e9813f2c3&user_id=user2",
+ "href": "http://openstack.example.com/v2.1/6f70656e737461636b20342065766572/os-keypairs?limit=1&marker=keypair-5d935425-31d5-48a7-a0f1-e76e9813f2c3&user_id=user2",
"rel": "next"
}
]
-}
\ No newline at end of file
+}
diff --git a/doc/api_samples/keypairs/v2.35/keypairs-post-req.json b/doc/api_samples/os-keypairs/v2.35/keypairs-post-req.json
similarity index 100%
rename from doc/api_samples/keypairs/v2.35/keypairs-post-req.json
rename to doc/api_samples/os-keypairs/v2.35/keypairs-post-req.json
diff --git a/doc/api_samples/keypairs/v2.35/keypairs-post-resp.json b/doc/api_samples/os-keypairs/v2.35/keypairs-post-resp.json
similarity index 100%
rename from doc/api_samples/keypairs/v2.35/keypairs-post-resp.json
rename to doc/api_samples/os-keypairs/v2.35/keypairs-post-resp.json
diff --git a/doc/api_samples/os-lock-server/v2.73/lock-server-with-reason.json b/doc/api_samples/os-lock-server/v2.73/lock-server-with-reason.json
new file mode 100644
index 00000000000..c307fb39bf7
--- /dev/null
+++ b/doc/api_samples/os-lock-server/v2.73/lock-server-with-reason.json
@@ -0,0 +1,3 @@
+{
+ "lock": {"locked_reason": "I don't want to work"}
+}
\ No newline at end of file
diff --git a/doc/api_samples/os-lock-server/v2.73/lock-server.json b/doc/api_samples/os-lock-server/v2.73/lock-server.json
new file mode 100644
index 00000000000..d7e96964ef2
--- /dev/null
+++ b/doc/api_samples/os-lock-server/v2.73/lock-server.json
@@ -0,0 +1,3 @@
+{
+ "lock": null
+}
\ No newline at end of file
diff --git a/doc/api_samples/os-lock-server/v2.73/unlock-server.json b/doc/api_samples/os-lock-server/v2.73/unlock-server.json
new file mode 100644
index 00000000000..0eba7e72529
--- /dev/null
+++ b/doc/api_samples/os-lock-server/v2.73/unlock-server.json
@@ -0,0 +1,3 @@
+{
+ "unlock": null
+}
\ No newline at end of file
diff --git a/doc/api_samples/os-migrate-server/v2.68/live-migrate-server.json b/doc/api_samples/os-migrate-server/v2.68/live-migrate-server.json
new file mode 100644
index 00000000000..0777861df53
--- /dev/null
+++ b/doc/api_samples/os-migrate-server/v2.68/live-migrate-server.json
@@ -0,0 +1,6 @@
+{
+ "os-migrateLive": {
+ "host": "01c0cadef72d47e28a672a76060d492c",
+ "block_migration": "auto"
+ }
+}
diff --git a/doc/api_samples/os-migrations/migrations-get.json b/doc/api_samples/os-migrations/migrations-get.json
index 91775be7758..bdcc768681d 100644
--- a/doc/api_samples/os-migrations/migrations-get.json
+++ b/doc/api_samples/os-migrations/migrations-get.json
@@ -6,12 +6,12 @@
"dest_host": "1.2.3.4",
"dest_node": "node2",
"id": 1234,
- "instance_uuid": "instance_id_123",
+ "instance_uuid": "8600d31b-d1a1-4632-b2ff-45c2be1a70ff",
"new_instance_type_id": 2,
"old_instance_type_id": 1,
"source_compute": "compute1",
"source_node": "node1",
- "status": "Done",
+ "status": "done",
"updated_at": "2012-10-29T13:42:02.000000"
},
{
@@ -20,12 +20,12 @@
"dest_host": "5.6.7.8",
"dest_node": "node20",
"id": 5678,
- "instance_uuid": "instance_id_456",
+ "instance_uuid": "9128d044-7b61-403e-b766-7547076ff6c1",
"new_instance_type_id": 6,
"old_instance_type_id": 5,
"source_compute": "compute10",
"source_node": "node10",
- "status": "Done",
+ "status": "done",
"updated_at": "2013-10-22T13:42:02.000000"
}
]
diff --git a/doc/api_samples/os-migrations/v2.59/migrations-get-with-timestamp-filter.json b/doc/api_samples/os-migrations/v2.59/migrations-get-with-changes-since.json
similarity index 100%
rename from doc/api_samples/os-migrations/v2.59/migrations-get-with-timestamp-filter.json
rename to doc/api_samples/os-migrations/v2.59/migrations-get-with-changes-since.json
diff --git a/doc/api_samples/os-migrations/v2.66/migrations-get-with-changes-before.json b/doc/api_samples/os-migrations/v2.66/migrations-get-with-changes-before.json
new file mode 100644
index 00000000000..e829087f87d
--- /dev/null
+++ b/doc/api_samples/os-migrations/v2.66/migrations-get-with-changes-before.json
@@ -0,0 +1,30 @@
+{
+ "migrations": [
+ {
+ "created_at": "2016-01-29T11:42:02.000000",
+ "dest_compute": "compute2",
+ "dest_host": "1.2.3.4",
+ "dest_node": "node2",
+ "id": 1,
+ "instance_uuid": "8600d31b-d1a1-4632-b2ff-45c2be1a70ff",
+ "links": [
+ {
+ "href": "http://openstack.example.com/v2.1/6f70656e737461636b20342065766572/servers/8600d31b-d1a1-4632-b2ff-45c2be1a70ff/migrations/1",
+ "rel": "self"
+ },
+ {
+ "href": "http://openstack.example.com/6f70656e737461636b20342065766572/servers/8600d31b-d1a1-4632-b2ff-45c2be1a70ff/migrations/1",
+ "rel": "bookmark"
+ }
+ ],
+ "new_instance_type_id": 1,
+ "old_instance_type_id": 1,
+ "source_compute": "compute1",
+ "source_node": "node1",
+ "status": "running",
+ "migration_type": "live-migration",
+ "updated_at": "2016-01-29T11:42:02.000000",
+ "uuid": "12341d4b-346a-40d0-83c6-5f4f6892b650"
+ }
+ ]
+}
diff --git a/doc/api_samples/os-migrations/v2.66/migrations-get-with-changes-since.json b/doc/api_samples/os-migrations/v2.66/migrations-get-with-changes-since.json
new file mode 100644
index 00000000000..7d36fe4548b
--- /dev/null
+++ b/doc/api_samples/os-migrations/v2.66/migrations-get-with-changes-since.json
@@ -0,0 +1,36 @@
+{
+ "migrations": [
+ {
+ "created_at": "2016-06-23T14:42:02.000000",
+ "dest_compute": "compute20",
+ "dest_host": "5.6.7.8",
+ "dest_node": "node20",
+ "id": 4,
+ "instance_uuid": "9128d044-7b61-403e-b766-7547076ff6c1",
+ "new_instance_type_id": 6,
+ "old_instance_type_id": 5,
+ "source_compute": "compute10",
+ "source_node": "node10",
+ "status": "migrating",
+ "migration_type": "resize",
+ "updated_at": "2016-06-23T14:42:02.000000",
+ "uuid": "42341d4b-346a-40d0-83c6-5f4f6892b650"
+ },
+ {
+ "created_at": "2016-06-23T13:42:02.000000",
+ "dest_compute": "compute20",
+ "dest_host": "5.6.7.8",
+ "dest_node": "node20",
+ "id": 3,
+ "instance_uuid": "9128d044-7b61-403e-b766-7547076ff6c1",
+ "new_instance_type_id": 6,
+ "old_instance_type_id": 5,
+ "source_compute": "compute10",
+ "source_node": "node10",
+ "status": "error",
+ "migration_type": "resize",
+ "updated_at": "2016-06-23T13:42:02.000000",
+ "uuid": "32341d4b-346a-40d0-83c6-5f4f6892b650"
+ }
+ ]
+}
diff --git a/doc/api_samples/os-migrations/v2.66/migrations-get-with-limit.json b/doc/api_samples/os-migrations/v2.66/migrations-get-with-limit.json
new file mode 100644
index 00000000000..328106bb3f1
--- /dev/null
+++ b/doc/api_samples/os-migrations/v2.66/migrations-get-with-limit.json
@@ -0,0 +1,26 @@
+ {
+ "migrations": [
+ {
+ "created_at": "2016-06-23T14:42:02.000000",
+ "dest_compute": "compute20",
+ "dest_host": "5.6.7.8",
+ "dest_node": "node20",
+ "id": 4,
+ "instance_uuid": "9128d044-7b61-403e-b766-7547076ff6c1",
+ "new_instance_type_id": 6,
+ "old_instance_type_id": 5,
+ "source_compute": "compute10",
+ "source_node": "node10",
+ "status": "migrating",
+ "migration_type": "resize",
+ "updated_at": "2016-06-23T14:42:02.000000",
+ "uuid": "42341d4b-346a-40d0-83c6-5f4f6892b650"
+ }
+ ],
+ "migrations_links": [
+ {
+ "href": "http://openstack.example.com/v2.1/6f70656e737461636b20342065766572/os-migrations?limit=1&marker=42341d4b-346a-40d0-83c6-5f4f6892b650",
+ "rel": "next"
+ }
+ ]
+}
diff --git a/doc/api_samples/os-migrations/v2.66/migrations-get-with-marker.json b/doc/api_samples/os-migrations/v2.66/migrations-get-with-marker.json
new file mode 100644
index 00000000000..e829087f87d
--- /dev/null
+++ b/doc/api_samples/os-migrations/v2.66/migrations-get-with-marker.json
@@ -0,0 +1,30 @@
+{
+ "migrations": [
+ {
+ "created_at": "2016-01-29T11:42:02.000000",
+ "dest_compute": "compute2",
+ "dest_host": "1.2.3.4",
+ "dest_node": "node2",
+ "id": 1,
+ "instance_uuid": "8600d31b-d1a1-4632-b2ff-45c2be1a70ff",
+ "links": [
+ {
+ "href": "http://openstack.example.com/v2.1/6f70656e737461636b20342065766572/servers/8600d31b-d1a1-4632-b2ff-45c2be1a70ff/migrations/1",
+ "rel": "self"
+ },
+ {
+ "href": "http://openstack.example.com/6f70656e737461636b20342065766572/servers/8600d31b-d1a1-4632-b2ff-45c2be1a70ff/migrations/1",
+ "rel": "bookmark"
+ }
+ ],
+ "new_instance_type_id": 1,
+ "old_instance_type_id": 1,
+ "source_compute": "compute1",
+ "source_node": "node1",
+ "status": "running",
+ "migration_type": "live-migration",
+ "updated_at": "2016-01-29T11:42:02.000000",
+ "uuid": "12341d4b-346a-40d0-83c6-5f4f6892b650"
+ }
+ ]
+}
diff --git a/doc/api_samples/os-migrations/v2.66/migrations-get.json b/doc/api_samples/os-migrations/v2.66/migrations-get.json
new file mode 100644
index 00000000000..42ffca89638
--- /dev/null
+++ b/doc/api_samples/os-migrations/v2.66/migrations-get.json
@@ -0,0 +1,78 @@
+{
+ "migrations": [
+ {
+ "created_at": "2016-06-23T14:42:02.000000",
+ "dest_compute": "compute20",
+ "dest_host": "5.6.7.8",
+ "dest_node": "node20",
+ "id": 4,
+ "instance_uuid": "9128d044-7b61-403e-b766-7547076ff6c1",
+ "new_instance_type_id": 6,
+ "old_instance_type_id": 5,
+ "source_compute": "compute10",
+ "source_node": "node10",
+ "status": "migrating",
+ "migration_type": "resize",
+ "updated_at": "2016-06-23T14:42:02.000000",
+ "uuid": "42341d4b-346a-40d0-83c6-5f4f6892b650"
+ },
+ {
+ "created_at": "2016-06-23T13:42:02.000000",
+ "dest_compute": "compute20",
+ "dest_host": "5.6.7.8",
+ "dest_node": "node20",
+ "id": 3,
+ "instance_uuid": "9128d044-7b61-403e-b766-7547076ff6c1",
+ "new_instance_type_id": 6,
+ "old_instance_type_id": 5,
+ "source_compute": "compute10",
+ "source_node": "node10",
+ "status": "error",
+ "migration_type": "resize",
+ "updated_at": "2016-06-23T13:42:02.000000",
+ "uuid": "32341d4b-346a-40d0-83c6-5f4f6892b650"
+ },
+ {
+ "created_at": "2016-01-29T12:42:02.000000",
+ "dest_compute": "compute2",
+ "dest_host": "1.2.3.4",
+ "dest_node": "node2",
+ "id": 2,
+ "instance_uuid": "8600d31b-d1a1-4632-b2ff-45c2be1a70ff",
+ "new_instance_type_id": 1,
+ "old_instance_type_id": 1,
+ "source_compute": "compute1",
+ "source_node": "node1",
+ "status": "error",
+ "migration_type": "live-migration",
+ "updated_at": "2016-01-29T12:42:02.000000",
+ "uuid": "22341d4b-346a-40d0-83c6-5f4f6892b650"
+ },
+ {
+ "created_at": "2016-01-29T11:42:02.000000",
+ "dest_compute": "compute2",
+ "dest_host": "1.2.3.4",
+ "dest_node": "node2",
+ "id": 1,
+ "instance_uuid": "8600d31b-d1a1-4632-b2ff-45c2be1a70ff",
+ "links": [
+ {
+ "href": "http://openstack.example.com/v2.1/6f70656e737461636b20342065766572/servers/8600d31b-d1a1-4632-b2ff-45c2be1a70ff/migrations/1",
+ "rel": "self"
+ },
+ {
+ "href": "http://openstack.example.com/6f70656e737461636b20342065766572/servers/8600d31b-d1a1-4632-b2ff-45c2be1a70ff/migrations/1",
+ "rel": "bookmark"
+ }
+ ],
+ "new_instance_type_id": 1,
+ "old_instance_type_id": 1,
+ "source_compute": "compute1",
+ "source_node": "node1",
+ "status": "running",
+ "migration_type": "live-migration",
+ "updated_at": "2016-01-29T11:42:02.000000",
+ "uuid": "12341d4b-346a-40d0-83c6-5f4f6892b650"
+ }
+ ]
+}
diff --git a/doc/api_samples/os-migrations/v2.80/migrations-get-with-changes-before.json b/doc/api_samples/os-migrations/v2.80/migrations-get-with-changes-before.json
new file mode 100644
index 00000000000..359d965c903
--- /dev/null
+++ b/doc/api_samples/os-migrations/v2.80/migrations-get-with-changes-before.json
@@ -0,0 +1,32 @@
+{
+ "migrations": [
+ {
+ "created_at": "2016-01-29T11:42:02.000000",
+ "dest_compute": "compute2",
+ "dest_host": "1.2.3.4",
+ "dest_node": "node2",
+ "id": 1,
+ "instance_uuid": "8600d31b-d1a1-4632-b2ff-45c2be1a70ff",
+ "links": [
+ {
+ "href": "http://openstack.example.com/v2.1/6f70656e737461636b20342065766572/servers/8600d31b-d1a1-4632-b2ff-45c2be1a70ff/migrations/1",
+ "rel": "self"
+ },
+ {
+ "href": "http://openstack.example.com/6f70656e737461636b20342065766572/servers/8600d31b-d1a1-4632-b2ff-45c2be1a70ff/migrations/1",
+ "rel": "bookmark"
+ }
+ ],
+ "new_instance_type_id": 1,
+ "old_instance_type_id": 1,
+ "source_compute": "compute1",
+ "source_node": "node1",
+ "status": "running",
+ "migration_type": "live-migration",
+ "updated_at": "2016-01-29T11:42:02.000000",
+ "uuid": "12341d4b-346a-40d0-83c6-5f4f6892b650",
+ "user_id": "5c48ebaa-193f-4c5d-948a-f559cc92cd5e",
+ "project_id": "ef92ccff-00f3-46e4-b015-811110e36ee4"
+ }
+ ]
+}
diff --git a/doc/api_samples/os-migrations/v2.80/migrations-get-with-changes-since.json b/doc/api_samples/os-migrations/v2.80/migrations-get-with-changes-since.json
new file mode 100644
index 00000000000..86c52f863f1
--- /dev/null
+++ b/doc/api_samples/os-migrations/v2.80/migrations-get-with-changes-since.json
@@ -0,0 +1,40 @@
+{
+ "migrations": [
+ {
+ "created_at": "2016-06-23T14:42:02.000000",
+ "dest_compute": "compute20",
+ "dest_host": "5.6.7.8",
+ "dest_node": "node20",
+ "id": 4,
+ "instance_uuid": "9128d044-7b61-403e-b766-7547076ff6c1",
+ "new_instance_type_id": 6,
+ "old_instance_type_id": 5,
+ "source_compute": "compute10",
+ "source_node": "node10",
+ "status": "migrating",
+ "migration_type": "resize",
+ "updated_at": "2016-06-23T14:42:02.000000",
+ "uuid": "42341d4b-346a-40d0-83c6-5f4f6892b650",
+ "user_id": "78348f0e-97ee-4d70-ad34-189692673ea2",
+ "project_id": "9842f0f7-1229-4355-afe7-15ebdbb8c3d8"
+ },
+ {
+ "created_at": "2016-06-23T13:42:02.000000",
+ "dest_compute": "compute20",
+ "dest_host": "5.6.7.8",
+ "dest_node": "node20",
+ "id": 3,
+ "instance_uuid": "9128d044-7b61-403e-b766-7547076ff6c1",
+ "new_instance_type_id": 6,
+ "old_instance_type_id": 5,
+ "source_compute": "compute10",
+ "source_node": "node10",
+ "status": "error",
+ "migration_type": "resize",
+ "updated_at": "2016-06-23T13:42:02.000000",
+ "uuid": "32341d4b-346a-40d0-83c6-5f4f6892b650",
+ "user_id": "78348f0e-97ee-4d70-ad34-189692673ea2",
+ "project_id": "9842f0f7-1229-4355-afe7-15ebdbb8c3d8"
+ }
+ ]
+}
diff --git a/doc/api_samples/os-migrations/v2.80/migrations-get-with-limit.json b/doc/api_samples/os-migrations/v2.80/migrations-get-with-limit.json
new file mode 100644
index 00000000000..a2ed0e1e05b
--- /dev/null
+++ b/doc/api_samples/os-migrations/v2.80/migrations-get-with-limit.json
@@ -0,0 +1,28 @@
+ {
+ "migrations": [
+ {
+ "created_at": "2016-06-23T14:42:02.000000",
+ "dest_compute": "compute20",
+ "dest_host": "5.6.7.8",
+ "dest_node": "node20",
+ "id": 4,
+ "instance_uuid": "9128d044-7b61-403e-b766-7547076ff6c1",
+ "new_instance_type_id": 6,
+ "old_instance_type_id": 5,
+ "source_compute": "compute10",
+ "source_node": "node10",
+ "status": "migrating",
+ "migration_type": "resize",
+ "updated_at": "2016-06-23T14:42:02.000000",
+ "uuid": "42341d4b-346a-40d0-83c6-5f4f6892b650",
+ "user_id": "78348f0e-97ee-4d70-ad34-189692673ea2",
+ "project_id": "9842f0f7-1229-4355-afe7-15ebdbb8c3d8"
+ }
+ ],
+ "migrations_links": [
+ {
+ "href": "http://openstack.example.com/v2.1/6f70656e737461636b20342065766572/os-migrations?limit=1&marker=42341d4b-346a-40d0-83c6-5f4f6892b650",
+ "rel": "next"
+ }
+ ]
+}
diff --git a/doc/api_samples/os-migrations/v2.80/migrations-get-with-marker.json b/doc/api_samples/os-migrations/v2.80/migrations-get-with-marker.json
new file mode 100644
index 00000000000..359d965c903
--- /dev/null
+++ b/doc/api_samples/os-migrations/v2.80/migrations-get-with-marker.json
@@ -0,0 +1,32 @@
+{
+ "migrations": [
+ {
+ "created_at": "2016-01-29T11:42:02.000000",
+ "dest_compute": "compute2",
+ "dest_host": "1.2.3.4",
+ "dest_node": "node2",
+ "id": 1,
+ "instance_uuid": "8600d31b-d1a1-4632-b2ff-45c2be1a70ff",
+ "links": [
+ {
+ "href": "http://openstack.example.com/v2.1/6f70656e737461636b20342065766572/servers/8600d31b-d1a1-4632-b2ff-45c2be1a70ff/migrations/1",
+ "rel": "self"
+ },
+ {
+ "href": "http://openstack.example.com/6f70656e737461636b20342065766572/servers/8600d31b-d1a1-4632-b2ff-45c2be1a70ff/migrations/1",
+ "rel": "bookmark"
+ }
+ ],
+ "new_instance_type_id": 1,
+ "old_instance_type_id": 1,
+ "source_compute": "compute1",
+ "source_node": "node1",
+ "status": "running",
+ "migration_type": "live-migration",
+ "updated_at": "2016-01-29T11:42:02.000000",
+ "uuid": "12341d4b-346a-40d0-83c6-5f4f6892b650",
+ "user_id": "5c48ebaa-193f-4c5d-948a-f559cc92cd5e",
+ "project_id": "ef92ccff-00f3-46e4-b015-811110e36ee4"
+ }
+ ]
+}
diff --git a/doc/api_samples/os-migrations/v2.80/migrations-get-with-user-or-project-id.json b/doc/api_samples/os-migrations/v2.80/migrations-get-with-user-or-project-id.json
new file mode 100644
index 00000000000..f7994fd400c
--- /dev/null
+++ b/doc/api_samples/os-migrations/v2.80/migrations-get-with-user-or-project-id.json
@@ -0,0 +1,50 @@
+{
+ "migrations": [
+ {
+ "created_at": "2016-01-29T12:42:02.000000",
+ "dest_compute": "compute2",
+ "dest_host": "1.2.3.4",
+ "dest_node": "node2",
+ "id": 2,
+ "instance_uuid": "8600d31b-d1a1-4632-b2ff-45c2be1a70ff",
+ "new_instance_type_id": 1,
+ "old_instance_type_id": 1,
+ "source_compute": "compute1",
+ "source_node": "node1",
+ "status": "error",
+ "migration_type": "live-migration",
+ "updated_at": "2016-01-29T12:42:02.000000",
+ "uuid": "22341d4b-346a-40d0-83c6-5f4f6892b650",
+ "user_id": "5c48ebaa-193f-4c5d-948a-f559cc92cd5e",
+ "project_id": "ef92ccff-00f3-46e4-b015-811110e36ee4"
+ },
+ {
+ "created_at": "2016-01-29T11:42:02.000000",
+ "dest_compute": "compute2",
+ "dest_host": "1.2.3.4",
+ "dest_node": "node2",
+ "id": 1,
+ "instance_uuid": "8600d31b-d1a1-4632-b2ff-45c2be1a70ff",
+ "links": [
+ {
+ "href": "http://openstack.example.com/v2.1/6f70656e737461636b20342065766572/servers/8600d31b-d1a1-4632-b2ff-45c2be1a70ff/migrations/1",
+ "rel": "self"
+ },
+ {
+ "href": "http://openstack.example.com/6f70656e737461636b20342065766572/servers/8600d31b-d1a1-4632-b2ff-45c2be1a70ff/migrations/1",
+ "rel": "bookmark"
+ }
+ ],
+ "new_instance_type_id": 1,
+ "old_instance_type_id": 1,
+ "source_compute": "compute1",
+ "source_node": "node1",
+ "status": "running",
+ "migration_type": "live-migration",
+ "updated_at": "2016-01-29T11:42:02.000000",
+ "uuid": "12341d4b-346a-40d0-83c6-5f4f6892b650",
+ "user_id": "5c48ebaa-193f-4c5d-948a-f559cc92cd5e",
+ "project_id": "ef92ccff-00f3-46e4-b015-811110e36ee4"
+ }
+ ]
+}
diff --git a/doc/api_samples/os-migrations/v2.80/migrations-get.json b/doc/api_samples/os-migrations/v2.80/migrations-get.json
new file mode 100644
index 00000000000..ca568263946
--- /dev/null
+++ b/doc/api_samples/os-migrations/v2.80/migrations-get.json
@@ -0,0 +1,86 @@
+{
+ "migrations": [
+ {
+ "created_at": "2016-06-23T14:42:02.000000",
+ "dest_compute": "compute20",
+ "dest_host": "5.6.7.8",
+ "dest_node": "node20",
+ "id": 4,
+ "instance_uuid": "9128d044-7b61-403e-b766-7547076ff6c1",
+ "new_instance_type_id": 6,
+ "old_instance_type_id": 5,
+ "source_compute": "compute10",
+ "source_node": "node10",
+ "status": "migrating",
+ "migration_type": "resize",
+ "updated_at": "2016-06-23T14:42:02.000000",
+ "uuid": "42341d4b-346a-40d0-83c6-5f4f6892b650",
+ "user_id": "78348f0e-97ee-4d70-ad34-189692673ea2",
+ "project_id": "9842f0f7-1229-4355-afe7-15ebdbb8c3d8"
+ },
+ {
+ "created_at": "2016-06-23T13:42:02.000000",
+ "dest_compute": "compute20",
+ "dest_host": "5.6.7.8",
+ "dest_node": "node20",
+ "id": 3,
+ "instance_uuid": "9128d044-7b61-403e-b766-7547076ff6c1",
+ "new_instance_type_id": 6,
+ "old_instance_type_id": 5,
+ "source_compute": "compute10",
+ "source_node": "node10",
+ "status": "error",
+ "migration_type": "resize",
+ "updated_at": "2016-06-23T13:42:02.000000",
+ "uuid": "32341d4b-346a-40d0-83c6-5f4f6892b650",
+ "user_id": "78348f0e-97ee-4d70-ad34-189692673ea2",
+ "project_id": "9842f0f7-1229-4355-afe7-15ebdbb8c3d8"
+ },
+ {
+ "created_at": "2016-01-29T12:42:02.000000",
+ "dest_compute": "compute2",
+ "dest_host": "1.2.3.4",
+ "dest_node": "node2",
+ "id": 2,
+ "instance_uuid": "8600d31b-d1a1-4632-b2ff-45c2be1a70ff",
+ "new_instance_type_id": 1,
+ "old_instance_type_id": 1,
+ "source_compute": "compute1",
+ "source_node": "node1",
+ "status": "error",
+ "migration_type": "live-migration",
+ "updated_at": "2016-01-29T12:42:02.000000",
+ "uuid": "22341d4b-346a-40d0-83c6-5f4f6892b650",
+ "user_id": "5c48ebaa-193f-4c5d-948a-f559cc92cd5e",
+ "project_id": "ef92ccff-00f3-46e4-b015-811110e36ee4"
+ },
+ {
+ "created_at": "2016-01-29T11:42:02.000000",
+ "dest_compute": "compute2",
+ "dest_host": "1.2.3.4",
+ "dest_node": "node2",
+ "id": 1,
+ "instance_uuid": "8600d31b-d1a1-4632-b2ff-45c2be1a70ff",
+ "links": [
+ {
+ "href": "http://openstack.example.com/v2.1/6f70656e737461636b20342065766572/servers/8600d31b-d1a1-4632-b2ff-45c2be1a70ff/migrations/1",
+ "rel": "self"
+ },
+ {
+ "href": "http://openstack.example.com/6f70656e737461636b20342065766572/servers/8600d31b-d1a1-4632-b2ff-45c2be1a70ff/migrations/1",
+ "rel": "bookmark"
+ }
+ ],
+ "new_instance_type_id": 1,
+ "old_instance_type_id": 1,
+ "source_compute": "compute1",
+ "source_node": "node1",
+ "status": "running",
+ "migration_type": "live-migration",
+ "updated_at": "2016-01-29T11:42:02.000000",
+ "uuid": "12341d4b-346a-40d0-83c6-5f4f6892b650",
+ "user_id": "5c48ebaa-193f-4c5d-948a-f559cc92cd5e",
+ "project_id": "ef92ccff-00f3-46e4-b015-811110e36ee4"
+ }
+ ]
+}
diff --git a/doc/api_samples/os-networks/network-show-resp.json b/doc/api_samples/os-networks/network-show-resp.json
index 78b34950173..d6d12e41936 100644
--- a/doc/api_samples/os-networks/network-show-resp.json
+++ b/doc/api_samples/os-networks/network-show-resp.json
@@ -1,36 +1,36 @@
{
"network": {
- "bridge": "br100",
- "bridge_interface": "eth0",
- "broadcast": "10.0.0.7",
- "cidr": "10.0.0.0/29",
+ "bridge": null,
+ "bridge_interface": null,
+ "broadcast": null,
+ "cidr": null,
"cidr_v6": null,
- "created_at": "2011-08-15T06:19:19.387525",
- "deleted": false,
+ "created_at": null,
+ "deleted": null,
"deleted_at": null,
- "dhcp_server": "10.0.0.1",
- "dhcp_start": "10.0.0.3",
+ "dhcp_server": null,
+ "dhcp_start": null,
"dns1": null,
"dns2": null,
- "enable_dhcp": true,
- "gateway": "10.0.0.1",
+ "enable_dhcp": null,
+ "gateway": null,
"gateway_v6": null,
- "host": "nsokolov-desktop",
+ "host": null,
"id": "20c8acc0-f747-4d71-a389-46d078ebf047",
- "injected": false,
- "label": "mynet_0",
+ "injected": null,
+ "label": "private",
"mtu": null,
- "multi_host": false,
- "netmask": "255.255.255.248",
+ "multi_host": null,
+ "netmask": null,
"netmask_v6": null,
"priority": null,
- "project_id": "6133f8b603924f45bc0c9e21f6df12fa",
+ "project_id": null,
"rxtx_base": null,
- "share_address": false,
- "updated_at": "2011-08-16T09:26:13.048257",
- "vlan": 100,
- "vpn_private_address": "10.0.0.2",
- "vpn_public_address": "127.0.0.1",
- "vpn_public_port": 1000
+ "share_address": null,
+ "updated_at": null,
+ "vlan": null,
+ "vpn_private_address": null,
+ "vpn_public_address": null,
+ "vpn_public_port": null
}
-}
\ No newline at end of file
+}
diff --git a/doc/api_samples/os-networks/networks-list-resp.json b/doc/api_samples/os-networks/networks-list-resp.json
index 655fcaa8cca..886beb71708 100644
--- a/doc/api_samples/os-networks/networks-list-resp.json
+++ b/doc/api_samples/os-networks/networks-list-resp.json
@@ -1,72 +1,38 @@
{
"networks": [
{
- "bridge": "br100",
- "bridge_interface": "eth0",
- "broadcast": "10.0.0.7",
- "cidr": "10.0.0.0/29",
+ "bridge": null,
+ "bridge_interface": null,
+ "broadcast": null,
+ "cidr": null,
"cidr_v6": null,
- "created_at": "2011-08-15T06:19:19.387525",
- "deleted": false,
+ "created_at": null,
+ "deleted": null,
"deleted_at": null,
- "dhcp_server": "10.0.0.1",
- "dhcp_start": "10.0.0.3",
+ "dhcp_server": null,
+ "dhcp_start": null,
"dns1": null,
"dns2": null,
- "enable_dhcp": true,
- "gateway": "10.0.0.1",
- "gateway_v6": null,
- "host": "nsokolov-desktop",
- "id": "20c8acc0-f747-4d71-a389-46d078ebf047",
- "injected": false,
- "label": "mynet_0",
- "mtu": null,
- "multi_host": false,
- "netmask": "255.255.255.248",
- "netmask_v6": null,
- "priority": null,
- "project_id": "6133f8b603924f45bc0c9e21f6df12fa",
- "rxtx_base": null,
- "share_address": false,
- "updated_at": "2011-08-16T09:26:13.048257",
- "vlan": 100,
- "vpn_private_address": "10.0.0.2",
- "vpn_public_address": "127.0.0.1",
- "vpn_public_port": 1000
- },
- {
- "bridge": "br101",
- "bridge_interface": "eth0",
- "broadcast": "10.0.0.15",
- "cidr": "10.0.0.10/29",
- "cidr_v6": null,
- "created_at": "2011-08-15T06:19:19.885495",
- "deleted": false,
- "deleted_at": null,
- "dhcp_server": "10.0.0.9",
- "dhcp_start": "10.0.0.11",
- "dns1": null,
- "dns2": null,
- "enable_dhcp": true,
- "gateway": "10.0.0.9",
+ "enable_dhcp": null,
+ "gateway": null,
"gateway_v6": null,
"host": null,
- "id": "20c8acc0-f747-4d71-a389-46d078ebf000",
- "injected": false,
- "label": "mynet_1",
+ "id": "20c8acc0-f747-4d71-a389-46d078ebf047",
+ "injected": null,
+ "label": "private",
"mtu": null,
- "multi_host": false,
- "netmask": "255.255.255.248",
+ "multi_host": null,
+ "netmask": null,
"netmask_v6": null,
"priority": null,
"project_id": null,
"rxtx_base": null,
- "share_address": false,
+ "share_address": null,
"updated_at": null,
- "vlan": 101,
- "vpn_private_address": "10.0.0.10",
+ "vlan": null,
+ "vpn_private_address": null,
"vpn_public_address": null,
- "vpn_public_port": 1001
+ "vpn_public_port": null
}
]
-}
\ No newline at end of file
+}
diff --git a/doc/api_samples/os-preserve-ephemeral-rebuild/server-action-rebuild-preserve-ephemeral-resp.json b/doc/api_samples/os-preserve-ephemeral-rebuild/server-action-rebuild-preserve-ephemeral-resp.json
index 38ef12daaa8..f65af997aae 100644
--- a/doc/api_samples/os-preserve-ephemeral-rebuild/server-action-rebuild-preserve-ephemeral-resp.json
+++ b/doc/api_samples/os-preserve-ephemeral-rebuild/server-action-rebuild-preserve-ephemeral-resp.json
@@ -6,7 +6,7 @@
"addresses": {
"private": [
{
- "addr": "192.168.0.3",
+ "addr": "192.168.1.30",
"version": 4
}
]
diff --git a/doc/api_samples/os-quota-class-sets/quota-classes-show-get-resp.json b/doc/api_samples/os-quota-class-sets/quota-classes-show-get-resp.json
index 2fcfa2b9dd6..9a9fe365072 100644
--- a/doc/api_samples/os-quota-class-sets/quota-classes-show-get-resp.json
+++ b/doc/api_samples/os-quota-class-sets/quota-classes-show-get-resp.json
@@ -2,7 +2,7 @@
"quota_class_set": {
"cores": 20,
"fixed_ips": -1,
- "floating_ips": 10,
+ "floating_ips": -1,
"id": "test_class",
"injected_file_content_bytes": 10240,
"injected_file_path_bytes": 255,
@@ -11,8 +11,7 @@
"key_pairs": 100,
"metadata_items": 128,
"ram": 51200,
- "security_group_rules": 20,
- "security_groups": 10,
- "networks": 3
+ "security_group_rules": -1,
+ "security_groups": -1
}
}
diff --git a/doc/api_samples/os-quota-class-sets/quota-classes-update-post-req.json b/doc/api_samples/os-quota-class-sets/quota-classes-update-post-req.json
index 4e3af3fa86b..736489fdf73 100644
--- a/doc/api_samples/os-quota-class-sets/quota-classes-update-post-req.json
+++ b/doc/api_samples/os-quota-class-sets/quota-classes-update-post-req.json
@@ -3,15 +3,14 @@
"instances": 50,
"cores": 50,
"ram": 51200,
- "floating_ips": 10,
+ "floating_ips": -1,
"fixed_ips": -1,
"metadata_items": 128,
"injected_files": 5,
"injected_file_content_bytes": 10240,
"injected_file_path_bytes": 255,
- "security_groups": 10,
- "security_group_rules": 20,
- "key_pairs": 100,
- "networks": 3
+ "security_groups": -1,
+ "security_group_rules": -1,
+ "key_pairs": 100
}
}
diff --git a/doc/api_samples/os-quota-class-sets/quota-classes-update-post-resp.json b/doc/api_samples/os-quota-class-sets/quota-classes-update-post-resp.json
index c58474b539b..90c88fc450d 100644
--- a/doc/api_samples/os-quota-class-sets/quota-classes-update-post-resp.json
+++ b/doc/api_samples/os-quota-class-sets/quota-classes-update-post-resp.json
@@ -2,7 +2,7 @@
"quota_class_set": {
"cores": 50,
"fixed_ips": -1,
- "floating_ips": 10,
+ "floating_ips": -1,
"injected_file_content_bytes": 10240,
"injected_file_path_bytes": 255,
"injected_files": 5,
@@ -10,8 +10,7 @@
"key_pairs": 100,
"metadata_items": 128,
"ram": 51200,
- "security_group_rules": 20,
- "security_groups": 10,
- "networks": 3
+ "security_group_rules": -1,
+ "security_groups": -1
}
}
diff --git a/doc/api_samples/os-quota-sets-noop/quotas-show-defaults-get-resp.json b/doc/api_samples/os-quota-sets-noop/quotas-show-defaults-get-resp.json
index 620fa8ed357..714ca5923ae 100644
--- a/doc/api_samples/os-quota-sets-noop/quotas-show-defaults-get-resp.json
+++ b/doc/api_samples/os-quota-sets-noop/quotas-show-defaults-get-resp.json
@@ -10,11 +10,10 @@
"instances": -1,
"key_pairs": -1,
"metadata_items": -1,
- "networks": -1,
"ram": -1,
"security_group_rules": -1,
"security_groups": -1,
"server_group_members": -1,
"server_groups": -1
}
-}
\ No newline at end of file
+}
diff --git a/doc/api_samples/os-quota-sets-noop/quotas-show-detail-get-resp.json b/doc/api_samples/os-quota-sets-noop/quotas-show-detail-get-resp.json
index 47af14b048f..59fd38c7812 100644
--- a/doc/api_samples/os-quota-sets-noop/quotas-show-detail-get-resp.json
+++ b/doc/api_samples/os-quota-sets-noop/quotas-show-detail-get-resp.json
@@ -46,11 +46,6 @@
"limit": -1,
"reserved": -1
},
- "networks": {
- "in_use": -1,
- "limit": -1,
- "reserved": -1
- },
"ram": {
"in_use": -1,
"limit": -1,
@@ -77,4 +72,4 @@
"reserved": -1
}
}
-}
\ No newline at end of file
+}
diff --git a/doc/api_samples/os-quota-sets-noop/quotas-show-get-resp.json b/doc/api_samples/os-quota-sets-noop/quotas-show-get-resp.json
index 620fa8ed357..714ca5923ae 100644
--- a/doc/api_samples/os-quota-sets-noop/quotas-show-get-resp.json
+++ b/doc/api_samples/os-quota-sets-noop/quotas-show-get-resp.json
@@ -10,11 +10,10 @@
"instances": -1,
"key_pairs": -1,
"metadata_items": -1,
- "networks": -1,
"ram": -1,
"security_group_rules": -1,
"security_groups": -1,
"server_group_members": -1,
"server_groups": -1
}
-}
\ No newline at end of file
+}
diff --git a/doc/api_samples/os-quota-sets-noop/quotas-update-force-post-resp.json b/doc/api_samples/os-quota-sets-noop/quotas-update-force-post-resp.json
index 0a4c3e9008d..370bd87ad1e 100644
--- a/doc/api_samples/os-quota-sets-noop/quotas-update-force-post-resp.json
+++ b/doc/api_samples/os-quota-sets-noop/quotas-update-force-post-resp.json
@@ -9,11 +9,10 @@
"instances": -1,
"key_pairs": -1,
"metadata_items": -1,
- "networks": -1,
"ram": -1,
"security_group_rules": -1,
"security_groups": -1,
"server_group_members": -1,
"server_groups": -1
}
-}
\ No newline at end of file
+}
diff --git a/doc/api_samples/os-quota-sets-noop/quotas-update-post-resp.json b/doc/api_samples/os-quota-sets-noop/quotas-update-post-resp.json
index 0a4c3e9008d..370bd87ad1e 100644
--- a/doc/api_samples/os-quota-sets-noop/quotas-update-post-resp.json
+++ b/doc/api_samples/os-quota-sets-noop/quotas-update-post-resp.json
@@ -9,11 +9,10 @@
"instances": -1,
"key_pairs": -1,
"metadata_items": -1,
- "networks": -1,
"ram": -1,
"security_group_rules": -1,
"security_groups": -1,
"server_group_members": -1,
"server_groups": -1
}
-}
\ No newline at end of file
+}
diff --git a/doc/api_samples/os-quota-sets-noop/user-quotas-show-get-resp.json b/doc/api_samples/os-quota-sets-noop/user-quotas-show-get-resp.json
index 620fa8ed357..714ca5923ae 100644
--- a/doc/api_samples/os-quota-sets-noop/user-quotas-show-get-resp.json
+++ b/doc/api_samples/os-quota-sets-noop/user-quotas-show-get-resp.json
@@ -10,11 +10,10 @@
"instances": -1,
"key_pairs": -1,
"metadata_items": -1,
- "networks": -1,
"ram": -1,
"security_group_rules": -1,
"security_groups": -1,
"server_group_members": -1,
"server_groups": -1
}
-}
\ No newline at end of file
+}
diff --git a/doc/api_samples/os-quota-sets-noop/user-quotas-update-post-resp.json b/doc/api_samples/os-quota-sets-noop/user-quotas-update-post-resp.json
index 0a4c3e9008d..370bd87ad1e 100644
--- a/doc/api_samples/os-quota-sets-noop/user-quotas-update-post-resp.json
+++ b/doc/api_samples/os-quota-sets-noop/user-quotas-update-post-resp.json
@@ -9,11 +9,10 @@
"instances": -1,
"key_pairs": -1,
"metadata_items": -1,
- "networks": -1,
"ram": -1,
"security_group_rules": -1,
"security_groups": -1,
"server_group_members": -1,
"server_groups": -1
}
-}
\ No newline at end of file
+}
diff --git a/doc/api_samples/os-quota-sets/quotas-show-defaults-get-resp.json b/doc/api_samples/os-quota-sets/quotas-show-defaults-get-resp.json
index e03bc651eba..67771a0a8c0 100644
--- a/doc/api_samples/os-quota-sets/quotas-show-defaults-get-resp.json
+++ b/doc/api_samples/os-quota-sets/quotas-show-defaults-get-resp.json
@@ -2,7 +2,7 @@
"quota_set": {
"cores": 20,
"fixed_ips": -1,
- "floating_ips": 10,
+ "floating_ips": -1,
"id": "fake_tenant",
"injected_file_content_bytes": 10240,
"injected_file_path_bytes": 255,
@@ -11,10 +11,9 @@
"key_pairs": 100,
"metadata_items": 128,
"ram": 51200,
- "security_group_rules": 20,
- "security_groups": 10,
+ "security_group_rules": -1,
+ "security_groups": -1,
"server_groups": 10,
- "server_group_members": 10,
- "networks": 3
+ "server_group_members": 10
}
}
diff --git a/doc/api_samples/os-quota-sets/quotas-show-detail-get-resp.json b/doc/api_samples/os-quota-sets/quotas-show-detail-get-resp.json
index bca3a1e82a0..65ca1bf5b68 100644
--- a/doc/api_samples/os-quota-sets/quotas-show-detail-get-resp.json
+++ b/doc/api_samples/os-quota-sets/quotas-show-detail-get-resp.json
@@ -12,7 +12,7 @@
},
"floating_ips": {
"in_use": 0,
- "limit": 10,
+ "limit": -1,
"reserved": 0
},
"id": "fake_tenant",
@@ -53,12 +53,12 @@
},
"security_group_rules": {
"in_use": 0,
- "limit": 20,
+ "limit": -1,
"reserved": 0
},
"security_groups": {
"in_use": 0,
- "limit": 10,
+ "limit": -1,
"reserved": 0
},
"server_group_members": {
@@ -70,11 +70,6 @@
"in_use": 0,
"limit": 10,
"reserved": 0
- },
- "networks": {
- "in_use": 2,
- "limit": 3,
- "reserved": 0
}
}
}
diff --git a/doc/api_samples/os-quota-sets/quotas-show-get-resp.json b/doc/api_samples/os-quota-sets/quotas-show-get-resp.json
index e03bc651eba..67771a0a8c0 100644
--- a/doc/api_samples/os-quota-sets/quotas-show-get-resp.json
+++ b/doc/api_samples/os-quota-sets/quotas-show-get-resp.json
@@ -2,7 +2,7 @@
"quota_set": {
"cores": 20,
"fixed_ips": -1,
- "floating_ips": 10,
+ "floating_ips": -1,
"id": "fake_tenant",
"injected_file_content_bytes": 10240,
"injected_file_path_bytes": 255,
@@ -11,10 +11,9 @@
"key_pairs": 100,
"metadata_items": 128,
"ram": 51200,
- "security_group_rules": 20,
- "security_groups": 10,
+ "security_group_rules": -1,
+ "security_groups": -1,
"server_groups": 10,
- "server_group_members": 10,
- "networks": 3
+ "server_group_members": 10
}
}
diff --git a/doc/api_samples/os-quota-sets/quotas-update-force-post-resp.json b/doc/api_samples/os-quota-sets/quotas-update-force-post-resp.json
index ab901db2c92..2811ac02b20 100644
--- a/doc/api_samples/os-quota-sets/quotas-update-force-post-resp.json
+++ b/doc/api_samples/os-quota-sets/quotas-update-force-post-resp.json
@@ -2,7 +2,7 @@
"quota_set": {
"cores": 20,
"fixed_ips": -1,
- "floating_ips": 10,
+ "floating_ips": -1,
"injected_file_content_bytes": 10240,
"injected_file_path_bytes": 255,
"injected_files": 5,
@@ -10,10 +10,9 @@
"key_pairs": 100,
"metadata_items": 128,
"ram": 51200,
- "security_group_rules": 20,
- "security_groups": 10,
+ "security_group_rules": -1,
+ "security_groups": -1,
"server_groups": 10,
- "server_group_members": 10,
- "networks": 3
+ "server_group_members": 10
}
}
diff --git a/doc/api_samples/os-quota-sets/quotas-update-post-req.json b/doc/api_samples/os-quota-sets/quotas-update-post-req.json
index 0b78cff3095..2a9517bea4e 100644
--- a/doc/api_samples/os-quota-sets/quotas-update-post-req.json
+++ b/doc/api_samples/os-quota-sets/quotas-update-post-req.json
@@ -1,5 +1,5 @@
{
"quota_set": {
- "security_groups": 45
+ "cores": 45
}
-}
\ No newline at end of file
+}
diff --git a/doc/api_samples/os-quota-sets/quotas-update-post-resp.json b/doc/api_samples/os-quota-sets/quotas-update-post-resp.json
index f17ad087f17..93c877aa754 100644
--- a/doc/api_samples/os-quota-sets/quotas-update-post-resp.json
+++ b/doc/api_samples/os-quota-sets/quotas-update-post-resp.json
@@ -1,8 +1,8 @@
{
"quota_set": {
- "cores": 20,
+ "cores": 45,
"fixed_ips": -1,
- "floating_ips": 10,
+ "floating_ips": -1,
"injected_file_content_bytes": 10240,
"injected_file_path_bytes": 255,
"injected_files": 5,
@@ -10,10 +10,9 @@
"key_pairs": 100,
"metadata_items": 128,
"ram": 51200,
- "security_group_rules": 20,
- "security_groups": 45,
+ "security_group_rules": -1,
+ "security_groups": -1,
"server_groups": 10,
- "server_group_members": 10,
- "networks": 3
+ "server_group_members": 10
}
}
diff --git a/doc/api_samples/os-quota-sets/user-quotas-show-get-resp.json b/doc/api_samples/os-quota-sets/user-quotas-show-get-resp.json
index e03bc651eba..67771a0a8c0 100644
--- a/doc/api_samples/os-quota-sets/user-quotas-show-get-resp.json
+++ b/doc/api_samples/os-quota-sets/user-quotas-show-get-resp.json
@@ -2,7 +2,7 @@
"quota_set": {
"cores": 20,
"fixed_ips": -1,
- "floating_ips": 10,
+ "floating_ips": -1,
"id": "fake_tenant",
"injected_file_content_bytes": 10240,
"injected_file_path_bytes": 255,
@@ -11,10 +11,9 @@
"key_pairs": 100,
"metadata_items": 128,
"ram": 51200,
- "security_group_rules": 20,
- "security_groups": 10,
+ "security_group_rules": -1,
+ "security_groups": -1,
"server_groups": 10,
- "server_group_members": 10,
- "networks": 3
+ "server_group_members": 10
}
}
diff --git a/doc/api_samples/os-quota-sets/user-quotas-update-post-resp.json b/doc/api_samples/os-quota-sets/user-quotas-update-post-resp.json
index 4ac251c0868..92252d1a434 100644
--- a/doc/api_samples/os-quota-sets/user-quotas-update-post-resp.json
+++ b/doc/api_samples/os-quota-sets/user-quotas-update-post-resp.json
@@ -2,7 +2,7 @@
"quota_set": {
"cores": 20,
"fixed_ips": -1,
- "floating_ips": 10,
+ "floating_ips": -1,
"injected_file_content_bytes": 10240,
"injected_file_path_bytes": 255,
"injected_files": 5,
@@ -10,10 +10,9 @@
"key_pairs": 100,
"metadata_items": 128,
"ram": 51200,
- "security_group_rules": 20,
- "security_groups": 10,
+ "security_group_rules": -1,
+ "security_groups": -1,
"server_groups": 10,
- "server_group_members": 10,
- "networks": 3
+ "server_group_members": 10
}
}
diff --git a/doc/api_samples/os-remote-consoles/get-vnc-console-post-resp.json b/doc/api_samples/os-remote-consoles/get-vnc-console-post-resp.json
index fe15b779335..faa6ce30225 100644
--- a/doc/api_samples/os-remote-consoles/get-vnc-console-post-resp.json
+++ b/doc/api_samples/os-remote-consoles/get-vnc-console-post-resp.json
@@ -1,6 +1,6 @@
{
"console": {
"type": "novnc",
- "url": "http://127.0.0.1:6080/vnc_auto.html?token=191996c3-7b0f-42f3-95a7-f1839f2da6ed"
+ "url": "http://127.0.0.1:6080/vnc_auto.html?path=%3Ftoken%3Ddaae261f-474d-4cae-8f6a-1865278ed8c9"
}
}
\ No newline at end of file
diff --git a/doc/api_samples/os-remote-consoles/v2.6/create-vnc-console-resp.json b/doc/api_samples/os-remote-consoles/v2.6/create-vnc-console-resp.json
index b427a690222..12eade5a2ba 100644
--- a/doc/api_samples/os-remote-consoles/v2.6/create-vnc-console-resp.json
+++ b/doc/api_samples/os-remote-consoles/v2.6/create-vnc-console-resp.json
@@ -2,6 +2,6 @@
"remote_console": {
"protocol": "vnc",
"type": "novnc",
- "url": "http://example.com:6080/vnc_auto.html?token=b60bcfc3-5fd4-4d21-986c-e83379107819"
+ "url": "http://example.com:6080/vnc_auto.html?path=%3Ftoken%3Db60bcfc3-5fd4-4d21-986c-e83379107819"
}
}
diff --git a/doc/api_samples/os-rescue/server-get-resp-rescue.json b/doc/api_samples/os-rescue/server-get-resp-rescue.json
index 8780bc668d3..9a99aa8824a 100644
--- a/doc/api_samples/os-rescue/server-get-resp-rescue.json
+++ b/doc/api_samples/os-rescue/server-get-resp-rescue.json
@@ -5,8 +5,8 @@
"addresses": {
"private": [
{
- "addr": "192.168.0.3",
- "OS-EXT-IPS-MAC:mac_addr": "aa:bb:cc:dd:ee:ff",
+ "addr": "192.168.1.30",
+ "OS-EXT-IPS-MAC:mac_addr": "00:0c:29:0d:11:74",
"OS-EXT-IPS:type": "fixed",
"version": 4
}
@@ -54,10 +54,7 @@
"user_id": "fake",
"config_drive": "",
"OS-DCF:diskConfig": "AUTO",
- "OS-EXT-AZ:availability_zone": "nova",
- "OS-EXT-SRV-ATTR:host": "b8b357f7100d4391828f2177c922ef93",
- "OS-EXT-SRV-ATTR:hypervisor_hostname": "fake-mini",
- "OS-EXT-SRV-ATTR:instance_name": "instance-00000001",
+ "OS-EXT-AZ:availability_zone": "us-west",
"OS-EXT-STS:power_state": 4,
"OS-EXT-STS:task_state": null,
"OS-EXT-STS:vm_state": "rescued",
diff --git a/doc/api_samples/os-rescue/server-get-resp-unrescue.json b/doc/api_samples/os-rescue/server-get-resp-unrescue.json
index a8c9f271aaf..581dc19e018 100644
--- a/doc/api_samples/os-rescue/server-get-resp-unrescue.json
+++ b/doc/api_samples/os-rescue/server-get-resp-unrescue.json
@@ -5,8 +5,8 @@
"addresses": {
"private": [
{
- "addr": "192.168.0.3",
- "OS-EXT-IPS-MAC:mac_addr": "aa:bb:cc:dd:ee:ff",
+ "addr": "192.168.1.30",
+ "OS-EXT-IPS-MAC:mac_addr": "00:0c:29:0d:11:74",
"OS-EXT-IPS:type": "fixed",
"version": 4
}
@@ -55,10 +55,7 @@
"user_id": "fake",
"config_drive": "",
"OS-DCF:diskConfig": "AUTO",
- "OS-EXT-AZ:availability_zone": "nova",
- "OS-EXT-SRV-ATTR:host": "b8b357f7100d4391828f2177c922ef93",
- "OS-EXT-SRV-ATTR:hypervisor_hostname": "fake-mini",
- "OS-EXT-SRV-ATTR:instance_name": "instance-00000001",
+ "OS-EXT-AZ:availability_zone": "us-west",
"OS-EXT-STS:power_state": 1,
"OS-EXT-STS:task_state": null,
"OS-EXT-STS:vm_state": "active",
diff --git a/doc/api_samples/os-rescue/server-unrescue-req.json b/doc/api_samples/os-rescue/server-unrescue-req.json
index cafc9b13a84..635fb7a25ed 100644
--- a/doc/api_samples/os-rescue/server-unrescue-req.json
+++ b/doc/api_samples/os-rescue/server-unrescue-req.json
@@ -1,3 +1,3 @@
{
"unrescue": null
-}
\ No newline at end of file
+}
diff --git a/doc/api_samples/os-rescue/v2.87/server-get-resp-rescue.json b/doc/api_samples/os-rescue/v2.87/server-get-resp-rescue.json
new file mode 100644
index 00000000000..4fc5ce6f1e7
--- /dev/null
+++ b/doc/api_samples/os-rescue/v2.87/server-get-resp-rescue.json
@@ -0,0 +1,76 @@
+{
+ "server": {
+ "OS-DCF:diskConfig": "AUTO",
+ "OS-EXT-AZ:availability_zone": "us-west",
+ "OS-EXT-STS:power_state": 4,
+ "OS-EXT-STS:task_state": null,
+ "OS-EXT-STS:vm_state": "rescued",
+ "OS-SRV-USG:launched_at": "2020-02-07T17:39:49.259481",
+ "OS-SRV-USG:terminated_at": null,
+ "accessIPv4": "1.2.3.4",
+ "accessIPv6": "80fe::",
+ "addresses": {
+ "private": [
+ {
+ "OS-EXT-IPS-MAC:mac_addr": "00:0c:29:0d:11:74",
+ "OS-EXT-IPS:type": "fixed",
+ "addr": "192.168.1.30",
+ "version": 4
+ }
+ ]
+ },
+ "config_drive": "",
+ "created": "2020-02-07T17:39:48Z",
+ "description": null,
+ "flavor": {
+ "disk": 1,
+ "ephemeral": 0,
+ "extra_specs": {},
+ "original_name": "m1.tiny",
+ "ram": 512,
+ "swap": 0,
+ "vcpus": 1
+ },
+ "hostId": "2091634baaccdc4c5a1d57069c833e402921df696b7f970791b12ec6",
+ "id": "69bebe1c-3bdb-4feb-9b79-afa3d4782d95",
+ "image": {
+ "id": "70a599e0-31e7-49b7-b260-868f441e862b",
+ "links": [
+ {
+ "href": "http://openstack.example.com/6f70656e737461636b20342065766572/images/70a599e0-31e7-49b7-b260-868f441e862b",
+ "rel": "bookmark"
+ }
+ ]
+ },
+ "key_name": null,
+ "links": [
+ {
+ "href": "http://openstack.example.com/v2.1/6f70656e737461636b20342065766572/servers/69bebe1c-3bdb-4feb-9b79-afa3d4782d95",
+ "rel": "self"
+ },
+ {
+ "href": "http://openstack.example.com/6f70656e737461636b20342065766572/servers/69bebe1c-3bdb-4feb-9b79-afa3d4782d95",
+ "rel": "bookmark"
+ }
+ ],
+ "locked": false,
+ "locked_reason": null,
+ "metadata": {
+ "My Server Name": "Apache1"
+ },
+ "name": "new-server-test",
+ "os-extended-volumes:volumes_attached": [],
+ "security_groups": [
+ {
+ "name": "default"
+ }
+ ],
+ "server_groups": [],
+ "status": "RESCUE",
+ "tags": [],
+ "tenant_id": "6f70656e737461636b20342065766572",
+ "trusted_image_certificates": null,
+ "updated": "2020-02-07T17:39:49Z",
+ "user_id": "fake"
+ }
+}
diff --git a/doc/api_samples/os-rescue/v2.87/server-get-resp-unrescue.json b/doc/api_samples/os-rescue/v2.87/server-get-resp-unrescue.json
new file mode 100644
index 00000000000..2d54aa13821
--- /dev/null
+++ b/doc/api_samples/os-rescue/v2.87/server-get-resp-unrescue.json
@@ -0,0 +1,77 @@
+{
+ "server": {
+ "OS-DCF:diskConfig": "AUTO",
+ "OS-EXT-AZ:availability_zone": "us-west",
+ "OS-EXT-STS:power_state": 1,
+ "OS-EXT-STS:task_state": null,
+ "OS-EXT-STS:vm_state": "active",
+ "OS-SRV-USG:launched_at": "2020-02-07T17:39:55.632592",
+ "OS-SRV-USG:terminated_at": null,
+ "accessIPv4": "1.2.3.4",
+ "accessIPv6": "80fe::",
+ "addresses": {
+ "private": [
+ {
+ "OS-EXT-IPS-MAC:mac_addr": "00:0c:29:0d:11:74",
+ "OS-EXT-IPS:type": "fixed",
+ "addr": "192.168.1.30",
+ "version": 4
+ }
+ ]
+ },
+ "config_drive": "",
+ "created": "2020-02-07T17:39:54Z",
+ "description": null,
+ "flavor": {
+ "disk": 1,
+ "ephemeral": 0,
+ "extra_specs": {},
+ "original_name": "m1.tiny",
+ "ram": 512,
+ "swap": 0,
+ "vcpus": 1
+ },
+ "hostId": "2091634baaccdc4c5a1d57069c833e402921df696b7f970791b12ec6",
+ "id": "5a0ffa96-ae59-4f82-b7a6-e0c9007cd576",
+ "image": {
+ "id": "70a599e0-31e7-49b7-b260-868f441e862b",
+ "links": [
+ {
+ "href": "http://openstack.example.com/6f70656e737461636b20342065766572/images/70a599e0-31e7-49b7-b260-868f441e862b",
+ "rel": "bookmark"
+ }
+ ]
+ },
+ "key_name": null,
+ "links": [
+ {
+ "href": "http://openstack.example.com/v2.1/6f70656e737461636b20342065766572/servers/5a0ffa96-ae59-4f82-b7a6-e0c9007cd576",
+ "rel": "self"
+ },
+ {
+ "href": "http://openstack.example.com/6f70656e737461636b20342065766572/servers/5a0ffa96-ae59-4f82-b7a6-e0c9007cd576",
+ "rel": "bookmark"
+ }
+ ],
+ "locked": false,
+ "locked_reason": null,
+ "metadata": {
+ "My Server Name": "Apache1"
+ },
+ "name": "new-server-test",
+ "os-extended-volumes:volumes_attached": [],
+ "progress": 0,
+ "security_groups": [
+ {
+ "name": "default"
+ }
+ ],
+ "server_groups": [],
+ "status": "ACTIVE",
+ "tags": [],
+ "tenant_id": "6f70656e737461636b20342065766572",
+ "trusted_image_certificates": null,
+ "updated": "2020-02-07T17:39:56Z",
+ "user_id": "fake"
+ }
+}
diff --git a/doc/api_samples/os-rescue/v2.87/server-rescue-req-with-image-ref.json b/doc/api_samples/os-rescue/v2.87/server-rescue-req-with-image-ref.json
new file mode 100644
index 00000000000..1cfab528728
--- /dev/null
+++ b/doc/api_samples/os-rescue/v2.87/server-rescue-req-with-image-ref.json
@@ -0,0 +1,6 @@
+{
+ "rescue": {
+ "adminPass": "MySecretPass",
+ "rescue_image_ref": "70a599e0-31e7-49b7-b260-868f441e862b"
+ }
+}
\ No newline at end of file
diff --git a/doc/api_samples/os-rescue/v2.87/server-rescue-req.json b/doc/api_samples/os-rescue/v2.87/server-rescue-req.json
new file mode 100644
index 00000000000..3796600282f
--- /dev/null
+++ b/doc/api_samples/os-rescue/v2.87/server-rescue-req.json
@@ -0,0 +1,5 @@
+{
+ "rescue": {
+ "adminPass": "MySecretPass"
+ }
+}
\ No newline at end of file
diff --git a/doc/api_samples/os-rescue/v2.87/server-rescue.json b/doc/api_samples/os-rescue/v2.87/server-rescue.json
new file mode 100644
index 00000000000..6cd942395fe
--- /dev/null
+++ b/doc/api_samples/os-rescue/v2.87/server-rescue.json
@@ -0,0 +1,3 @@
+{
+ "adminPass": "MySecretPass"
+}
\ No newline at end of file
diff --git a/doc/api_samples/os-rescue/v2.87/server-unrescue-req.json b/doc/api_samples/os-rescue/v2.87/server-unrescue-req.json
new file mode 100644
index 00000000000..cafc9b13a84
--- /dev/null
+++ b/doc/api_samples/os-rescue/v2.87/server-unrescue-req.json
@@ -0,0 +1,3 @@
+{
+ "unrescue": null
+}
\ No newline at end of file
diff --git a/doc/api_samples/os-server-tags/v2.26/server-tags-show-details-resp.json b/doc/api_samples/os-server-tags/v2.26/server-tags-show-details-resp.json
index 1fdc541a73e..9c20c9f0865 100644
--- a/doc/api_samples/os-server-tags/v2.26/server-tags-show-details-resp.json
+++ b/doc/api_samples/os-server-tags/v2.26/server-tags-show-details-resp.json
@@ -6,8 +6,8 @@
"addresses": {
"private": [
{
- "addr": "192.168.0.3",
- "OS-EXT-IPS-MAC:mac_addr": "aa:bb:cc:dd:ee:ff",
+ "addr": "192.168.1.30",
+ "OS-EXT-IPS-MAC:mac_addr": "00:0c:29:0d:11:74",
"OS-EXT-IPS:type": "fixed",
"version": 4
}
@@ -58,17 +58,7 @@
"description": null,
"config_drive": "",
"OS-DCF:diskConfig": "AUTO",
- "OS-EXT-AZ:availability_zone": "nova",
- "OS-EXT-SRV-ATTR:host": "b8b357f7100d4391828f2177c922ef93",
- "OS-EXT-SRV-ATTR:hypervisor_hostname": "fake-mini",
- "OS-EXT-SRV-ATTR:instance_name": "instance-00000001",
- "OS-EXT-SRV-ATTR:reservation_id": "r-00000001",
- "OS-EXT-SRV-ATTR:launch_index": 0,
- "OS-EXT-SRV-ATTR:kernel_id": "",
- "OS-EXT-SRV-ATTR:ramdisk_id": "",
- "OS-EXT-SRV-ATTR:hostname": "fake-hostname",
- "OS-EXT-SRV-ATTR:root_device_name": "/dev/sda",
- "OS-EXT-SRV-ATTR:user_data": "IyEvYmluL2Jhc2gKL2Jpbi9zdQplY2hvICJJIGFtIGluIHlvdSEiCg==",
+ "OS-EXT-AZ:availability_zone": "us-west",
"OS-EXT-STS:power_state": 1,
"OS-EXT-STS:task_state": null,
"OS-EXT-STS:vm_state": "active",
@@ -79,7 +69,6 @@
{
"name": "default"
}
- ],
- "host_status": "UP"
+ ]
}
}
diff --git a/doc/api_samples/os-server-tags/v2.26/servers-tags-details-resp.json b/doc/api_samples/os-server-tags/v2.26/servers-tags-details-resp.json
index c1ea45a29ce..1e9cf8f99a5 100644
--- a/doc/api_samples/os-server-tags/v2.26/servers-tags-details-resp.json
+++ b/doc/api_samples/os-server-tags/v2.26/servers-tags-details-resp.json
@@ -6,8 +6,8 @@
"addresses": {
"private": [
{
- "addr": "192.168.0.3",
- "OS-EXT-IPS-MAC:mac_addr": "aa:bb:cc:dd:ee:ff",
+ "addr": "192.168.1.30",
+ "OS-EXT-IPS-MAC:mac_addr": "00:0c:29:0d:11:74",
"OS-EXT-IPS:type": "fixed",
"version": 4
}
@@ -59,17 +59,7 @@
"description": null,
"config_drive": "",
"OS-DCF:diskConfig": "AUTO",
- "OS-EXT-AZ:availability_zone": "nova",
- "OS-EXT-SRV-ATTR:host": "c3f14e9812ad496baf92ccfb3c61e15f",
- "OS-EXT-SRV-ATTR:hypervisor_hostname": "fake-mini",
- "OS-EXT-SRV-ATTR:instance_name": "instance-00000001",
- "OS-EXT-SRV-ATTR:reservation_id": "r-00000001",
- "OS-EXT-SRV-ATTR:launch_index": 0,
- "OS-EXT-SRV-ATTR:kernel_id": "",
- "OS-EXT-SRV-ATTR:ramdisk_id": "",
- "OS-EXT-SRV-ATTR:hostname": "fake-hostname",
- "OS-EXT-SRV-ATTR:root_device_name": "/dev/sda",
- "OS-EXT-SRV-ATTR:user_data": "IyEvYmluL2Jhc2gKL2Jpbi9zdQplY2hvICJJIGFtIGluIHlvdSEiCg==",
+ "OS-EXT-AZ:availability_zone": "us-west",
"OS-EXT-STS:power_state": 1,
"OS-EXT-STS:task_state": null,
"OS-EXT-STS:vm_state": "active",
@@ -80,8 +70,7 @@
{
"name": "default"
}
- ],
- "host_status": "UP"
+ ]
}
]
}
diff --git a/doc/api_samples/os-server-topology/v2.78/servers-topology-resp-user.json b/doc/api_samples/os-server-topology/v2.78/servers-topology-resp-user.json
new file mode 100644
index 00000000000..0d3677c6c4d
--- /dev/null
+++ b/doc/api_samples/os-server-topology/v2.78/servers-topology-resp-user.json
@@ -0,0 +1,31 @@
+{
+ "nodes": [
+ {
+ "memory_mb": 1024,
+ "siblings": [
+ [
+ 0,
+ 1
+ ]
+ ],
+ "vcpu_set": [
+ 0,
+ 1
+ ]
+ },
+ {
+ "memory_mb": 2048,
+ "siblings": [
+ [
+ 2,
+ 3
+ ]
+ ],
+ "vcpu_set": [
+ 2,
+ 3
+ ]
+ }
+ ],
+ "pagesize_kb": 4
+}
diff --git a/doc/api_samples/os-server-topology/v2.78/servers-topology-resp.json b/doc/api_samples/os-server-topology/v2.78/servers-topology-resp.json
new file mode 100644
index 00000000000..a918a2ade59
--- /dev/null
+++ b/doc/api_samples/os-server-topology/v2.78/servers-topology-resp.json
@@ -0,0 +1,41 @@
+{
+ "nodes": [
+ {
+ "cpu_pinning": {
+ "0": 0,
+ "1": 5
+ },
+ "host_node": 0,
+ "memory_mb": 1024,
+ "siblings": [
+ [
+ 0,
+ 1
+ ]
+ ],
+ "vcpu_set": [
+ 0,
+ 1
+ ]
+ },
+ {
+ "cpu_pinning": {
+ "2": 1,
+ "3": 8
+ },
+ "host_node": 1,
+ "memory_mb": 2048,
+ "siblings": [
+ [
+ 2,
+ 3
+ ]
+ ],
+ "vcpu_set": [
+ 2,
+ 3
+ ]
+ }
+ ],
+ "pagesize_kb": 4
+}
diff --git a/doc/api_samples/os-services/v2.69/services-list-get-resp.json b/doc/api_samples/os-services/v2.69/services-list-get-resp.json
new file mode 100644
index 00000000000..6b06ba63eb7
--- /dev/null
+++ b/doc/api_samples/os-services/v2.69/services-list-get-resp.json
@@ -0,0 +1,14 @@
+{
+ "services": [
+ {
+ "binary": "nova-compute",
+ "host": "host1",
+ "status": "UNKNOWN"
+ },
+ {
+ "binary": "nova-compute",
+ "host": "host2",
+ "status": "UNKNOWN"
+ }
+ ]
+}
diff --git a/doc/api_samples/os-shelve/v2.77/os-shelve.json b/doc/api_samples/os-shelve/v2.77/os-shelve.json
new file mode 100644
index 00000000000..e33b05865ac
--- /dev/null
+++ b/doc/api_samples/os-shelve/v2.77/os-shelve.json
@@ -0,0 +1,3 @@
+{
+ "shelve": null
+}
\ No newline at end of file
diff --git a/doc/api_samples/os-shelve/v2.77/os-unshelve-null.json b/doc/api_samples/os-shelve/v2.77/os-unshelve-null.json
new file mode 100644
index 00000000000..fd05c2a2fe6
--- /dev/null
+++ b/doc/api_samples/os-shelve/v2.77/os-unshelve-null.json
@@ -0,0 +1,3 @@
+{
+ "unshelve": null
+}
\ No newline at end of file
diff --git a/doc/api_samples/os-shelve/v2.77/os-unshelve.json b/doc/api_samples/os-shelve/v2.77/os-unshelve.json
new file mode 100644
index 00000000000..8ca146b5933
--- /dev/null
+++ b/doc/api_samples/os-shelve/v2.77/os-unshelve.json
@@ -0,0 +1,5 @@
+{
+ "unshelve": {
+ "availability_zone": "us-west"
+ }
+}
\ No newline at end of file
diff --git a/doc/api_samples/os-simple-tenant-usage/v2.40/simple-tenant-usage-get-all.json b/doc/api_samples/os-simple-tenant-usage/v2.40/simple-tenant-usage-get-all.json
new file mode 100644
index 00000000000..d6a1be5cc64
--- /dev/null
+++ b/doc/api_samples/os-simple-tenant-usage/v2.40/simple-tenant-usage-get-all.json
@@ -0,0 +1,68 @@
+{
+ "tenant_usages": [
+ {
+ "server_usages": [
+ {
+ "ended_at": null,
+ "flavor": "m1.tiny",
+ "hours": 1.0,
+ "instance_id": "1f1deceb-17b5-4c04-84c7-e0d4499c8f06",
+ "local_gb": 1,
+ "memory_mb": 512,
+ "name": "instance-3",
+ "started_at": "2018-10-09T11:29:04.166194",
+ "state": "active",
+ "tenant_id": "0000000e737461636b20342065000000",
+ "uptime": 3600,
+ "vcpus": 1
+ }
+ ],
+ "start": "2018-10-09T11:29:04.166194",
+ "stop": "2018-10-09T12:29:04.166194",
+ "tenant_id": "0000000e737461636b20342065000000",
+ "total_hours": 1.0,
+ "total_local_gb_usage": 1.0,
+ "total_memory_mb_usage": 512.0,
+ "total_vcpus_usage": 1.0
+ },
+ {
+ "server_usages": [
+ {
+ "ended_at": null,
+ "flavor": "m1.tiny",
+ "hours": 1.0,
+ "instance_id": "1f1deceb-17b5-4c04-84c7-e0d4499c8f00",
+ "local_gb": 1,
+ "memory_mb": 512,
+ "name": "instance-1",
+ "started_at": "2018-10-09T11:29:04.166194",
+ "state": "active",
+ "tenant_id": "6f70656e737461636b20342065766572",
+ "uptime": 3600,
+ "vcpus": 1
+ },
+ {
+ "ended_at": null,
+ "flavor": "m1.tiny",
+ "hours": 1.0,
+ "instance_id": "1f1deceb-17b5-4c04-84c7-e0d4499c8f03",
+ "local_gb": 1,
+ "memory_mb": 512,
+ "name": "instance-2",
+ "started_at": "2018-10-09T11:29:04.166194",
+ "state": "active",
+ "tenant_id": "6f70656e737461636b20342065766572",
+ "uptime": 3600,
+ "vcpus": 1
+ }
+ ],
+ "start": "2018-10-09T11:29:04.166194",
+ "stop": "2018-10-09T12:29:04.166194",
+ "tenant_id": "6f70656e737461636b20342065766572",
+ "total_hours": 2.0,
+ "total_local_gb_usage": 2.0,
+ "total_memory_mb_usage": 1024.0,
+ "total_vcpus_usage": 2.0
+ }
+ ]
+}
\ No newline at end of file
diff --git a/doc/api_samples/os-tenant-networks/networks-list-res.json b/doc/api_samples/os-tenant-networks/networks-list-res.json
index b857e8112af..006663ded1b 100644
--- a/doc/api_samples/os-tenant-networks/networks-list-res.json
+++ b/doc/api_samples/os-tenant-networks/networks-list-res.json
@@ -1,14 +1,9 @@
{
"networks": [
{
- "cidr": "10.0.0.0/29",
- "id": "616fb98f-46ca-475e-917e-2563e5a8cd19",
- "label": "test_0"
- },
- {
- "cidr": "10.0.0.8/29",
- "id": "616fb98f-46ca-475e-917e-2563e5a8cd20",
- "label": "test_1"
+ "cidr": "None",
+ "id": "3cb9bc59-5699-4588-a4b1-b87f96708bc6",
+ "label": "private"
}
]
}
diff --git a/doc/api_samples/os-volumes/attach-volume-to-server-req.json b/doc/api_samples/os-volumes/attach-volume-to-server-req.json
index 4062687fc3b..f2d5f69bd52 100644
--- a/doc/api_samples/os-volumes/attach-volume-to-server-req.json
+++ b/doc/api_samples/os-volumes/attach-volume-to-server-req.json
@@ -1,6 +1,6 @@
{
"volumeAttachment": {
- "volumeId": "a26887c6-c47b-4654-abb5-dfadf7d3f803",
- "device": "/dev/vdd"
+ "volumeId": "a07f71dc-8151-4e7d-a0cc-cd24a3f11113",
+ "device": "/dev/sdb"
}
}
\ No newline at end of file
diff --git a/doc/api_samples/os-volumes/attach-volume-to-server-resp.json b/doc/api_samples/os-volumes/attach-volume-to-server-resp.json
index 2e512ac9903..5408fb8a995 100644
--- a/doc/api_samples/os-volumes/attach-volume-to-server-resp.json
+++ b/doc/api_samples/os-volumes/attach-volume-to-server-resp.json
@@ -1,8 +1,8 @@
{
"volumeAttachment": {
- "device": "/dev/vdd",
- "id": "a26887c6-c47b-4654-abb5-dfadf7d3f803",
- "serverId": "0c92f3f6-c253-4c9b-bd43-e880a8d2eb0a",
- "volumeId": "a26887c6-c47b-4654-abb5-dfadf7d3f803"
+ "device": "/dev/sdb",
+ "id": "a07f71dc-8151-4e7d-a0cc-cd24a3f11113",
+ "serverId": "802db873-0373-4bdd-a433-d272a539ba18",
+ "volumeId": "a07f71dc-8151-4e7d-a0cc-cd24a3f11113"
}
}
\ No newline at end of file
diff --git a/doc/api_samples/os-volumes/list-volume-attachments-resp.json b/doc/api_samples/os-volumes/list-volume-attachments-resp.json
index 9ae9b4a2aa8..3ad77cf7de8 100644
--- a/doc/api_samples/os-volumes/list-volume-attachments-resp.json
+++ b/doc/api_samples/os-volumes/list-volume-attachments-resp.json
@@ -1,16 +1,16 @@
{
"volumeAttachments": [
{
- "device": "/dev/sdd",
- "id": "a26887c6-c47b-4654-abb5-dfadf7d3f803",
- "serverId": "4d8c3732-a248-40ed-bebc-539a6ffd25c0",
- "volumeId": "a26887c6-c47b-4654-abb5-dfadf7d3f803"
+ "device": "/dev/sdc",
+ "id": "227cc671-f30b-4488-96fd-7d0bf13648d8",
+ "serverId": "4b293d31-ebd5-4a7f-be03-874b90021e54",
+ "volumeId": "227cc671-f30b-4488-96fd-7d0bf13648d8"
},
{
- "device": "/dev/sdc",
- "id": "a26887c6-c47b-4654-abb5-dfadf7d3f804",
- "serverId": "4d8c3732-a248-40ed-bebc-539a6ffd25c0",
- "volumeId": "a26887c6-c47b-4654-abb5-dfadf7d3f804"
+ "device": "/dev/sdb",
+ "id": "a07f71dc-8151-4e7d-a0cc-cd24a3f11113",
+ "serverId": "4b293d31-ebd5-4a7f-be03-874b90021e54",
+ "volumeId": "a07f71dc-8151-4e7d-a0cc-cd24a3f11113"
}
]
}
\ No newline at end of file
diff --git a/doc/api_samples/os-volumes/update-volume-req.json b/doc/api_samples/os-volumes/update-volume-req.json
index bba735eec83..e5ad47aa3cc 100644
--- a/doc/api_samples/os-volumes/update-volume-req.json
+++ b/doc/api_samples/os-volumes/update-volume-req.json
@@ -1,5 +1,5 @@
{
"volumeAttachment": {
- "volumeId": "a26887c6-c47b-4654-abb5-dfadf7d3f805"
+ "volumeId": "227cc671-f30b-4488-96fd-7d0bf13648d8"
}
}
\ No newline at end of file
diff --git a/doc/api_samples/os-volumes/v2.49/attach-volume-to-server-req.json b/doc/api_samples/os-volumes/v2.49/attach-volume-to-server-req.json
index 9f49b54d78c..fdf928be694 100644
--- a/doc/api_samples/os-volumes/v2.49/attach-volume-to-server-req.json
+++ b/doc/api_samples/os-volumes/v2.49/attach-volume-to-server-req.json
@@ -1,6 +1,6 @@
{
"volumeAttachment": {
- "volumeId": "a26887c6-c47b-4654-abb5-dfadf7d3f803",
+ "volumeId": "a07f71dc-8151-4e7d-a0cc-cd24a3f11113",
"tag": "foo"
}
-}
+}
\ No newline at end of file
diff --git a/doc/api_samples/os-volumes/v2.49/attach-volume-to-server-resp.json b/doc/api_samples/os-volumes/v2.49/attach-volume-to-server-resp.json
index 5f610bcaebe..1e5aa6b1a63 100644
--- a/doc/api_samples/os-volumes/v2.49/attach-volume-to-server-resp.json
+++ b/doc/api_samples/os-volumes/v2.49/attach-volume-to-server-resp.json
@@ -1,8 +1,8 @@
{
"volumeAttachment": {
"device": "/dev/sdb",
- "id": "a26887c6-c47b-4654-abb5-dfadf7d3f803",
- "serverId": "84ffbfa0-daf4-4e23-bf4b-dc532c459d4e",
- "volumeId": "a26887c6-c47b-4654-abb5-dfadf7d3f803"
+ "id": "a07f71dc-8151-4e7d-a0cc-cd24a3f11113",
+ "serverId": "69d19439-fa5f-4d6e-8b78-1868e7eb93a5",
+ "volumeId": "a07f71dc-8151-4e7d-a0cc-cd24a3f11113"
}
-}
+}
\ No newline at end of file
diff --git a/doc/api_samples/os-volumes/v2.49/list-volume-attachments-resp.json b/doc/api_samples/os-volumes/v2.49/list-volume-attachments-resp.json
new file mode 100644
index 00000000000..18a5aad803a
--- /dev/null
+++ b/doc/api_samples/os-volumes/v2.49/list-volume-attachments-resp.json
@@ -0,0 +1,16 @@
+{
+ "volumeAttachments": [
+ {
+ "device": "/dev/sdc",
+ "id": "227cc671-f30b-4488-96fd-7d0bf13648d8",
+ "serverId": "1453a6a8-10ec-4797-9b9e-da3c703579d5",
+ "volumeId": "227cc671-f30b-4488-96fd-7d0bf13648d8"
+ },
+ {
+ "device": "/dev/sdb",
+ "id": "a07f71dc-8151-4e7d-a0cc-cd24a3f11113",
+ "serverId": "1453a6a8-10ec-4797-9b9e-da3c703579d5",
+ "volumeId": "a07f71dc-8151-4e7d-a0cc-cd24a3f11113"
+ }
+ ]
+}
\ No newline at end of file
diff --git a/doc/api_samples/os-volumes/v2.49/update-volume-req.json b/doc/api_samples/os-volumes/v2.49/update-volume-req.json
new file mode 100644
index 00000000000..e5ad47aa3cc
--- /dev/null
+++ b/doc/api_samples/os-volumes/v2.49/update-volume-req.json
@@ -0,0 +1,5 @@
+{
+ "volumeAttachment": {
+ "volumeId": "227cc671-f30b-4488-96fd-7d0bf13648d8"
+ }
+}
\ No newline at end of file
diff --git a/doc/api_samples/os-volumes/v2.49/volume-attachment-detail-resp.json b/doc/api_samples/os-volumes/v2.49/volume-attachment-detail-resp.json
new file mode 100644
index 00000000000..af9e64d4c24
--- /dev/null
+++ b/doc/api_samples/os-volumes/v2.49/volume-attachment-detail-resp.json
@@ -0,0 +1,8 @@
+{
+ "volumeAttachment": {
+ "device": "/dev/sdb",
+ "id": "a07f71dc-8151-4e7d-a0cc-cd24a3f11113",
+ "serverId": "9ad0352c-48ff-4290-9db8-3385a676f035",
+ "volumeId": "a07f71dc-8151-4e7d-a0cc-cd24a3f11113"
+ }
+}
\ No newline at end of file
diff --git a/doc/api_samples/os-volumes/v2.70/attach-volume-to-server-req.json b/doc/api_samples/os-volumes/v2.70/attach-volume-to-server-req.json
new file mode 100644
index 00000000000..fdf928be694
--- /dev/null
+++ b/doc/api_samples/os-volumes/v2.70/attach-volume-to-server-req.json
@@ -0,0 +1,6 @@
+{
+ "volumeAttachment": {
+ "volumeId": "a07f71dc-8151-4e7d-a0cc-cd24a3f11113",
+ "tag": "foo"
+ }
+}
\ No newline at end of file
diff --git a/doc/api_samples/os-volumes/v2.70/attach-volume-to-server-resp.json b/doc/api_samples/os-volumes/v2.70/attach-volume-to-server-resp.json
new file mode 100644
index 00000000000..5c03cbc232b
--- /dev/null
+++ b/doc/api_samples/os-volumes/v2.70/attach-volume-to-server-resp.json
@@ -0,0 +1,9 @@
+{
+ "volumeAttachment": {
+ "device": "/dev/sdb",
+ "id": "a07f71dc-8151-4e7d-a0cc-cd24a3f11113",
+ "serverId": "70f5c62a-972d-4a8b-abcf-e1375ca7f8c0",
+ "tag": "foo",
+ "volumeId": "a07f71dc-8151-4e7d-a0cc-cd24a3f11113"
+ }
+}
\ No newline at end of file
diff --git a/doc/api_samples/os-volumes/v2.70/list-volume-attachments-resp.json b/doc/api_samples/os-volumes/v2.70/list-volume-attachments-resp.json
new file mode 100644
index 00000000000..f17cc8e2d87
--- /dev/null
+++ b/doc/api_samples/os-volumes/v2.70/list-volume-attachments-resp.json
@@ -0,0 +1,18 @@
+{
+ "volumeAttachments": [
+ {
+ "device": "/dev/sdc",
+ "id": "227cc671-f30b-4488-96fd-7d0bf13648d8",
+ "serverId": "68426b0f-511b-4cb3-8169-bba2e7a8bc89",
+ "tag": null,
+ "volumeId": "227cc671-f30b-4488-96fd-7d0bf13648d8"
+ },
+ {
+ "device": "/dev/sdb",
+ "id": "a07f71dc-8151-4e7d-a0cc-cd24a3f11113",
+ "serverId": "68426b0f-511b-4cb3-8169-bba2e7a8bc89",
+ "tag": "foo",
+ "volumeId": "a07f71dc-8151-4e7d-a0cc-cd24a3f11113"
+ }
+ ]
+}
\ No newline at end of file
diff --git a/doc/api_samples/os-volumes/v2.70/update-volume-req.json b/doc/api_samples/os-volumes/v2.70/update-volume-req.json
new file mode 100644
index 00000000000..e5ad47aa3cc
--- /dev/null
+++ b/doc/api_samples/os-volumes/v2.70/update-volume-req.json
@@ -0,0 +1,5 @@
+{
+ "volumeAttachment": {
+ "volumeId": "227cc671-f30b-4488-96fd-7d0bf13648d8"
+ }
+}
\ No newline at end of file
diff --git a/doc/api_samples/os-volumes/v2.70/volume-attachment-detail-resp.json b/doc/api_samples/os-volumes/v2.70/volume-attachment-detail-resp.json
new file mode 100644
index 00000000000..650ede480e8
--- /dev/null
+++ b/doc/api_samples/os-volumes/v2.70/volume-attachment-detail-resp.json
@@ -0,0 +1,9 @@
+{
+ "volumeAttachment": {
+ "device": "/dev/sdb",
+ "id": "a07f71dc-8151-4e7d-a0cc-cd24a3f11113",
+ "serverId": "d989feee-002d-40f6-b47d-f0dbee48bbc1",
+ "tag": "foo",
+ "volumeId": "a07f71dc-8151-4e7d-a0cc-cd24a3f11113"
+ }
+}
\ No newline at end of file
diff --git a/doc/api_samples/os-volumes/v2.79/attach-volume-to-server-req.json b/doc/api_samples/os-volumes/v2.79/attach-volume-to-server-req.json
new file mode 100644
index 00000000000..b4429e12e96
--- /dev/null
+++ b/doc/api_samples/os-volumes/v2.79/attach-volume-to-server-req.json
@@ -0,0 +1,7 @@
+{
+ "volumeAttachment": {
+ "volumeId": "a07f71dc-8151-4e7d-a0cc-cd24a3f11113",
+ "tag": "foo",
+ "delete_on_termination": true
+ }
+}
\ No newline at end of file
diff --git a/doc/api_samples/os-volumes/v2.79/attach-volume-to-server-resp.json b/doc/api_samples/os-volumes/v2.79/attach-volume-to-server-resp.json
new file mode 100644
index 00000000000..3a60cdc0d09
--- /dev/null
+++ b/doc/api_samples/os-volumes/v2.79/attach-volume-to-server-resp.json
@@ -0,0 +1,10 @@
+{
+ "volumeAttachment": {
+ "delete_on_termination": true,
+ "device": "/dev/sdb",
+ "id": "a07f71dc-8151-4e7d-a0cc-cd24a3f11113",
+ "serverId": "09b3b9d1-b8c5-48e1-841d-62c3ef967a88",
+ "tag": "foo",
+ "volumeId": "a07f71dc-8151-4e7d-a0cc-cd24a3f11113"
+ }
+}
\ No newline at end of file
diff --git a/doc/api_samples/os-volumes/v2.79/list-volume-attachments-resp.json b/doc/api_samples/os-volumes/v2.79/list-volume-attachments-resp.json
new file mode 100644
index 00000000000..ffe7c0baf1e
--- /dev/null
+++ b/doc/api_samples/os-volumes/v2.79/list-volume-attachments-resp.json
@@ -0,0 +1,20 @@
+{
+ "volumeAttachments": [
+ {
+ "delete_on_termination": false,
+ "device": "/dev/sdc",
+ "id": "227cc671-f30b-4488-96fd-7d0bf13648d8",
+ "serverId": "d5e4ae35-ac0e-4311-a8c5-0ee863e951d9",
+ "tag": null,
+ "volumeId": "227cc671-f30b-4488-96fd-7d0bf13648d8"
+ },
+ {
+ "delete_on_termination": true,
+ "device": "/dev/sdb",
+ "id": "a07f71dc-8151-4e7d-a0cc-cd24a3f11113",
+ "serverId": "d5e4ae35-ac0e-4311-a8c5-0ee863e951d9",
+ "tag": "foo",
+ "volumeId": "a07f71dc-8151-4e7d-a0cc-cd24a3f11113"
+ }
+ ]
+}
\ No newline at end of file
diff --git a/doc/api_samples/os-volumes/v2.79/update-volume-req.json b/doc/api_samples/os-volumes/v2.79/update-volume-req.json
new file mode 100644
index 00000000000..e5ad47aa3cc
--- /dev/null
+++ b/doc/api_samples/os-volumes/v2.79/update-volume-req.json
@@ -0,0 +1,5 @@
+{
+ "volumeAttachment": {
+ "volumeId": "227cc671-f30b-4488-96fd-7d0bf13648d8"
+ }
+}
\ No newline at end of file
diff --git a/doc/api_samples/os-volumes/v2.79/volume-attachment-detail-resp.json b/doc/api_samples/os-volumes/v2.79/volume-attachment-detail-resp.json
new file mode 100644
index 00000000000..4a54243c2b1
--- /dev/null
+++ b/doc/api_samples/os-volumes/v2.79/volume-attachment-detail-resp.json
@@ -0,0 +1,10 @@
+{
+ "volumeAttachment": {
+ "delete_on_termination": true,
+ "device": "/dev/sdb",
+ "id": "a07f71dc-8151-4e7d-a0cc-cd24a3f11113",
+ "serverId": "2aad99d3-7aa4-41e9-b4e6-3f960b115d68",
+ "tag": "foo",
+ "volumeId": "a07f71dc-8151-4e7d-a0cc-cd24a3f11113"
+ }
+}
\ No newline at end of file
diff --git a/doc/api_samples/os-volumes/v2.85/attach-volume-to-server-req.json b/doc/api_samples/os-volumes/v2.85/attach-volume-to-server-req.json
new file mode 100644
index 00000000000..b4429e12e96
--- /dev/null
+++ b/doc/api_samples/os-volumes/v2.85/attach-volume-to-server-req.json
@@ -0,0 +1,7 @@
+{
+ "volumeAttachment": {
+ "volumeId": "a07f71dc-8151-4e7d-a0cc-cd24a3f11113",
+ "tag": "foo",
+ "delete_on_termination": true
+ }
+}
\ No newline at end of file
diff --git a/doc/api_samples/os-volumes/v2.85/attach-volume-to-server-resp.json b/doc/api_samples/os-volumes/v2.85/attach-volume-to-server-resp.json
new file mode 100644
index 00000000000..3a60cdc0d09
--- /dev/null
+++ b/doc/api_samples/os-volumes/v2.85/attach-volume-to-server-resp.json
@@ -0,0 +1,10 @@
+{
+ "volumeAttachment": {
+ "delete_on_termination": true,
+ "device": "/dev/sdb",
+ "id": "a07f71dc-8151-4e7d-a0cc-cd24a3f11113",
+ "serverId": "09b3b9d1-b8c5-48e1-841d-62c3ef967a88",
+ "tag": "foo",
+ "volumeId": "a07f71dc-8151-4e7d-a0cc-cd24a3f11113"
+ }
+}
\ No newline at end of file
diff --git a/doc/api_samples/os-volumes/v2.85/list-volume-attachments-resp.json b/doc/api_samples/os-volumes/v2.85/list-volume-attachments-resp.json
new file mode 100644
index 00000000000..ffe7c0baf1e
--- /dev/null
+++ b/doc/api_samples/os-volumes/v2.85/list-volume-attachments-resp.json
@@ -0,0 +1,20 @@
+{
+ "volumeAttachments": [
+ {
+ "delete_on_termination": false,
+ "device": "/dev/sdc",
+ "id": "227cc671-f30b-4488-96fd-7d0bf13648d8",
+ "serverId": "d5e4ae35-ac0e-4311-a8c5-0ee863e951d9",
+ "tag": null,
+ "volumeId": "227cc671-f30b-4488-96fd-7d0bf13648d8"
+ },
+ {
+ "delete_on_termination": true,
+ "device": "/dev/sdb",
+ "id": "a07f71dc-8151-4e7d-a0cc-cd24a3f11113",
+ "serverId": "d5e4ae35-ac0e-4311-a8c5-0ee863e951d9",
+ "tag": "foo",
+ "volumeId": "a07f71dc-8151-4e7d-a0cc-cd24a3f11113"
+ }
+ ]
+}
\ No newline at end of file
diff --git a/doc/api_samples/os-volumes/v2.85/update-volume-attachment-delete-flag-req.json b/doc/api_samples/os-volumes/v2.85/update-volume-attachment-delete-flag-req.json
new file mode 100644
index 00000000000..30105458e7c
--- /dev/null
+++ b/doc/api_samples/os-volumes/v2.85/update-volume-attachment-delete-flag-req.json
@@ -0,0 +1,6 @@
+{
+ "volumeAttachment": {
+ "volumeId": "a07f71dc-8151-4e7d-a0cc-cd24a3f11113",
+ "delete_on_termination": true
+ }
+}
diff --git a/doc/api_samples/os-volumes/v2.85/update-volume-req.json b/doc/api_samples/os-volumes/v2.85/update-volume-req.json
new file mode 100644
index 00000000000..e5ad47aa3cc
--- /dev/null
+++ b/doc/api_samples/os-volumes/v2.85/update-volume-req.json
@@ -0,0 +1,5 @@
+{
+ "volumeAttachment": {
+ "volumeId": "227cc671-f30b-4488-96fd-7d0bf13648d8"
+ }
+}
\ No newline at end of file
diff --git a/doc/api_samples/os-volumes/v2.85/volume-attachment-detail-resp.json b/doc/api_samples/os-volumes/v2.85/volume-attachment-detail-resp.json
new file mode 100644
index 00000000000..4a54243c2b1
--- /dev/null
+++ b/doc/api_samples/os-volumes/v2.85/volume-attachment-detail-resp.json
@@ -0,0 +1,10 @@
+{
+ "volumeAttachment": {
+ "delete_on_termination": true,
+ "device": "/dev/sdb",
+ "id": "a07f71dc-8151-4e7d-a0cc-cd24a3f11113",
+ "serverId": "2aad99d3-7aa4-41e9-b4e6-3f960b115d68",
+ "tag": "foo",
+ "volumeId": "a07f71dc-8151-4e7d-a0cc-cd24a3f11113"
+ }
+}
\ No newline at end of file
diff --git a/doc/api_samples/os-volumes/v2.89/attach-volume-to-server-req.json b/doc/api_samples/os-volumes/v2.89/attach-volume-to-server-req.json
new file mode 100644
index 00000000000..b4429e12e96
--- /dev/null
+++ b/doc/api_samples/os-volumes/v2.89/attach-volume-to-server-req.json
@@ -0,0 +1,7 @@
+{
+ "volumeAttachment": {
+ "volumeId": "a07f71dc-8151-4e7d-a0cc-cd24a3f11113",
+ "tag": "foo",
+ "delete_on_termination": true
+ }
+}
\ No newline at end of file
diff --git a/doc/api_samples/os-volumes/v2.89/attach-volume-to-server-resp.json b/doc/api_samples/os-volumes/v2.89/attach-volume-to-server-resp.json
new file mode 100644
index 00000000000..0b37f87012e
--- /dev/null
+++ b/doc/api_samples/os-volumes/v2.89/attach-volume-to-server-resp.json
@@ -0,0 +1,10 @@
+{
+ "volumeAttachment": {
+ "delete_on_termination": true,
+ "device": "/dev/sdb",
+ "id": "a07f71dc-8151-4e7d-a0cc-cd24a3f11113",
+ "serverId": "7ebed2ce-85b3-40b5-84ae-8cc725c37ed2",
+ "tag": "foo",
+ "volumeId": "a07f71dc-8151-4e7d-a0cc-cd24a3f11113"
+ }
+}
\ No newline at end of file
diff --git a/doc/api_samples/os-volumes/v2.89/list-volume-attachments-resp.json b/doc/api_samples/os-volumes/v2.89/list-volume-attachments-resp.json
new file mode 100644
index 00000000000..9935969fbf2
--- /dev/null
+++ b/doc/api_samples/os-volumes/v2.89/list-volume-attachments-resp.json
@@ -0,0 +1,22 @@
+{
+ "volumeAttachments": [
+ {
+ "attachment_id": "979ce4f8-033a-409d-85e6-6b5c0f6a6302",
+ "delete_on_termination": false,
+ "device": "/dev/sdc",
+ "serverId": "7696780b-3f53-4688-ab25-019bfcbbd806",
+ "tag": null,
+ "volumeId": "227cc671-f30b-4488-96fd-7d0bf13648d8",
+ "bdm_uuid": "c088db45-92b8-49e8-81e2-a1b77a144b3b"
+ },
+ {
+ "attachment_id": "c5684109-0311-4fca-9814-350e46ab7d2a",
+ "delete_on_termination": true,
+ "device": "/dev/sdb",
+ "serverId": "7696780b-3f53-4688-ab25-019bfcbbd806",
+ "tag": "foo",
+ "volumeId": "a07f71dc-8151-4e7d-a0cc-cd24a3f11113",
+ "bdm_uuid": "1aa24536-6fb5-426c-8894-d627f39aa48b"
+ }
+ ]
+}
diff --git a/doc/api_samples/os-volumes/v2.89/update-volume-attachment-delete-flag-req.json b/doc/api_samples/os-volumes/v2.89/update-volume-attachment-delete-flag-req.json
new file mode 100644
index 00000000000..a2e17f2b6f0
--- /dev/null
+++ b/doc/api_samples/os-volumes/v2.89/update-volume-attachment-delete-flag-req.json
@@ -0,0 +1,10 @@
+{
+ "volumeAttachment": {
+ "volumeId": "a07f71dc-8151-4e7d-a0cc-cd24a3f11113",
+ "id": "a07f71dc-8151-4e7d-a0cc-cd24a3f11113",
+ "serverId": "fddf0901-8caf-42c9-b496-133c570b171b",
+ "device": "/dev/sdb",
+ "tag": "foo",
+ "delete_on_termination": true
+ }
+}
\ No newline at end of file
diff --git a/doc/api_samples/os-volumes/v2.89/volume-attachment-detail-resp.json b/doc/api_samples/os-volumes/v2.89/volume-attachment-detail-resp.json
new file mode 100644
index 00000000000..eda615f9961
--- /dev/null
+++ b/doc/api_samples/os-volumes/v2.89/volume-attachment-detail-resp.json
@@ -0,0 +1,11 @@
+{
+ "volumeAttachment": {
+ "attachment_id": "721a5c82-5ebc-4c6a-8339-3d33d8d027ed",
+ "delete_on_termination": true,
+ "device": "/dev/sdb",
+ "serverId": "7ebed2ce-85b3-40b5-84ae-8cc725c37ed2",
+ "tag": "foo",
+ "volumeId": "a07f71dc-8151-4e7d-a0cc-cd24a3f11113",
+ "bdm_uuid": "c088db45-92b8-49e8-81e2-a1b77a144b3b"
+ }
+}
diff --git a/doc/api_samples/os-volumes/volume-attachment-detail-resp.json b/doc/api_samples/os-volumes/volume-attachment-detail-resp.json
index 5375033bb9c..41b8f21a88c 100644
--- a/doc/api_samples/os-volumes/volume-attachment-detail-resp.json
+++ b/doc/api_samples/os-volumes/volume-attachment-detail-resp.json
@@ -1,8 +1,8 @@
{
"volumeAttachment": {
- "device": "/dev/sdd",
- "id": "a26887c6-c47b-4654-abb5-dfadf7d3f803",
- "serverId": "2390fb4d-1693-45d7-b309-e29c4af16538",
- "volumeId": "a26887c6-c47b-4654-abb5-dfadf7d3f803"
+ "device": "/dev/sdb",
+ "id": "a07f71dc-8151-4e7d-a0cc-cd24a3f11113",
+ "serverId": "1ad6852e-6605-4510-b639-d0bff864b49a",
+ "volumeId": "a07f71dc-8151-4e7d-a0cc-cd24a3f11113"
}
}
\ No newline at end of file
diff --git a/doc/api_samples/server-ips/server-ips-network-resp.json b/doc/api_samples/server-ips/server-ips-network-resp.json
index 8669202e745..fdb070ac3cd 100644
--- a/doc/api_samples/server-ips/server-ips-network-resp.json
+++ b/doc/api_samples/server-ips/server-ips-network-resp.json
@@ -1,7 +1,7 @@
{
"private": [
{
- "addr": "192.168.0.3",
+ "addr": "192.168.1.30",
"version": 4
}
]
diff --git a/doc/api_samples/server-ips/server-ips-resp.json b/doc/api_samples/server-ips/server-ips-resp.json
index 8a350056139..f710c5777ed 100644
--- a/doc/api_samples/server-ips/server-ips-resp.json
+++ b/doc/api_samples/server-ips/server-ips-resp.json
@@ -2,7 +2,7 @@
"addresses": {
"private": [
{
- "addr": "192.168.0.3",
+ "addr": "192.168.1.30",
"version": 4
}
]
diff --git a/doc/api_samples/server-migrations/v2.80/live-migrate-server.json b/doc/api_samples/server-migrations/v2.80/live-migrate-server.json
new file mode 100644
index 00000000000..c2f5bf6c989
--- /dev/null
+++ b/doc/api_samples/server-migrations/v2.80/live-migrate-server.json
@@ -0,0 +1,6 @@
+{
+ "os-migrateLive": {
+ "host": null,
+ "block_migration": "auto"
+ }
+}
diff --git a/doc/api_samples/server-migrations/v2.80/migrations-get.json b/doc/api_samples/server-migrations/v2.80/migrations-get.json
new file mode 100644
index 00000000000..7de0e63201e
--- /dev/null
+++ b/doc/api_samples/server-migrations/v2.80/migrations-get.json
@@ -0,0 +1,23 @@
+{
+ "migration": {
+ "created_at": "2016-01-29T13:42:02.000000",
+ "dest_compute": "compute2",
+ "dest_host": "1.2.3.4",
+ "dest_node": "node2",
+ "id": 1,
+ "server_uuid": "4cfba335-03d8-49b2-8c52-e69043d1e8fe",
+ "source_compute": "compute1",
+ "source_node": "node1",
+ "status": "running",
+ "memory_total_bytes": 123456,
+ "memory_processed_bytes": 12345,
+ "memory_remaining_bytes": 111111,
+ "disk_total_bytes": 234567,
+ "disk_processed_bytes": 23456,
+ "disk_remaining_bytes": 211111,
+ "updated_at": "2016-01-29T13:42:02.000000",
+ "uuid": "12341d4b-346a-40d0-83c6-5f4f6892b650",
+ "user_id": "8dbaa0f0-ab95-4ffe-8cb4-9c89d2ac9d24",
+ "project_id": "5f705771-3aa9-4f4c-8660-0d9522ffdbea"
+ }
+}
diff --git a/doc/api_samples/server-migrations/v2.80/migrations-index.json b/doc/api_samples/server-migrations/v2.80/migrations-index.json
new file mode 100644
index 00000000000..460529a5896
--- /dev/null
+++ b/doc/api_samples/server-migrations/v2.80/migrations-index.json
@@ -0,0 +1,25 @@
+{
+ "migrations": [
+ {
+ "created_at": "2016-01-29T13:42:02.000000",
+ "dest_compute": "compute2",
+ "dest_host": "1.2.3.4",
+ "dest_node": "node2",
+ "id": 1,
+ "server_uuid": "4cfba335-03d8-49b2-8c52-e69043d1e8fe",
+ "source_compute": "compute1",
+ "source_node": "node1",
+ "status": "running",
+ "memory_total_bytes": 123456,
+ "memory_processed_bytes": 12345,
+ "memory_remaining_bytes": 111111,
+ "disk_total_bytes": 234567,
+ "disk_processed_bytes": 23456,
+ "disk_remaining_bytes": 211111,
+ "updated_at": "2016-01-29T13:42:02.000000",
+ "uuid": "12341d4b-346a-40d0-83c6-5f4f6892b650",
+ "user_id": "8dbaa0f0-ab95-4ffe-8cb4-9c89d2ac9d24",
+ "project_id": "5f705771-3aa9-4f4c-8660-0d9522ffdbea"
+ }
+ ]
+}
diff --git a/doc/api_samples/servers/server-action-addfloatingip-req.json b/doc/api_samples/servers/server-action-addfloatingip-req.json
index e4ad5638ab2..654c6bda4c8 100644
--- a/doc/api_samples/servers/server-action-addfloatingip-req.json
+++ b/doc/api_samples/servers/server-action-addfloatingip-req.json
@@ -1,6 +1,6 @@
{
"addFloatingIp" : {
"address": "10.10.10.10",
- "fixed_address": "192.168.0.3"
+ "fixed_address": "192.168.1.30"
}
}
\ No newline at end of file
diff --git a/doc/api_samples/servers/server-action-rebuild-resp.json b/doc/api_samples/servers/server-action-rebuild-resp.json
index b66dc4ce80d..a021f888a03 100644
--- a/doc/api_samples/servers/server-action-rebuild-resp.json
+++ b/doc/api_samples/servers/server-action-rebuild-resp.json
@@ -6,7 +6,7 @@
"addresses": {
"private": [
{
- "addr": "192.168.0.3",
+ "addr": "192.168.1.30",
"version": 4
}
]
@@ -51,6 +51,6 @@
"status": "ACTIVE",
"tenant_id": "6f70656e737461636b20342065766572",
"updated": "2013-11-14T06:29:02Z",
- "user_id": "fake"
+ "user_id": "admin"
}
}
\ No newline at end of file
diff --git a/doc/api_samples/servers/server-create-req-v237.json b/doc/api_samples/servers/server-create-req-v237.json
index abffb363e43..8b5c272e782 100644
--- a/doc/api_samples/servers/server-create-req-v237.json
+++ b/doc/api_samples/servers/server-create-req-v237.json
@@ -5,7 +5,7 @@
"name" : "new-server-test",
"imageRef" : "70a599e0-31e7-49b7-b260-868f441e862b",
"flavorRef" : "1",
- "availability_zone": "nova",
+ "availability_zone": "us-west",
"OS-DCF:diskConfig": "AUTO",
"metadata" : {
"My Server Name" : "Apache1"
diff --git a/doc/api_samples/servers/server-create-req-v257.json b/doc/api_samples/servers/server-create-req-v257.json
index c6d8dec2424..7c5011e4fe1 100644
--- a/doc/api_samples/servers/server-create-req-v257.json
+++ b/doc/api_samples/servers/server-create-req-v257.json
@@ -5,7 +5,7 @@
"name" : "new-server-test",
"imageRef" : "70a599e0-31e7-49b7-b260-868f441e862b",
"flavorRef" : "http://openstack.example.com/flavors/1",
- "availability_zone": "nova",
+ "availability_zone": "us-west",
"OS-DCF:diskConfig": "AUTO",
"metadata" : {
"My Server Name" : "Apache1"
diff --git a/doc/api_samples/servers/server-create-req.json b/doc/api_samples/servers/server-create-req.json
index 4ac0157a85e..f51255b9065 100644
--- a/doc/api_samples/servers/server-create-req.json
+++ b/doc/api_samples/servers/server-create-req.json
@@ -5,7 +5,7 @@
"name" : "new-server-test",
"imageRef" : "70a599e0-31e7-49b7-b260-868f441e862b",
"flavorRef" : "1",
- "availability_zone": "nova",
+ "availability_zone": "us-west",
"OS-DCF:diskConfig": "AUTO",
"metadata" : {
"My Server Name" : "Apache1"
diff --git a/doc/api_samples/servers/server-get-resp.json b/doc/api_samples/servers/server-get-resp.json
index 6c1e246f7cb..66d1930fb48 100644
--- a/doc/api_samples/servers/server-get-resp.json
+++ b/doc/api_samples/servers/server-get-resp.json
@@ -5,8 +5,8 @@
"addresses": {
"private": [
{
- "addr": "192.168.0.3",
- "OS-EXT-IPS-MAC:mac_addr": "aa:bb:cc:dd:ee:ff",
+ "addr": "192.168.1.30",
+ "OS-EXT-IPS-MAC:mac_addr": "00:0c:29:0d:11:74",
"OS-EXT-IPS:type": "fixed",
"version": 4
}
@@ -50,7 +50,7 @@
"name": "new-server-test",
"config_drive": "",
"OS-DCF:diskConfig": "AUTO",
- "OS-EXT-AZ:availability_zone": "nova",
+ "OS-EXT-AZ:availability_zone": "us-west",
"OS-EXT-SRV-ATTR:host": "b8b357f7100d4391828f2177c922ef93",
"OS-EXT-SRV-ATTR:hypervisor_hostname": "fake-mini",
"OS-EXT-SRV-ATTR:instance_name": "instance-00000001",
@@ -76,6 +76,6 @@
"status": "ACTIVE",
"tenant_id": "6f70656e737461636b20342065766572",
"updated": "2013-09-03T04:01:33Z",
- "user_id": "fake"
+ "user_id": "admin"
}
}
diff --git a/doc/api_samples/servers/server-update-resp.json b/doc/api_samples/servers/server-update-resp.json
index 4607e312f23..6c9de44daaf 100644
--- a/doc/api_samples/servers/server-update-resp.json
+++ b/doc/api_samples/servers/server-update-resp.json
@@ -6,7 +6,7 @@
"addresses": {
"private": [
{
- "addr": "192.168.0.3",
+ "addr": "192.168.1.30",
"version": 4
}
]
@@ -50,6 +50,6 @@
"status": "ACTIVE",
"tenant_id": "6f70656e737461636b20342065766572",
"updated": "2012-12-02T02:11:58Z",
- "user_id": "fake"
+ "user_id": "admin"
}
}
\ No newline at end of file
diff --git a/doc/api_samples/servers/servers-details-resp.json b/doc/api_samples/servers/servers-details-resp.json
index 28a1e98efe1..874164288c1 100644
--- a/doc/api_samples/servers/servers-details-resp.json
+++ b/doc/api_samples/servers/servers-details-resp.json
@@ -6,8 +6,8 @@
"addresses": {
"private": [
{
- "addr": "192.168.0.3",
- "OS-EXT-IPS-MAC:mac_addr": "aa:bb:cc:dd:ee:ff",
+ "addr": "192.168.1.30",
+ "OS-EXT-IPS-MAC:mac_addr": "00:0c:29:0d:11:74",
"OS-EXT-IPS:type": "fixed",
"version": 4
}
@@ -51,7 +51,7 @@
"name": "new-server-test",
"config_drive": "",
"OS-DCF:diskConfig": "AUTO",
- "OS-EXT-AZ:availability_zone": "nova",
+ "OS-EXT-AZ:availability_zone": "us-west",
"OS-EXT-SRV-ATTR:host": "c3f14e9812ad496baf92ccfb3c61e15f",
"OS-EXT-SRV-ATTR:hypervisor_hostname": "fake-mini",
"OS-EXT-SRV-ATTR:instance_name": "instance-00000001",
@@ -77,7 +77,7 @@
"status": "ACTIVE",
"tenant_id": "6f70656e737461636b20342065766572",
"updated": "2013-09-03T04:01:32Z",
- "user_id": "fake"
+ "user_id": "admin"
}
],
"servers_links": [
diff --git a/doc/api_samples/servers/v2.16/server-get-resp.json b/doc/api_samples/servers/v2.16/server-get-resp.json
index 8737a0e80d3..99d0155b9f7 100644
--- a/doc/api_samples/servers/v2.16/server-get-resp.json
+++ b/doc/api_samples/servers/v2.16/server-get-resp.json
@@ -3,8 +3,8 @@
"addresses": {
"private": [
{
- "addr": "192.168.0.3",
- "OS-EXT-IPS-MAC:mac_addr": "aa:bb:cc:dd:ee:ff",
+ "addr": "192.168.1.30",
+ "OS-EXT-IPS-MAC:mac_addr": "00:0c:29:0d:11:74",
"OS-EXT-IPS:type": "fixed",
"version": 4
}
@@ -48,7 +48,7 @@
"name": "new-server-test",
"config_drive": "",
"OS-DCF:diskConfig": "AUTO",
- "OS-EXT-AZ:availability_zone": "nova",
+ "OS-EXT-AZ:availability_zone": "us-west",
"OS-EXT-SRV-ATTR:host": "c5f474bf81474f9dbbc404d5b2e4e9b3",
"OS-EXT-SRV-ATTR:hypervisor_hostname": "fake-mini",
"OS-EXT-SRV-ATTR:instance_name": "instance-00000001",
@@ -87,6 +87,6 @@
"host_status": "UP",
"tenant_id": "6f70656e737461636b20342065766572",
"updated": "2013-09-16T02:55:08Z",
- "user_id": "fake"
+ "user_id": "admin"
}
}
diff --git a/doc/api_samples/servers/v2.16/servers-details-resp.json b/doc/api_samples/servers/v2.16/servers-details-resp.json
index 9fc17f6137b..694909da35e 100644
--- a/doc/api_samples/servers/v2.16/servers-details-resp.json
+++ b/doc/api_samples/servers/v2.16/servers-details-resp.json
@@ -4,8 +4,8 @@
"addresses": {
"private": [
{
- "addr": "192.168.0.3",
- "OS-EXT-IPS-MAC:mac_addr": "aa:bb:cc:dd:ee:ff",
+ "addr": "192.168.1.30",
+ "OS-EXT-IPS-MAC:mac_addr": "00:0c:29:0d:11:74",
"OS-EXT-IPS:type": "fixed",
"version": 4
}
@@ -49,7 +49,7 @@
"name": "new-server-test",
"config_drive": "",
"OS-DCF:diskConfig": "AUTO",
- "OS-EXT-AZ:availability_zone": "nova",
+ "OS-EXT-AZ:availability_zone": "us-west",
"OS-EXT-SRV-ATTR:host": "bc8efe4fdb7148a4bb921a2b03d17de6",
"OS-EXT-SRV-ATTR:hypervisor_hostname": "fake-mini",
"OS-EXT-SRV-ATTR:instance_name": "instance-00000001",
@@ -88,7 +88,7 @@
"host_status": "UP",
"tenant_id": "6f70656e737461636b20342065766572",
"updated": "2013-09-16T02:55:05Z",
- "user_id": "fake"
+ "user_id": "admin"
}
],
"servers_links": [
diff --git a/doc/api_samples/servers/v2.19/server-action-rebuild-resp.json b/doc/api_samples/servers/v2.19/server-action-rebuild-resp.json
index b38d40709d1..46b34f09de7 100644
--- a/doc/api_samples/servers/v2.19/server-action-rebuild-resp.json
+++ b/doc/api_samples/servers/v2.19/server-action-rebuild-resp.json
@@ -5,7 +5,7 @@
"addresses": {
"private": [
{
- "addr": "192.168.0.3",
+ "addr": "192.168.1.30",
"version": 4
}
]
@@ -53,6 +53,6 @@
"OS-DCF:diskConfig": "AUTO",
"tenant_id": "6f70656e737461636b20342065766572",
"updated": "2013-11-14T06:29:02Z",
- "user_id": "fake"
+ "user_id": "admin"
}
}
diff --git a/doc/api_samples/servers/v2.19/server-get-resp.json b/doc/api_samples/servers/v2.19/server-get-resp.json
index 3fd3bc93610..f8efde972f7 100644
--- a/doc/api_samples/servers/v2.19/server-get-resp.json
+++ b/doc/api_samples/servers/v2.19/server-get-resp.json
@@ -5,9 +5,9 @@
"addresses": {
"private": [
{
- "OS-EXT-IPS-MAC:mac_addr": "aa:bb:cc:dd:ee:ff",
+ "OS-EXT-IPS-MAC:mac_addr": "00:0c:29:0d:11:74",
"OS-EXT-IPS:type": "fixed",
- "addr": "192.168.0.3",
+ "addr": "192.168.1.30",
"version": 4
}
]
@@ -88,6 +88,6 @@
"status": "ACTIVE",
"tenant_id": "6f70656e737461636b20342065766572",
"updated": "2015-12-07T17:24:15Z",
- "user_id": "fake"
+ "user_id": "admin"
}
}
diff --git a/doc/api_samples/servers/v2.19/server-put-resp.json b/doc/api_samples/servers/v2.19/server-put-resp.json
index 29c8f6ac9b3..ede653a057c 100644
--- a/doc/api_samples/servers/v2.19/server-put-resp.json
+++ b/doc/api_samples/servers/v2.19/server-put-resp.json
@@ -6,7 +6,7 @@
"addresses": {
"private": [
{
- "addr": "192.168.0.3",
+ "addr": "192.168.1.30",
"version": 4
}
]
@@ -52,6 +52,6 @@
"status": "ACTIVE",
"tenant_id": "6f70656e737461636b20342065766572",
"updated": "2015-12-07T19:19:36Z",
- "user_id": "fake"
+ "user_id": "admin"
}
}
\ No newline at end of file
diff --git a/doc/api_samples/servers/v2.19/servers-details-resp.json b/doc/api_samples/servers/v2.19/servers-details-resp.json
index 37f83b7dd35..dfbd0baacb9 100644
--- a/doc/api_samples/servers/v2.19/servers-details-resp.json
+++ b/doc/api_samples/servers/v2.19/servers-details-resp.json
@@ -6,9 +6,9 @@
"addresses": {
"private": [
{
- "OS-EXT-IPS-MAC:mac_addr": "aa:bb:cc:dd:ee:ff",
+ "OS-EXT-IPS-MAC:mac_addr": "00:0c:29:0d:11:74",
"OS-EXT-IPS:type": "fixed",
- "addr": "192.168.0.3",
+ "addr": "192.168.1.30",
"version": 4
}
]
@@ -89,7 +89,7 @@
"status": "ACTIVE",
"tenant_id": "6f70656e737461636b20342065766572",
"updated": "2015-12-07T19:54:49Z",
- "user_id": "fake"
+ "user_id": "admin"
}
],
"servers_links": [
diff --git a/doc/api_samples/servers/v2.26/server-action-rebuild-resp.json b/doc/api_samples/servers/v2.26/server-action-rebuild-resp.json
index 86a7b41c232..781cee5a0c4 100644
--- a/doc/api_samples/servers/v2.26/server-action-rebuild-resp.json
+++ b/doc/api_samples/servers/v2.26/server-action-rebuild-resp.json
@@ -5,7 +5,7 @@
"addresses": {
"private": [
{
- "addr": "192.168.0.3",
+ "addr": "192.168.1.30",
"version": 4
}
]
@@ -51,7 +51,7 @@
"status": "ACTIVE",
"tenant_id": "6f70656e737461636b20342065766572",
"updated": "2013-11-14T06:29:02Z",
- "user_id": "fake",
+ "user_id": "admin",
"locked": false,
"description" : "description of foobar",
"tags": ["tag1", "tag2"]
diff --git a/doc/api_samples/servers/v2.3/server-get-resp.json b/doc/api_samples/servers/v2.3/server-get-resp.json
index 22882a074a3..2bfa311eadd 100644
--- a/doc/api_samples/servers/v2.3/server-get-resp.json
+++ b/doc/api_samples/servers/v2.3/server-get-resp.json
@@ -5,8 +5,8 @@
"addresses": {
"private": [
{
- "addr": "192.168.0.3",
- "OS-EXT-IPS-MAC:mac_addr": "aa:bb:cc:dd:ee:ff",
+ "addr": "192.168.1.30",
+ "OS-EXT-IPS-MAC:mac_addr": "00:0c:29:0d:11:74",
"OS-EXT-IPS:type": "fixed",
"version": 4
}
@@ -50,7 +50,7 @@
"name": "new-server-test",
"config_drive": "",
"OS-DCF:diskConfig": "AUTO",
- "OS-EXT-AZ:availability_zone": "nova",
+ "OS-EXT-AZ:availability_zone": "us-west",
"OS-EXT-SRV-ATTR:host": "b8b357f7100d4391828f2177c922ef93",
"OS-EXT-SRV-ATTR:hypervisor_hostname": "fake-mini",
"OS-EXT-SRV-ATTR:instance_name": "instance-00000001",
@@ -85,6 +85,6 @@
"status": "ACTIVE",
"tenant_id": "6f70656e737461636b20342065766572",
"updated": "2013-09-03T04:01:33Z",
- "user_id": "fake"
+ "user_id": "admin"
}
}
diff --git a/doc/api_samples/servers/v2.3/servers-details-resp.json b/doc/api_samples/servers/v2.3/servers-details-resp.json
index 5e3876fd2c9..6cc2a0c880a 100644
--- a/doc/api_samples/servers/v2.3/servers-details-resp.json
+++ b/doc/api_samples/servers/v2.3/servers-details-resp.json
@@ -6,8 +6,8 @@
"addresses": {
"private": [
{
- "addr": "192.168.0.3",
- "OS-EXT-IPS-MAC:mac_addr": "aa:bb:cc:dd:ee:ff",
+ "addr": "192.168.1.30",
+ "OS-EXT-IPS-MAC:mac_addr": "00:0c:29:0d:11:74",
"OS-EXT-IPS:type": "fixed",
"version": 4
}
@@ -51,7 +51,7 @@
"name": "new-server-test",
"config_drive": "",
"OS-DCF:diskConfig": "AUTO",
- "OS-EXT-AZ:availability_zone": "nova",
+ "OS-EXT-AZ:availability_zone": "us-west",
"OS-EXT-SRV-ATTR:host": "c3f14e9812ad496baf92ccfb3c61e15f",
"OS-EXT-SRV-ATTR:hypervisor_hostname": "fake-mini",
"OS-EXT-SRV-ATTR:instance_name": "instance-00000001",
@@ -86,7 +86,7 @@
"status": "ACTIVE",
"tenant_id": "6f70656e737461636b20342065766572",
"updated": "2013-09-03T04:01:32Z",
- "user_id": "fake"
+ "user_id": "admin"
}
],
"servers_links": [
diff --git a/doc/api_samples/servers/v2.32/server-create-req.json b/doc/api_samples/servers/v2.32/server-create-req.json
index e4f79a43932..f9078243963 100644
--- a/doc/api_samples/servers/v2.32/server-create-req.json
+++ b/doc/api_samples/servers/v2.32/server-create-req.json
@@ -3,7 +3,7 @@
"name" : "device-tagging-server",
"flavorRef" : "http://openstack.example.com/flavors/1",
"networks" : [{
- "uuid" : "ff608d40-75e9-48cb-b745-77bb55b5eaf2",
+ "uuid" : "3cb9bc59-5699-4588-a4b1-b87f96708bc6",
"tag": "nic1"
}],
"block_device_mapping_v2": [{
diff --git a/doc/api_samples/servers/v2.42/server-create-req.json b/doc/api_samples/servers/v2.42/server-create-req.json
index 4b000b235c0..f9078243963 100644
--- a/doc/api_samples/servers/v2.42/server-create-req.json
+++ b/doc/api_samples/servers/v2.42/server-create-req.json
@@ -3,7 +3,7 @@
"name" : "device-tagging-server",
"flavorRef" : "http://openstack.example.com/flavors/1",
"networks" : [{
- "uuid" : "ff608d40-75e9-48cb-b745-77bb55b5eaf2",
+ "uuid" : "3cb9bc59-5699-4588-a4b1-b87f96708bc6",
"tag": "nic1"
}],
"block_device_mapping_v2": [{
@@ -15,4 +15,4 @@
"tag": "disk1"
}]
}
-}
\ No newline at end of file
+}
diff --git a/doc/api_samples/servers/v2.47/server-action-rebuild-resp.json b/doc/api_samples/servers/v2.47/server-action-rebuild-resp.json
index 7069951d97f..790e31d2412 100644
--- a/doc/api_samples/servers/v2.47/server-action-rebuild-resp.json
+++ b/doc/api_samples/servers/v2.47/server-action-rebuild-resp.json
@@ -5,7 +5,7 @@
"addresses": {
"private": [
{
- "addr": "192.168.0.3",
+ "addr": "192.168.1.30",
"version": 4
}
]
@@ -54,6 +54,6 @@
"tags": [],
"tenant_id": "6f70656e737461636b20342065766572",
"updated": "2013-11-14T06:29:02Z",
- "user_id": "fake"
+ "user_id": "admin"
}
}
diff --git a/doc/api_samples/servers/v2.47/server-create-req.json b/doc/api_samples/servers/v2.47/server-create-req.json
index 4068a9ed1ef..bd5dbca36f9 100644
--- a/doc/api_samples/servers/v2.47/server-create-req.json
+++ b/doc/api_samples/servers/v2.47/server-create-req.json
@@ -5,7 +5,7 @@
"name" : "new-server-test",
"imageRef" : "70a599e0-31e7-49b7-b260-868f441e862b",
"flavorRef" : "6",
- "availability_zone": "nova",
+ "availability_zone": "us-west",
"OS-DCF:diskConfig": "AUTO",
"metadata" : {
"My Server Name" : "Apache1"
diff --git a/doc/api_samples/servers/v2.47/server-get-resp.json b/doc/api_samples/servers/v2.47/server-get-resp.json
index 9983aec3eea..38c28a2d6de 100644
--- a/doc/api_samples/servers/v2.47/server-get-resp.json
+++ b/doc/api_samples/servers/v2.47/server-get-resp.json
@@ -1,7 +1,7 @@
{
"server": {
"OS-DCF:diskConfig": "AUTO",
- "OS-EXT-AZ:availability_zone": "nova",
+ "OS-EXT-AZ:availability_zone": "us-west",
"OS-EXT-SRV-ATTR:host": "compute",
"OS-EXT-SRV-ATTR:hostname": "new-server-test",
"OS-EXT-SRV-ATTR:hypervisor_hostname": "fake-mini",
@@ -22,9 +22,9 @@
"addresses": {
"private": [
{
- "OS-EXT-IPS-MAC:mac_addr": "aa:bb:cc:dd:ee:ff",
+ "OS-EXT-IPS-MAC:mac_addr": "00:0c:29:0d:11:74",
"OS-EXT-IPS:type": "fixed",
- "addr": "192.168.0.3",
+ "addr": "192.168.1.30",
"version": 4
}
]
@@ -36,8 +36,7 @@
"disk": 1,
"ephemeral": 0,
"extra_specs": {
- "hw:mem_page_size": "2048",
- "hw:cpu_policy": "dedicated"
+ "hw:numa_nodes": "1"
},
"original_name": "m1.tiny.specs",
"ram": 512,
@@ -92,6 +91,6 @@
"tags": [],
"tenant_id": "6f70656e737461636b20342065766572",
"updated": "2017-02-14T19:24:00Z",
- "user_id": "fake"
+ "user_id": "admin"
}
}
diff --git a/doc/api_samples/servers/v2.47/server-update-resp.json b/doc/api_samples/servers/v2.47/server-update-resp.json
index abf9e107d84..7857b0e34e7 100644
--- a/doc/api_samples/servers/v2.47/server-update-resp.json
+++ b/doc/api_samples/servers/v2.47/server-update-resp.json
@@ -6,7 +6,7 @@
"addresses": {
"private": [
{
- "addr": "192.168.0.3",
+ "addr": "192.168.1.30",
"version": 4
}
]
@@ -53,6 +53,6 @@
"tags": [],
"tenant_id": "6f70656e737461636b20342065766572",
"updated": "2012-12-02T02:11:58Z",
- "user_id": "fake"
+ "user_id": "admin"
}
}
diff --git a/doc/api_samples/servers/v2.47/servers-details-resp.json b/doc/api_samples/servers/v2.47/servers-details-resp.json
index a9aaea4cff4..67c81e8fc2f 100644
--- a/doc/api_samples/servers/v2.47/servers-details-resp.json
+++ b/doc/api_samples/servers/v2.47/servers-details-resp.json
@@ -2,7 +2,7 @@
"servers": [
{
"OS-DCF:diskConfig": "AUTO",
- "OS-EXT-AZ:availability_zone": "nova",
+ "OS-EXT-AZ:availability_zone": "us-west",
"OS-EXT-SRV-ATTR:host": "compute",
"OS-EXT-SRV-ATTR:hostname": "new-server-test",
"OS-EXT-SRV-ATTR:hypervisor_hostname": "fake-mini",
@@ -23,9 +23,9 @@
"addresses": {
"private": [
{
- "OS-EXT-IPS-MAC:mac_addr": "aa:bb:cc:dd:ee:ff",
+ "OS-EXT-IPS-MAC:mac_addr": "00:0c:29:0d:11:74",
"OS-EXT-IPS:type": "fixed",
- "addr": "192.168.0.3",
+ "addr": "192.168.1.30",
"version": 4
}
]
@@ -37,8 +37,7 @@
"disk": 1,
"ephemeral": 0,
"extra_specs": {
- "hw:mem_page_size": "2048",
- "hw:cpu_policy": "dedicated"
+ "hw:numa_nodes": "1"
},
"original_name": "m1.tiny.specs",
"ram": 512,
@@ -93,7 +92,7 @@
"tags": [],
"tenant_id": "6f70656e737461636b20342065766572",
"updated": "2017-02-14T19:24:43Z",
- "user_id": "fake"
+ "user_id": "admin"
}
],
"servers_links": [
diff --git a/doc/api_samples/servers/v2.52/server-create-req.json b/doc/api_samples/servers/v2.52/server-create-req.json
index 36d2b4cf5cc..b629e717473 100644
--- a/doc/api_samples/servers/v2.52/server-create-req.json
+++ b/doc/api_samples/servers/v2.52/server-create-req.json
@@ -5,7 +5,7 @@
"name" : "new-server-test",
"imageRef" : "70a599e0-31e7-49b7-b260-868f441e862b",
"flavorRef" : "http://openstack.example.com/flavors/1",
- "availability_zone": "nova",
+ "availability_zone": "us-west",
"OS-DCF:diskConfig": "AUTO",
"metadata" : {
"My Server Name" : "Apache1"
diff --git a/doc/api_samples/servers/v2.52/server-get-resp.json b/doc/api_samples/servers/v2.52/server-get-resp.json
index ff651f8547a..ec3ea201210 100644
--- a/doc/api_samples/servers/v2.52/server-get-resp.json
+++ b/doc/api_samples/servers/v2.52/server-get-resp.json
@@ -1,7 +1,7 @@
{
"server": {
"OS-DCF:diskConfig": "AUTO",
- "OS-EXT-AZ:availability_zone": "nova",
+ "OS-EXT-AZ:availability_zone": "us-west",
"OS-EXT-SRV-ATTR:host": "compute",
"OS-EXT-SRV-ATTR:hostname": "new-server-test",
"OS-EXT-SRV-ATTR:hypervisor_hostname": "fake-mini",
@@ -22,9 +22,9 @@
"addresses": {
"private": [
{
- "OS-EXT-IPS-MAC:mac_addr": "aa:bb:cc:dd:ee:ff",
+ "OS-EXT-IPS-MAC:mac_addr": "00:0c:29:0d:11:74",
"OS-EXT-IPS:type": "fixed",
- "addr": "192.168.0.3",
+ "addr": "192.168.1.30",
"version": 4
}
]
@@ -89,6 +89,6 @@
"tags": ["tag1", "tag2"],
"tenant_id": "6f70656e737461636b20342065766572",
"updated": "2017-02-14T19:24:00Z",
- "user_id": "fake"
+ "user_id": "admin"
}
}
diff --git a/doc/api_samples/servers/v2.52/servers-details-resp.json b/doc/api_samples/servers/v2.52/servers-details-resp.json
index 98285ddc098..212a2ec1ded 100644
--- a/doc/api_samples/servers/v2.52/servers-details-resp.json
+++ b/doc/api_samples/servers/v2.52/servers-details-resp.json
@@ -2,7 +2,7 @@
"servers": [
{
"OS-DCF:diskConfig": "AUTO",
- "OS-EXT-AZ:availability_zone": "nova",
+ "OS-EXT-AZ:availability_zone": "us-west",
"OS-EXT-SRV-ATTR:host": "compute",
"OS-EXT-SRV-ATTR:hostname": "new-server-test",
"OS-EXT-SRV-ATTR:hypervisor_hostname": "fake-mini",
@@ -23,9 +23,9 @@
"addresses": {
"private": [
{
- "OS-EXT-IPS-MAC:mac_addr": "aa:bb:cc:dd:ee:ff",
+ "OS-EXT-IPS-MAC:mac_addr": "00:0c:29:0d:11:74",
"OS-EXT-IPS:type": "fixed",
- "addr": "192.168.0.3",
+ "addr": "192.168.1.30",
"version": 4
}
]
@@ -90,7 +90,7 @@
"tags": ["tag1", "tag2"],
"tenant_id": "6f70656e737461636b20342065766572",
"updated": "2017-02-14T19:24:43Z",
- "user_id": "fake"
+ "user_id": "admin"
}
],
"servers_links": [
diff --git a/doc/api_samples/servers/v2.54/server-action-rebuild-resp.json b/doc/api_samples/servers/v2.54/server-action-rebuild-resp.json
index 612bd601994..cf809aeb530 100644
--- a/doc/api_samples/servers/v2.54/server-action-rebuild-resp.json
+++ b/doc/api_samples/servers/v2.54/server-action-rebuild-resp.json
@@ -5,7 +5,7 @@
"addresses": {
"private": [
{
- "addr": "192.168.0.3",
+ "addr": "192.168.1.30",
"version": 4
}
]
@@ -54,7 +54,7 @@
"OS-DCF:diskConfig": "AUTO",
"tenant_id": "6f70656e737461636b20342065766572",
"updated": "2013-11-14T06:29:02Z",
- "user_id": "fake",
+ "user_id": "admin",
"tags": []
}
}
diff --git a/doc/api_samples/servers/v2.57/server-action-rebuild-resp.json b/doc/api_samples/servers/v2.57/server-action-rebuild-resp.json
index 05225e7dcb2..92b43c45dad 100644
--- a/doc/api_samples/servers/v2.57/server-action-rebuild-resp.json
+++ b/doc/api_samples/servers/v2.57/server-action-rebuild-resp.json
@@ -5,7 +5,7 @@
"addresses": {
"private": [
{
- "addr": "192.168.0.3",
+ "addr": "192.168.1.30",
"version": 4
}
]
@@ -54,7 +54,7 @@
"OS-DCF:diskConfig": "AUTO",
"tenant_id": "6f70656e737461636b20342065766572",
"updated": "2013-11-14T06:29:02Z",
- "user_id": "fake",
+ "user_id": "admin",
"tags": [],
"user_data": "ZWNobyAiaGVsbG8gd29ybGQi"
}
diff --git a/doc/api_samples/servers/v2.57/server-create-req.json b/doc/api_samples/servers/v2.57/server-create-req.json
index c6d8dec2424..7c5011e4fe1 100644
--- a/doc/api_samples/servers/v2.57/server-create-req.json
+++ b/doc/api_samples/servers/v2.57/server-create-req.json
@@ -5,7 +5,7 @@
"name" : "new-server-test",
"imageRef" : "70a599e0-31e7-49b7-b260-868f441e862b",
"flavorRef" : "http://openstack.example.com/flavors/1",
- "availability_zone": "nova",
+ "availability_zone": "us-west",
"OS-DCF:diskConfig": "AUTO",
"metadata" : {
"My Server Name" : "Apache1"
diff --git a/doc/api_samples/servers/v2.63/server-action-rebuild-resp.json b/doc/api_samples/servers/v2.63/server-action-rebuild-resp.json
index 546e7cb8bb4..fa3c34cb6d5 100644
--- a/doc/api_samples/servers/v2.63/server-action-rebuild-resp.json
+++ b/doc/api_samples/servers/v2.63/server-action-rebuild-resp.json
@@ -6,7 +6,7 @@
"addresses": {
"private": [
{
- "addr": "192.168.0.3",
+ "addr": "192.168.1.30",
"version": 4
}
]
@@ -17,8 +17,7 @@
"disk": 1,
"ephemeral": 0,
"extra_specs": {
- "hw:cpu_policy": "dedicated",
- "hw:mem_page_size": "2048"
+ "hw:numa_nodes": "1"
},
"original_name": "m1.tiny.specs",
"ram": 512,
@@ -63,7 +62,7 @@
"674736e3-f25c-405c-8362-bbf991e0ce0a"
],
"updated": "2017-10-10T16:06:03Z",
- "user_id": "fake"
+ "user_id": "admin"
}
}
diff --git a/doc/api_samples/servers/v2.63/server-create-req.json b/doc/api_samples/servers/v2.63/server-create-req.json
index 5523ce8d349..7a576f02497 100644
--- a/doc/api_samples/servers/v2.63/server-create-req.json
+++ b/doc/api_samples/servers/v2.63/server-create-req.json
@@ -5,7 +5,7 @@
"name" : "new-server-test",
"imageRef" : "70a599e0-31e7-49b7-b260-868f441e862b",
"flavorRef" : "6",
- "availability_zone": "nova",
+ "availability_zone": "%(availability_zone)s",
"OS-DCF:diskConfig": "AUTO",
"metadata" : {
"My Server Name" : "Apache1"
diff --git a/doc/api_samples/servers/v2.63/server-get-resp.json b/doc/api_samples/servers/v2.63/server-get-resp.json
index 5645499fc77..e47589885bc 100644
--- a/doc/api_samples/servers/v2.63/server-get-resp.json
+++ b/doc/api_samples/servers/v2.63/server-get-resp.json
@@ -1,7 +1,7 @@
{
"server": {
"OS-DCF:diskConfig": "AUTO",
- "OS-EXT-AZ:availability_zone": "nova",
+ "OS-EXT-AZ:availability_zone": "us-west",
"OS-EXT-SRV-ATTR:host": "compute",
"OS-EXT-SRV-ATTR:hostname": "new-server-test",
"OS-EXT-SRV-ATTR:hypervisor_hostname": "fake-mini",
@@ -22,9 +22,9 @@
"addresses": {
"private": [
{
- "OS-EXT-IPS-MAC:mac_addr": "aa:bb:cc:dd:ee:ff",
+ "OS-EXT-IPS-MAC:mac_addr": "00:0c:29:0d:11:74",
"OS-EXT-IPS:type": "fixed",
- "addr": "192.168.0.3",
+ "addr": "192.168.1.30",
"version": 4
}
]
@@ -36,8 +36,7 @@
"disk": 1,
"ephemeral": 0,
"extra_specs": {
- "hw:cpu_policy": "dedicated",
- "hw:mem_page_size": "2048"
+ "hw:numa_nodes": "1"
},
"original_name": "m1.tiny.specs",
"ram": 512,
@@ -87,6 +86,6 @@
"674736e3-f25c-405c-8362-bbf991e0ce0a"
],
"updated": "2017-02-14T19:24:00Z",
- "user_id": "fake"
+ "user_id": "admin"
}
}
diff --git a/doc/api_samples/servers/v2.63/server-update-resp.json b/doc/api_samples/servers/v2.63/server-update-resp.json
index 5a47c0c7cea..c1d544fed96 100644
--- a/doc/api_samples/servers/v2.63/server-update-resp.json
+++ b/doc/api_samples/servers/v2.63/server-update-resp.json
@@ -6,7 +6,7 @@
"addresses": {
"private": [
{
- "addr": "192.168.0.3",
+ "addr": "192.168.1.30",
"version": 4
}
]
@@ -17,8 +17,7 @@
"disk": 1,
"ephemeral": 0,
"extra_specs": {
- "hw:cpu_policy": "dedicated",
- "hw:mem_page_size": "2048"
+ "hw:numa_nodes": "1"
},
"original_name": "m1.tiny.specs",
"ram": 512,
@@ -60,6 +59,6 @@
"674736e3-f25c-405c-8362-bbf991e0ce0a"
],
"updated": "2012-12-02T02:11:58Z",
- "user_id": "fake"
+ "user_id": "admin"
}
}
diff --git a/doc/api_samples/servers/v2.63/servers-details-resp.json b/doc/api_samples/servers/v2.63/servers-details-resp.json
index 620a7a22342..358439ededc 100644
--- a/doc/api_samples/servers/v2.63/servers-details-resp.json
+++ b/doc/api_samples/servers/v2.63/servers-details-resp.json
@@ -2,7 +2,7 @@
"servers": [
{
"OS-DCF:diskConfig": "AUTO",
- "OS-EXT-AZ:availability_zone": "nova",
+ "OS-EXT-AZ:availability_zone": "us-west",
"OS-EXT-SRV-ATTR:host": "compute",
"OS-EXT-SRV-ATTR:hostname": "new-server-test",
"OS-EXT-SRV-ATTR:hypervisor_hostname": "fake-mini",
@@ -23,9 +23,9 @@
"addresses": {
"private": [
{
- "OS-EXT-IPS-MAC:mac_addr": "aa:bb:cc:dd:ee:ff",
+ "OS-EXT-IPS-MAC:mac_addr": "00:0c:29:0d:11:74",
"OS-EXT-IPS:type": "fixed",
- "addr": "192.168.0.3",
+ "addr": "192.168.1.30",
"version": 4
}
]
@@ -37,8 +37,7 @@
"disk": 1,
"ephemeral": 0,
"extra_specs": {
- "hw:cpu_policy": "dedicated",
- "hw:mem_page_size": "2048"
+ "hw:numa_nodes": "1"
},
"original_name": "m1.tiny.specs",
"ram": 512,
@@ -88,7 +87,7 @@
"674736e3-f25c-405c-8362-bbf991e0ce0a"
],
"updated": "2017-10-10T15:49:09Z",
- "user_id": "fake"
+ "user_id": "admin"
}
],
"servers_links": [
diff --git a/doc/api_samples/servers/v2.66/server-create-req.json b/doc/api_samples/servers/v2.66/server-create-req.json
new file mode 100644
index 00000000000..59c9101f020
--- /dev/null
+++ b/doc/api_samples/servers/v2.66/server-create-req.json
@@ -0,0 +1,28 @@
+{
+ "server" : {
+ "accessIPv4": "1.2.3.4",
+ "accessIPv6": "80fe::",
+ "name" : "new-server-test",
+ "imageRef" : "70a599e0-31e7-49b7-b260-868f441e862b",
+ "flavorRef" : "6",
+ "availability_zone": "us-west",
+ "OS-DCF:diskConfig": "AUTO",
+ "metadata" : {
+ "My Server Name" : "Apache1"
+ },
+ "security_groups": [
+ {
+ "name": "default"
+ }
+ ],
+ "user_data" : "IyEvYmluL2Jhc2gKL2Jpbi9zdQplY2hvICJJIGFtIGluIHlvdSEiCg==",
+ "networks": "auto",
+ "trusted_image_certificates": [
+ "0b5d2c72-12cc-4ba6-a8d7-3ff5cc1d8cb8",
+ "674736e3-f25c-405c-8362-bbf991e0ce0a"
+ ]
+ },
+ "OS-SCH-HNT:scheduler_hints": {
+ "same_host": "[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}"
+ }
+}
\ No newline at end of file
diff --git a/doc/api_samples/servers/v2.66/server-create-resp.json b/doc/api_samples/servers/v2.66/server-create-resp.json
new file mode 100644
index 00000000000..7400eb33272
--- /dev/null
+++ b/doc/api_samples/servers/v2.66/server-create-resp.json
@@ -0,0 +1,22 @@
+{
+ "server": {
+ "OS-DCF:diskConfig": "AUTO",
+ "adminPass": "wKLKinb9u7GM",
+ "id": "aab35fd0-b459-4b59-9308-5a23147f3165",
+ "links": [
+ {
+ "href": "http://openstack.example.com/v2.1/6f70656e737461636b20342065766572/servers/aab35fd0-b459-4b59-9308-5a23147f3165",
+ "rel": "self"
+ },
+ {
+ "href": "http://openstack.example.com/6f70656e737461636b20342065766572/servers/aab35fd0-b459-4b59-9308-5a23147f3165",
+ "rel": "bookmark"
+ }
+ ],
+ "security_groups": [
+ {
+ "name": "default"
+ }
+ ]
+ }
+}
\ No newline at end of file
diff --git a/doc/api_samples/servers/v2.66/servers-details-with-changes-before.json b/doc/api_samples/servers/v2.66/servers-details-with-changes-before.json
new file mode 100644
index 00000000000..f4c39ac5197
--- /dev/null
+++ b/doc/api_samples/servers/v2.66/servers-details-with-changes-before.json
@@ -0,0 +1,93 @@
+{
+ "servers": [
+ {
+ "OS-DCF:diskConfig": "AUTO",
+ "OS-EXT-AZ:availability_zone": "us-west",
+ "OS-EXT-SRV-ATTR:host": "compute",
+ "OS-EXT-SRV-ATTR:hostname": "new-server-test",
+ "OS-EXT-SRV-ATTR:hypervisor_hostname": "fake-mini",
+ "OS-EXT-SRV-ATTR:instance_name": "instance-00000001",
+ "OS-EXT-SRV-ATTR:kernel_id": "",
+ "OS-EXT-SRV-ATTR:launch_index": 0,
+ "OS-EXT-SRV-ATTR:ramdisk_id": "",
+ "OS-EXT-SRV-ATTR:reservation_id": "r-y0w4v32k",
+ "OS-EXT-SRV-ATTR:root_device_name": "/dev/sda",
+ "OS-EXT-SRV-ATTR:user_data": "IyEvYmluL2Jhc2gKL2Jpbi9zdQplY2hvICJJIGFtIGluIHlvdSEiCg==",
+ "OS-EXT-STS:power_state": 1,
+ "OS-EXT-STS:task_state": null,
+ "OS-EXT-STS:vm_state": "active",
+ "OS-SRV-USG:launched_at": "2018-10-10T15:49:09.516729",
+ "OS-SRV-USG:terminated_at": null,
+ "accessIPv4": "1.2.3.4",
+ "accessIPv6": "80fe::",
+ "addresses": {
+ "private": [
+ {
+ "OS-EXT-IPS-MAC:mac_addr": "00:0c:29:0d:11:74",
+ "OS-EXT-IPS:type": "fixed",
+ "addr": "192.168.0.1",
+ "version": 4
+ }
+ ]
+ },
+ "config_drive": "",
+ "created": "2018-10-10T15:49:08Z",
+ "description": null,
+ "flavor": {
+ "disk": 1,
+ "ephemeral": 0,
+ "extra_specs": {
+ "hw:numa_nodes": "1"
+ },
+ "original_name": "m1.tiny.specs",
+ "ram": 512,
+ "swap": 0,
+ "vcpus": 1
+ },
+ "hostId": "2091634baaccdc4c5a1d57069c833e402921df696b7f970791b12ec6",
+ "host_status": "UP",
+ "id": "569f39f9-7c76-42a1-9c2d-8394e2638a6e",
+ "image": {
+ "id": "70a599e0-31e7-49b7-b260-868f441e862b",
+ "links": [
+ {
+ "href": "http://openstack.example.com/6f70656e737461636b20342065766572/images/70a599e0-31e7-49b7-b260-868f441e862b",
+ "rel": "bookmark"
+ }
+ ]
+ },
+ "key_name": null,
+ "links": [
+ {
+ "href": "http://openstack.example.com/v2.1/6f70656e737461636b20342065766572/servers/569f39f9-7c76-42a1-9c2d-8394e2638a6d",
+ "rel": "self"
+ },
+ {
+ "href": "http://openstack.example.com/6f70656e737461636b20342065766572/servers/569f39f9-7c76-42a1-9c2d-8394e2638a6d",
+ "rel": "bookmark"
+ }
+ ],
+ "locked": false,
+ "metadata": {
+ "My Server Name": "Apache1"
+ },
+ "name": "new-server-test",
+ "os-extended-volumes:volumes_attached": [],
+ "progress": 0,
+ "security_groups": [
+ {
+ "name": "default"
+ }
+ ],
+ "status": "ACTIVE",
+ "tags": [],
+ "tenant_id": "6f70656e737461636b20342065766572",
+ "trusted_image_certificates": [
+ "0b5d2c72-12cc-4ba6-a8d7-3ff5cc1d8cb8",
+ "674736e3-f25c-405c-8362-bbf991e0ce0a"
+ ],
+ "updated": "2018-10-10T15:49:09Z",
+ "user_id": "admin"
+ }
+ ]
+}
diff --git a/doc/api_samples/servers/v2.66/servers-list-with-changes-before.json b/doc/api_samples/servers/v2.66/servers-list-with-changes-before.json
new file mode 100644
index 00000000000..d86d5ea9485
--- /dev/null
+++ b/doc/api_samples/servers/v2.66/servers-list-with-changes-before.json
@@ -0,0 +1,18 @@
+{
+ "servers": [
+ {
+ "id": "6e3a87e6-a133-452e-86e1-a31291c1b1c8",
+ "links": [
+ {
+ "href": "http://openstack.example.com/v2.1/6f70656e737461636b20342065766572/servers/6e3a87e6-a133-452e-86e1-a31291c1b1c8",
+ "rel": "self"
+ },
+ {
+ "href": "http://openstack.example.com/6f70656e737461636b20342065766572/servers/6e3a87e6-a133-452e-86e1-a31291c1b1c8",
+ "rel": "bookmark"
+ }
+ ],
+ "name": "new-server-test"
+ }
+ ]
+}
diff --git a/doc/api_samples/servers/v2.67/server-create-req.json b/doc/api_samples/servers/v2.67/server-create-req.json
new file mode 100644
index 00000000000..d8cd28f80c8
--- /dev/null
+++ b/doc/api_samples/servers/v2.67/server-create-req.json
@@ -0,0 +1,19 @@
+{
+ "server" : {
+ "name" : "bfv-server-with-volume-type",
+ "flavorRef" : "http://openstack.example.com/flavors/1",
+ "networks" : [{
+ "uuid" : "3cb9bc59-5699-4588-a4b1-b87f96708bc6",
+ "tag": "nic1"
+ }],
+ "block_device_mapping_v2": [{
+ "uuid": "70a599e0-31e7-49b7-b260-868f441e862b",
+ "source_type": "image",
+ "destination_type": "volume",
+ "boot_index": 0,
+ "volume_size": "1",
+ "tag": "disk1",
+ "volume_type": "lvm-1"
+ }]
+ }
+}
diff --git a/doc/api_samples/servers/v2.67/server-create-resp.json b/doc/api_samples/servers/v2.67/server-create-resp.json
new file mode 100644
index 00000000000..dd0bb9f2284
--- /dev/null
+++ b/doc/api_samples/servers/v2.67/server-create-resp.json
@@ -0,0 +1,22 @@
+{
+ "server": {
+ "OS-DCF:diskConfig": "AUTO",
+ "adminPass": "S5wqy9sPYUvU",
+ "id": "97108291-2fd7-4dc2-a909-eaae0306a6a9",
+ "links": [
+ {
+ "href": "http://openstack.example.com/v2.1/6f70656e737461636b20342065766572/servers/97108291-2fd7-4dc2-a909-eaae0306a6a9",
+ "rel": "self"
+ },
+ {
+ "href": "http://openstack.example.com/6f70656e737461636b20342065766572/servers/97108291-2fd7-4dc2-a909-eaae0306a6a9",
+ "rel": "bookmark"
+ }
+ ],
+ "security_groups": [
+ {
+ "name": "default"
+ }
+ ]
+ }
+}
\ No newline at end of file
diff --git a/doc/api_samples/servers/v2.69/server-create-req.json b/doc/api_samples/servers/v2.69/server-create-req.json
new file mode 100644
index 00000000000..ae72809bb84
--- /dev/null
+++ b/doc/api_samples/servers/v2.69/server-create-req.json
@@ -0,0 +1,20 @@
+{
+ "server" : {
+ "accessIPv4": "1.2.3.4",
+ "accessIPv6": "80fe::",
+ "name" : "new-server-test",
+ "imageRef" : "70a599e0-31e7-49b7-b260-868f441e862b",
+ "flavorRef" : "http://openstack.example.com/flavors/1",
+ "OS-DCF:diskConfig": "AUTO",
+ "metadata" : {
+ "My Server Name" : "Apache1"
+ },
+ "security_groups": [
+ {
+ "name": "default"
+ }
+ ],
+ "user_data" : "IyEvYmluL2Jhc2gKL2Jpbi9zdQplY2hvICJJIGFtIGluIHlvdSEiCg==",
+ "networks": "auto"
+ }
+}
\ No newline at end of file
diff --git a/doc/api_samples/servers/v2.69/server-create-resp.json b/doc/api_samples/servers/v2.69/server-create-resp.json
new file mode 100644
index 00000000000..a5aa94d21c5
--- /dev/null
+++ b/doc/api_samples/servers/v2.69/server-create-resp.json
@@ -0,0 +1,22 @@
+{
+ "server": {
+ "OS-DCF:diskConfig": "AUTO",
+ "adminPass": "mqtDAwb2y7Zh",
+ "id": "6f81aefe-472a-49d8-ba8d-758a5082c7e5",
+ "links": [
+ {
+ "href": "http://openstack.example.com/v2.1/6f70656e737461636b20342065766572/servers/6f81aefe-472a-49d8-ba8d-758a5082c7e5",
+ "rel": "self"
+ },
+ {
+ "href": "http://openstack.example.com/6f70656e737461636b20342065766572/servers/6f81aefe-472a-49d8-ba8d-758a5082c7e5",
+ "rel": "bookmark"
+ }
+ ],
+ "security_groups": [
+ {
+ "name": "default"
+ }
+ ]
+ }
+}
\ No newline at end of file
diff --git a/doc/api_samples/servers/v2.69/server-get-resp.json b/doc/api_samples/servers/v2.69/server-get-resp.json
new file mode 100644
index 00000000000..981cd23f8c0
--- /dev/null
+++ b/doc/api_samples/servers/v2.69/server-get-resp.json
@@ -0,0 +1,39 @@
+{
+ "server": {
+ "OS-EXT-AZ:availability_zone": "UNKNOWN",
+ "OS-EXT-STS:power_state": 0,
+ "created": "2018-12-03T21:06:18Z",
+ "flavor": {
+ "disk": 1,
+ "ephemeral": 0,
+ "extra_specs": {},
+ "original_name": "m1.tiny",
+ "ram": 512,
+ "swap": 0,
+ "vcpus": 1
+ },
+ "id": "33748c23-38dd-4f70-b774-522fc69e7b67",
+ "image": {
+ "id": "70a599e0-31e7-49b7-b260-868f441e862b",
+ "links": [
+ {
+ "href": "http://openstack.example.com/6f70656e737461636b20342065766572/images/70a599e0-31e7-49b7-b260-868f441e862b",
+ "rel": "bookmark"
+ }
+ ]
+ },
+ "status": "UNKNOWN",
+ "tenant_id": "6f70656e737461636b20342065766572",
+ "user_id": "admin",
+ "links": [
+ {
+ "href": "http://openstack.example.com/v2.1/6f70656e737461636b20342065766572/servers/33748c23-38dd-4f70-b774-522fc69e7b67",
+ "rel": "self"
+ },
+ {
+ "href": "http://openstack.example.com/6f70656e737461636b20342065766572/servers/33748c23-38dd-4f70-b774-522fc69e7b67",
+ "rel": "bookmark"
+ }
+ ]
+ }
+}
diff --git a/doc/api_samples/servers/v2.69/servers-details-resp.json b/doc/api_samples/servers/v2.69/servers-details-resp.json
new file mode 100644
index 00000000000..83ad414943c
--- /dev/null
+++ b/doc/api_samples/servers/v2.69/servers-details-resp.json
@@ -0,0 +1,20 @@
+{
+ "servers": [
+ {
+ "created": "2018-12-03T21:06:18Z",
+ "id": "b6b0410f-b65f-4473-855e-5d82a71759e0",
+ "status": "UNKNOWN",
+ "tenant_id": "6f70656e737461636b20342065766572",
+ "links": [
+ {
+ "href": "http://openstack.example.com/v2.1/6f70656e737461636b20342065766572/servers/b6b0410f-b65f-4473-855e-5d82a71759e0",
+ "rel": "self"
+ },
+ {
+ "href": "http://openstack.example.com/6f70656e737461636b20342065766572/servers/b6b0410f-b65f-4473-855e-5d82a71759e0",
+ "rel": "bookmark"
+ }
+ ]
+ }
+ ]
+}
diff --git a/doc/api_samples/servers/v2.69/servers-list-resp.json b/doc/api_samples/servers/v2.69/servers-list-resp.json
new file mode 100644
index 00000000000..5a5c988a857
--- /dev/null
+++ b/doc/api_samples/servers/v2.69/servers-list-resp.json
@@ -0,0 +1,18 @@
+{
+ "servers": [
+ {
+ "id": "2e136db7-b4a4-4815-8a00-25d9bfe59617",
+ "status": "UNKNOWN",
+ "links": [
+ {
+ "href": "http://openstack.example.com/v2.1/6f70656e737461636b20342065766572/servers/2e136db7-b4a4-4815-8a00-25d9bfe59617",
+ "rel": "self"
+ },
+ {
+ "href": "http://openstack.example.com/6f70656e737461636b20342065766572/servers/2e136db7-b4a4-4815-8a00-25d9bfe59617",
+ "rel": "bookmark"
+ }
+ ]
+ }
+ ]
+}
\ No newline at end of file
diff --git a/doc/api_samples/servers/v2.71/server-action-rebuild-resp.json b/doc/api_samples/servers/v2.71/server-action-rebuild-resp.json
new file mode 100644
index 00000000000..16dd0a10301
--- /dev/null
+++ b/doc/api_samples/servers/v2.71/server-action-rebuild-resp.json
@@ -0,0 +1,65 @@
+{
+ "server": {
+ "OS-DCF:diskConfig": "AUTO",
+ "accessIPv4": "1.2.3.4",
+ "accessIPv6": "80fe::",
+ "addresses": {
+ "private": [
+ {
+ "addr": "192.168.1.30",
+ "version": 4
+ }
+ ]
+ },
+ "adminPass": "seekr3t",
+ "created": "2019-02-28T03:16:19Z",
+ "description": null,
+ "flavor": {
+ "disk": 1,
+ "ephemeral": 0,
+ "extra_specs": {},
+ "original_name": "m1.tiny",
+ "ram": 512,
+ "swap": 0,
+ "vcpus": 1
+ },
+ "hostId": "2091634baaccdc4c5a1d57069c833e402921df696b7f970791b12ec6",
+ "id": "36b2afd5-1684-4d18-a49c-915bf0f5344c",
+ "image": {
+ "id": "70a599e0-31e7-49b7-b260-868f441e862b",
+ "links": [
+ {
+ "href": "http://openstack.example.com/6f70656e737461636b20342065766572/images/70a599e0-31e7-49b7-b260-868f441e862b",
+ "rel": "bookmark"
+ }
+ ]
+ },
+ "key_name": null,
+ "links": [
+ {
+ "href": "http://openstack.example.com/v2.1/6f70656e737461636b20342065766572/servers/36b2afd5-1684-4d18-a49c-915bf0f5344c",
+ "rel": "self"
+ },
+ {
+ "href": "http://openstack.example.com/6f70656e737461636b20342065766572/servers/36b2afd5-1684-4d18-a49c-915bf0f5344c",
+ "rel": "bookmark"
+ }
+ ],
+ "locked": false,
+ "metadata": {
+ "meta_var": "meta_val"
+ },
+ "name": "foobar",
+ "progress": 0,
+ "server_groups": [
+ "f3d86fe6-4246-4be8-b87c-eb894626c741"
+ ],
+ "status": "ACTIVE",
+ "tags": [],
+ "tenant_id": "6f70656e737461636b20342065766572",
+ "trusted_image_certificates": null,
+ "updated": "2019-02-28T03:16:20Z",
+ "user_data": "ZWNobyAiaGVsbG8gd29ybGQi",
+ "user_id": "admin"
+ }
+}
diff --git a/doc/api_samples/servers/v2.71/server-action-rebuild.json b/doc/api_samples/servers/v2.71/server-action-rebuild.json
new file mode 100644
index 00000000000..f1431a05062
--- /dev/null
+++ b/doc/api_samples/servers/v2.71/server-action-rebuild.json
@@ -0,0 +1,14 @@
+{
+ "rebuild" : {
+ "accessIPv4" : "1.2.3.4",
+ "accessIPv6" : "80fe::",
+ "OS-DCF:diskConfig": "AUTO",
+ "imageRef" : "70a599e0-31e7-49b7-b260-868f441e862b",
+ "name" : "foobar",
+ "adminPass" : "seekr3t",
+ "metadata" : {
+ "meta_var" : "meta_val"
+ },
+ "user_data": "ZWNobyAiaGVsbG8gd29ybGQi"
+ }
+}
\ No newline at end of file
diff --git a/doc/api_samples/servers/v2.71/server-create-req.json b/doc/api_samples/servers/v2.71/server-create-req.json
new file mode 100644
index 00000000000..b5a9b238aca
--- /dev/null
+++ b/doc/api_samples/servers/v2.71/server-create-req.json
@@ -0,0 +1,23 @@
+{
+ "server" : {
+ "accessIPv4": "1.2.3.4",
+ "accessIPv6": "80fe::",
+ "name" : "new-server-test",
+ "imageRef" : "70a599e0-31e7-49b7-b260-868f441e862b",
+ "flavorRef" : "1",
+ "OS-DCF:diskConfig": "AUTO",
+ "metadata" : {
+ "My Server Name" : "Apache1"
+ },
+ "security_groups": [
+ {
+ "name": "default"
+ }
+ ],
+ "user_data" : "IyEvYmluL2Jhc2gKL2Jpbi9zdQplY2hvICJJIGFtIGluIHlvdSEiCg==",
+ "networks": "auto"
+ },
+ "OS-SCH-HNT:scheduler_hints": {
+ "group": "f3d86fe6-4246-4be8-b87c-eb894626c741"
+ }
+}
\ No newline at end of file
diff --git a/doc/api_samples/servers/v2.71/server-create-resp.json b/doc/api_samples/servers/v2.71/server-create-resp.json
new file mode 100644
index 00000000000..7ebe2e20a2d
--- /dev/null
+++ b/doc/api_samples/servers/v2.71/server-create-resp.json
@@ -0,0 +1,22 @@
+{
+ "server": {
+ "OS-DCF:diskConfig": "AUTO",
+ "adminPass": "DB2bQBhxvq8a",
+ "id": "84e2b49d-39a9-4d32-9100-e62161c236db",
+ "links": [
+ {
+ "href": "http://openstack.example.com/v2.1/6f70656e737461636b20342065766572/servers/84e2b49d-39a9-4d32-9100-e62161c236db",
+ "rel": "self"
+ },
+ {
+ "href": "http://openstack.example.com/6f70656e737461636b20342065766572/servers/84e2b49d-39a9-4d32-9100-e62161c236db",
+ "rel": "bookmark"
+ }
+ ],
+ "security_groups": [
+ {
+ "name": "default"
+ }
+ ]
+ }
+}
\ No newline at end of file
diff --git a/doc/api_samples/servers/v2.71/server-get-down-cell-resp.json b/doc/api_samples/servers/v2.71/server-get-down-cell-resp.json
new file mode 100644
index 00000000000..9dded66a72e
--- /dev/null
+++ b/doc/api_samples/servers/v2.71/server-get-down-cell-resp.json
@@ -0,0 +1,42 @@
+{
+ "server": {
+ "OS-EXT-AZ:availability_zone": "UNKNOWN",
+ "OS-EXT-STS:power_state": 0,
+ "created": "2019-02-28T03:16:19Z",
+ "flavor": {
+ "disk": 1,
+ "ephemeral": 0,
+ "extra_specs": {},
+ "original_name": "m1.tiny",
+ "ram": 512,
+ "swap": 0,
+ "vcpus": 1
+ },
+ "id": "2669556b-b4a3-41f1-a0c1-f9c7ff75e53c",
+ "image": {
+ "id": "70a599e0-31e7-49b7-b260-868f441e862b",
+ "links": [
+ {
+ "href": "http://openstack.example.com/6f70656e737461636b20342065766572/images/70a599e0-31e7-49b7-b260-868f441e862b",
+ "rel": "bookmark"
+ }
+ ]
+ },
+ "server_groups": [
+ "f3d86fe6-4246-4be8-b87c-eb894626c741"
+ ],
+ "status": "UNKNOWN",
+ "tenant_id": "6f70656e737461636b20342065766572",
+ "user_id": "admin",
+ "links": [
+ {
+ "href": "http://openstack.example.com/v2.1/6f70656e737461636b20342065766572/servers/2669556b-b4a3-41f1-a0c1-f9c7ff75e53c",
+ "rel": "self"
+ },
+ {
+ "href": "http://openstack.example.com/6f70656e737461636b20342065766572/servers/2669556b-b4a3-41f1-a0c1-f9c7ff75e53c",
+ "rel": "bookmark"
+ }
+ ]
+ }
+}
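
As the sample above shows, a server in a down cell comes back with ``status: UNKNOWN`` and only a reduced key set rather than an error. A minimal sketch of how a client might detect that partial view, reading the sample file itself so it runs standalone from the repository root:

```python
import json

# Assumption: executed from the nova source tree root.
with open(
    "doc/api_samples/servers/v2.71/server-get-down-cell-resp.json") as f:
    server = json.load(f)["server"]

if server["status"] == "UNKNOWN":
    # Only a handful of keys (id, created, tenant_id, ...) are guaranteed.
    print(f"server {server['id']} is in a down cell; details unavailable")
```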
diff --git a/doc/api_samples/servers/v2.71/server-get-resp.json b/doc/api_samples/servers/v2.71/server-get-resp.json
new file mode 100644
index 00000000000..72e893e2e0f
--- /dev/null
+++ b/doc/api_samples/servers/v2.71/server-get-resp.json
@@ -0,0 +1,89 @@
+{
+ "server": {
+ "OS-DCF:diskConfig": "AUTO",
+ "OS-EXT-AZ:availability_zone": "nova",
+ "OS-EXT-SRV-ATTR:host": "compute",
+ "OS-EXT-SRV-ATTR:hostname": "new-server-test",
+ "OS-EXT-SRV-ATTR:hypervisor_hostname": "fake-mini",
+ "OS-EXT-SRV-ATTR:instance_name": "instance-00000001",
+ "OS-EXT-SRV-ATTR:kernel_id": "",
+ "OS-EXT-SRV-ATTR:launch_index": 0,
+ "OS-EXT-SRV-ATTR:ramdisk_id": "",
+ "OS-EXT-SRV-ATTR:reservation_id": "r-0scisg0g",
+ "OS-EXT-SRV-ATTR:root_device_name": "/dev/sda",
+ "OS-EXT-SRV-ATTR:user_data": "IyEvYmluL2Jhc2gKL2Jpbi9zdQplY2hvICJJIGFtIGluIHlvdSEiCg==",
+ "OS-EXT-STS:power_state": 1,
+ "OS-EXT-STS:task_state": null,
+ "OS-EXT-STS:vm_state": "active",
+ "OS-SRV-USG:launched_at": "2019-02-28T03:16:19.600768",
+ "OS-SRV-USG:terminated_at": null,
+ "accessIPv4": "1.2.3.4",
+ "accessIPv6": "80fe::",
+ "addresses": {
+ "private": [
+ {
+ "OS-EXT-IPS-MAC:mac_addr": "00:0c:29:0d:11:74",
+ "OS-EXT-IPS:type": "fixed",
+ "addr": "192.168.1.30",
+ "version": 4
+ }
+ ]
+ },
+ "config_drive": "",
+ "created": "2019-02-28T03:16:18Z",
+ "description": null,
+ "flavor": {
+ "disk": 1,
+ "ephemeral": 0,
+ "extra_specs": {},
+ "original_name": "m1.tiny",
+ "ram": 512,
+ "swap": 0,
+ "vcpus": 1
+ },
+ "hostId": "2091634baaccdc4c5a1d57069c833e402921df696b7f970791b12ec6",
+ "host_status": "UP",
+ "id": "84e2b49d-39a9-4d32-9100-e62161c236db",
+ "image": {
+ "id": "70a599e0-31e7-49b7-b260-868f441e862b",
+ "links": [
+ {
+ "href": "http://openstack.example.com/6f70656e737461636b20342065766572/images/70a599e0-31e7-49b7-b260-868f441e862b",
+ "rel": "bookmark"
+ }
+ ]
+ },
+ "key_name": null,
+ "links": [
+ {
+ "href": "http://openstack.example.com/v2.1/6f70656e737461636b20342065766572/servers/84e2b49d-39a9-4d32-9100-e62161c236db",
+ "rel": "self"
+ },
+ {
+ "href": "http://openstack.example.com/6f70656e737461636b20342065766572/servers/84e2b49d-39a9-4d32-9100-e62161c236db",
+ "rel": "bookmark"
+ }
+ ],
+ "locked": false,
+ "metadata": {
+ "My Server Name": "Apache1"
+ },
+ "name": "new-server-test",
+ "os-extended-volumes:volumes_attached": [],
+ "progress": 0,
+ "security_groups": [
+ {
+ "name": "default"
+ }
+ ],
+ "server_groups": [
+ "f3d86fe6-4246-4be8-b87c-eb894626c741"
+ ],
+ "status": "ACTIVE",
+ "tags": [],
+ "tenant_id": "6f70656e737461636b20342065766572",
+ "trusted_image_certificates": null,
+ "updated": "2019-02-28T03:16:19Z",
+ "user_id": "admin"
+ }
+}
\ No newline at end of file
diff --git a/doc/api_samples/servers/v2.71/server-groups-post-req.json b/doc/api_samples/servers/v2.71/server-groups-post-req.json
new file mode 100644
index 00000000000..bbdf2ff4c84
--- /dev/null
+++ b/doc/api_samples/servers/v2.71/server-groups-post-req.json
@@ -0,0 +1,6 @@
+{
+ "server_group": {
+ "name": "test",
+ "policy": "affinity"
+ }
+}
\ No newline at end of file
diff --git a/doc/api_samples/servers/v2.71/server-groups-post-resp.json b/doc/api_samples/servers/v2.71/server-groups-post-resp.json
new file mode 100644
index 00000000000..99b9c98f2d2
--- /dev/null
+++ b/doc/api_samples/servers/v2.71/server-groups-post-resp.json
@@ -0,0 +1,11 @@
+{
+ "server_group": {
+ "id": "f3d86fe6-4246-4be8-b87c-eb894626c741",
+ "members": [],
+ "name": "test",
+ "policy": "affinity",
+ "project_id": "6f70656e737461636b20342065766572",
+ "rules": {},
+ "user_id": "admin"
+ }
+}
\ No newline at end of file
diff --git a/doc/api_samples/servers/v2.71/server-update-req.json b/doc/api_samples/servers/v2.71/server-update-req.json
new file mode 100644
index 00000000000..3b3995d51e4
--- /dev/null
+++ b/doc/api_samples/servers/v2.71/server-update-req.json
@@ -0,0 +1,9 @@
+{
+ "server": {
+ "accessIPv4": "1.2.3.4",
+ "accessIPv6": "80fe::",
+ "OS-DCF:diskConfig": "AUTO",
+ "name": "new-server-test",
+ "description": "Sample description"
+ }
+}
\ No newline at end of file
diff --git a/doc/api_samples/servers/v2.71/server-update-resp.json b/doc/api_samples/servers/v2.71/server-update-resp.json
new file mode 100644
index 00000000000..408f0bea4a3
--- /dev/null
+++ b/doc/api_samples/servers/v2.71/server-update-resp.json
@@ -0,0 +1,62 @@
+{
+ "server": {
+ "OS-DCF:diskConfig": "AUTO",
+ "accessIPv4": "1.2.3.4",
+ "accessIPv6": "80fe::",
+ "addresses": {
+ "private": [
+ {
+ "addr": "192.168.1.30",
+ "version": 4
+ }
+ ]
+ },
+ "created": "2019-02-28T03:16:19Z",
+ "description": "Sample description",
+ "flavor": {
+ "disk": 1,
+ "ephemeral": 0,
+ "extra_specs": {},
+ "original_name": "m1.tiny",
+ "ram": 512,
+ "swap": 0,
+ "vcpus": 1
+ },
+ "hostId": "2091634baaccdc4c5a1d57069c833e402921df696b7f970791b12ec6",
+ "id": "60e840f8-dd17-476b-bd1d-33785066c496",
+ "image": {
+ "id": "70a599e0-31e7-49b7-b260-868f441e862b",
+ "links": [
+ {
+ "href": "http://openstack.example.com/6f70656e737461636b20342065766572/images/70a599e0-31e7-49b7-b260-868f441e862b",
+ "rel": "bookmark"
+ }
+ ]
+ },
+ "links": [
+ {
+ "href": "http://openstack.example.com/v2.1/6f70656e737461636b20342065766572/servers/60e840f8-dd17-476b-bd1d-33785066c496",
+ "rel": "self"
+ },
+ {
+ "href": "http://openstack.example.com/6f70656e737461636b20342065766572/servers/60e840f8-dd17-476b-bd1d-33785066c496",
+ "rel": "bookmark"
+ }
+ ],
+ "locked": false,
+ "metadata": {
+ "My Server Name": "Apache1"
+ },
+ "name": "new-server-test",
+ "progress": 0,
+ "server_groups": [
+ "f3d86fe6-4246-4be8-b87c-eb894626c741"
+ ],
+ "status": "ACTIVE",
+ "tags": [],
+ "tenant_id": "6f70656e737461636b20342065766572",
+ "trusted_image_certificates": null,
+ "updated": "2019-02-28T03:16:19Z",
+ "user_id": "admin"
+ }
+}
diff --git a/doc/api_samples/servers/v2.73/lock-server-with-reason.json b/doc/api_samples/servers/v2.73/lock-server-with-reason.json
new file mode 100644
index 00000000000..c307fb39bf7
--- /dev/null
+++ b/doc/api_samples/servers/v2.73/lock-server-with-reason.json
@@ -0,0 +1,3 @@
+{
+ "lock": {"locked_reason": "I don't want to work"}
+}
\ No newline at end of file
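
Microversion 2.73 is the first to accept a ``locked_reason`` in the lock action body, as in the sample above. A sketch of the corresponding API call using the ``requests`` library; the endpoint, server UUID and token are placeholders, not values from a real deployment:

```python
import requests

NOVA = "http://openstack.example.com/v2.1"
headers = {
    "X-Auth-Token": "<token>",  # placeholder
    "X-OpenStack-Nova-API-Version": "2.73",
}
body = {"lock": {"locked_reason": "I don't want to work"}}

# The lock action returns 202 with an empty body on success.
resp = requests.post(
    f"{NOVA}/servers/0c37a84a-c757-4f22-8c7f-0bf8b6970886/action",
    json=body, headers=headers)
resp.raise_for_status()
```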
diff --git a/doc/api_samples/servers/v2.73/server-action-rebuild-resp.json b/doc/api_samples/servers/v2.73/server-action-rebuild-resp.json
new file mode 100644
index 00000000000..d6be9e95e14
--- /dev/null
+++ b/doc/api_samples/servers/v2.73/server-action-rebuild-resp.json
@@ -0,0 +1,64 @@
+{
+ "server": {
+ "OS-DCF:diskConfig": "AUTO",
+ "accessIPv4": "1.2.3.4",
+ "accessIPv6": "80fe::",
+ "addresses": {
+ "private": [
+ {
+ "addr": "192.168.1.30",
+ "version": 4
+ }
+ ]
+ },
+ "adminPass": "seekr3t",
+ "created": "2019-04-23T17:10:22Z",
+ "description": null,
+ "flavor": {
+ "disk": 1,
+ "ephemeral": 0,
+ "extra_specs": {},
+ "original_name": "m1.tiny",
+ "ram": 512,
+ "swap": 0,
+ "vcpus": 1
+ },
+ "hostId": "2091634baaccdc4c5a1d57069c833e402921df696b7f970791b12ec6",
+ "id": "0c37a84a-c757-4f22-8c7f-0bf8b6970886",
+ "image": {
+ "id": "70a599e0-31e7-49b7-b260-868f441e862b",
+ "links": [
+ {
+ "href": "http://openstack.example.com/6f70656e737461636b20342065766572/images/70a599e0-31e7-49b7-b260-868f441e862b",
+ "rel": "bookmark"
+ }
+ ]
+ },
+ "key_name": null,
+ "links": [
+ {
+ "href": "http://openstack.example.com/v2.1/6f70656e737461636b20342065766572/servers/0c37a84a-c757-4f22-8c7f-0bf8b6970886",
+ "rel": "self"
+ },
+ {
+ "href": "http://openstack.example.com/6f70656e737461636b20342065766572/servers/0c37a84a-c757-4f22-8c7f-0bf8b6970886",
+ "rel": "bookmark"
+ }
+ ],
+ "locked": false,
+ "locked_reason": null,
+ "metadata": {
+ "meta_var": "meta_val"
+ },
+ "name": "foobar",
+ "progress": 0,
+ "server_groups": [],
+ "status": "ACTIVE",
+ "tags": [],
+ "tenant_id": "6f70656e737461636b20342065766572",
+ "trusted_image_certificates": null,
+ "updated": "2019-04-23T17:10:24Z",
+ "user_data": "ZWNobyAiaGVsbG8gd29ybGQi",
+ "user_id": "admin"
+ }
+}
\ No newline at end of file
diff --git a/doc/api_samples/servers/v2.73/server-action-rebuild.json b/doc/api_samples/servers/v2.73/server-action-rebuild.json
new file mode 100644
index 00000000000..f1431a05062
--- /dev/null
+++ b/doc/api_samples/servers/v2.73/server-action-rebuild.json
@@ -0,0 +1,14 @@
+{
+ "rebuild" : {
+ "accessIPv4" : "1.2.3.4",
+ "accessIPv6" : "80fe::",
+ "OS-DCF:diskConfig": "AUTO",
+ "imageRef" : "70a599e0-31e7-49b7-b260-868f441e862b",
+ "name" : "foobar",
+ "adminPass" : "seekr3t",
+ "metadata" : {
+ "meta_var" : "meta_val"
+ },
+ "user_data": "ZWNobyAiaGVsbG8gd29ybGQi"
+ }
+}
\ No newline at end of file
diff --git a/doc/api_samples/servers/v2.73/server-create-req.json b/doc/api_samples/servers/v2.73/server-create-req.json
new file mode 100644
index 00000000000..c8ae2eac974
--- /dev/null
+++ b/doc/api_samples/servers/v2.73/server-create-req.json
@@ -0,0 +1,20 @@
+{
+ "server" : {
+ "accessIPv4": "1.2.3.4",
+ "accessIPv6": "80fe::",
+ "name" : "new-server-test",
+ "imageRef" : "70a599e0-31e7-49b7-b260-868f441e862b",
+ "flavorRef" : "1",
+ "OS-DCF:diskConfig": "AUTO",
+ "metadata" : {
+ "My Server Name" : "Apache1"
+ },
+ "security_groups": [
+ {
+ "name": "default"
+ }
+ ],
+ "user_data" : "IyEvYmluL2Jhc2gKL2Jpbi9zdQplY2hvICJJIGFtIGluIHlvdSEiCg==",
+ "networks": "auto"
+ }
+}
\ No newline at end of file
diff --git a/doc/api_samples/servers/v2.73/server-create-resp.json b/doc/api_samples/servers/v2.73/server-create-resp.json
new file mode 100644
index 00000000000..d5ff5974d9c
--- /dev/null
+++ b/doc/api_samples/servers/v2.73/server-create-resp.json
@@ -0,0 +1,22 @@
+{
+ "server": {
+ "OS-DCF:diskConfig": "AUTO",
+ "adminPass": "kJTmMkszoB6A",
+ "id": "ae10adbb-9b5e-4667-9cc5-05ebdc80a941",
+ "links": [
+ {
+ "href": "http://openstack.example.com/v2.1/6f70656e737461636b20342065766572/servers/ae10adbb-9b5e-4667-9cc5-05ebdc80a941",
+ "rel": "self"
+ },
+ {
+ "href": "http://openstack.example.com/6f70656e737461636b20342065766572/servers/ae10adbb-9b5e-4667-9cc5-05ebdc80a941",
+ "rel": "bookmark"
+ }
+ ],
+ "security_groups": [
+ {
+ "name": "default"
+ }
+ ]
+ }
+}
\ No newline at end of file
diff --git a/doc/api_samples/servers/v2.73/server-get-resp.json b/doc/api_samples/servers/v2.73/server-get-resp.json
new file mode 100644
index 00000000000..edd30317f57
--- /dev/null
+++ b/doc/api_samples/servers/v2.73/server-get-resp.json
@@ -0,0 +1,88 @@
+{
+ "server": {
+ "OS-DCF:diskConfig": "AUTO",
+ "OS-EXT-AZ:availability_zone": "nova",
+ "OS-EXT-SRV-ATTR:host": "compute",
+ "OS-EXT-SRV-ATTR:hostname": "new-server-test",
+ "OS-EXT-SRV-ATTR:hypervisor_hostname": "fake-mini",
+ "OS-EXT-SRV-ATTR:instance_name": "instance-00000001",
+ "OS-EXT-SRV-ATTR:kernel_id": "",
+ "OS-EXT-SRV-ATTR:launch_index": 0,
+ "OS-EXT-SRV-ATTR:ramdisk_id": "",
+ "OS-EXT-SRV-ATTR:reservation_id": "r-t61j9da6",
+ "OS-EXT-SRV-ATTR:root_device_name": "/dev/sda",
+ "OS-EXT-SRV-ATTR:user_data": "IyEvYmluL2Jhc2gKL2Jpbi9zdQplY2hvICJJIGFtIGluIHlvdSEiCg==",
+ "OS-EXT-STS:power_state": 1,
+ "OS-EXT-STS:task_state": null,
+ "OS-EXT-STS:vm_state": "active",
+ "OS-SRV-USG:launched_at": "2019-04-23T15:19:10.855016",
+ "OS-SRV-USG:terminated_at": null,
+ "accessIPv4": "1.2.3.4",
+ "accessIPv6": "80fe::",
+ "addresses": {
+ "private": [
+ {
+ "OS-EXT-IPS-MAC:mac_addr": "00:0c:29:0d:11:74",
+ "OS-EXT-IPS:type": "fixed",
+ "addr": "192.168.1.30",
+ "version": 4
+ }
+ ]
+ },
+ "config_drive": "",
+ "created": "2019-04-23T15:19:09Z",
+ "description": null,
+ "flavor": {
+ "disk": 1,
+ "ephemeral": 0,
+ "extra_specs": {},
+ "original_name": "m1.tiny",
+ "ram": 512,
+ "swap": 0,
+ "vcpus": 1
+ },
+ "hostId": "2091634baaccdc4c5a1d57069c833e402921df696b7f970791b12ec6",
+ "host_status": "UP",
+ "id": "0e12087a-7c87-476a-8f84-7398e991cecc",
+ "image": {
+ "id": "70a599e0-31e7-49b7-b260-868f441e862b",
+ "links": [
+ {
+ "href": "http://openstack.example.com/6f70656e737461636b20342065766572/images/70a599e0-31e7-49b7-b260-868f441e862b",
+ "rel": "bookmark"
+ }
+ ]
+ },
+ "key_name": null,
+ "links": [
+ {
+ "href": "http://openstack.example.com/v2.1/6f70656e737461636b20342065766572/servers/0e12087a-7c87-476a-8f84-7398e991cecc",
+ "rel": "self"
+ },
+ {
+ "href": "http://openstack.example.com/6f70656e737461636b20342065766572/servers/0e12087a-7c87-476a-8f84-7398e991cecc",
+ "rel": "bookmark"
+ }
+ ],
+ "locked": true,
+ "locked_reason": "I don't want to work",
+ "metadata": {
+ "My Server Name": "Apache1"
+ },
+ "name": "new-server-test",
+ "os-extended-volumes:volumes_attached": [],
+ "progress": 0,
+ "security_groups": [
+ {
+ "name": "default"
+ }
+ ],
+ "server_groups": [],
+ "status": "ACTIVE",
+ "tags": [],
+ "tenant_id": "6f70656e737461636b20342065766572",
+ "trusted_image_certificates": null,
+ "updated": "2019-04-23T15:19:11Z",
+ "user_id": "admin"
+ }
+}
diff --git a/doc/api_samples/servers/v2.73/server-update-req.json b/doc/api_samples/servers/v2.73/server-update-req.json
new file mode 100644
index 00000000000..3b3995d51e4
--- /dev/null
+++ b/doc/api_samples/servers/v2.73/server-update-req.json
@@ -0,0 +1,9 @@
+{
+ "server": {
+ "accessIPv4": "1.2.3.4",
+ "accessIPv6": "80fe::",
+ "OS-DCF:diskConfig": "AUTO",
+ "name": "new-server-test",
+ "description": "Sample description"
+ }
+}
\ No newline at end of file
diff --git a/doc/api_samples/servers/v2.73/server-update-resp.json b/doc/api_samples/servers/v2.73/server-update-resp.json
new file mode 100644
index 00000000000..b99333e902d
--- /dev/null
+++ b/doc/api_samples/servers/v2.73/server-update-resp.json
@@ -0,0 +1,61 @@
+{
+ "server": {
+ "OS-DCF:diskConfig": "AUTO",
+ "accessIPv4": "1.2.3.4",
+ "accessIPv6": "80fe::",
+ "addresses": {
+ "private": [
+ {
+ "addr": "192.168.1.30",
+ "version": 4
+ }
+ ]
+ },
+ "created": "2019-04-23T17:37:48Z",
+ "description": "Sample description",
+ "flavor": {
+ "disk": 1,
+ "ephemeral": 0,
+ "extra_specs": {},
+ "original_name": "m1.tiny",
+ "ram": 512,
+ "swap": 0,
+ "vcpus": 1
+ },
+ "hostId": "2091634baaccdc4c5a1d57069c833e402921df696b7f970791b12ec6",
+ "id": "f9a6c4fe-28e0-48a9-b02c-164e4d04d0b2",
+ "image": {
+ "id": "70a599e0-31e7-49b7-b260-868f441e862b",
+ "links": [
+ {
+ "href": "http://openstack.example.com/6f70656e737461636b20342065766572/images/70a599e0-31e7-49b7-b260-868f441e862b",
+ "rel": "bookmark"
+ }
+ ]
+ },
+ "links": [
+ {
+ "href": "http://openstack.example.com/v2.1/6f70656e737461636b20342065766572/servers/f9a6c4fe-28e0-48a9-b02c-164e4d04d0b2",
+ "rel": "self"
+ },
+ {
+ "href": "http://openstack.example.com/6f70656e737461636b20342065766572/servers/f9a6c4fe-28e0-48a9-b02c-164e4d04d0b2",
+ "rel": "bookmark"
+ }
+ ],
+ "locked": false,
+ "locked_reason": null,
+ "metadata": {
+ "My Server Name": "Apache1"
+ },
+ "name": "new-server-test",
+ "progress": 0,
+ "server_groups": [],
+ "status": "ACTIVE",
+ "tags": [],
+ "tenant_id": "6f70656e737461636b20342065766572",
+ "trusted_image_certificates": null,
+ "updated": "2019-04-23T17:37:48Z",
+ "user_id": "admin"
+ }
+}
\ No newline at end of file
diff --git a/doc/api_samples/servers/v2.73/servers-details-resp.json b/doc/api_samples/servers/v2.73/servers-details-resp.json
new file mode 100644
index 00000000000..98fcc913063
--- /dev/null
+++ b/doc/api_samples/servers/v2.73/servers-details-resp.json
@@ -0,0 +1,89 @@
+{
+ "servers": [
+ {
+ "OS-DCF:diskConfig": "AUTO",
+ "OS-EXT-AZ:availability_zone": "nova",
+ "OS-EXT-SRV-ATTR:host": "compute",
+ "OS-EXT-SRV-ATTR:hostname": "new-server-test",
+ "OS-EXT-SRV-ATTR:hypervisor_hostname": "fake-mini",
+ "OS-EXT-SRV-ATTR:instance_name": "instance-00000001",
+ "OS-EXT-SRV-ATTR:kernel_id": "",
+ "OS-EXT-SRV-ATTR:launch_index": 0,
+ "OS-EXT-SRV-ATTR:ramdisk_id": "",
+ "OS-EXT-SRV-ATTR:reservation_id": "r-l0i0clt2",
+ "OS-EXT-SRV-ATTR:root_device_name": "/dev/sda",
+ "OS-EXT-SRV-ATTR:user_data": "IyEvYmluL2Jhc2gKL2Jpbi9zdQplY2hvICJJIGFtIGluIHlvdSEiCg==",
+ "OS-EXT-STS:power_state": 1,
+ "OS-EXT-STS:task_state": null,
+ "OS-EXT-STS:vm_state": "active",
+ "OS-SRV-USG:launched_at": "2019-04-23T15:19:15.317839",
+ "OS-SRV-USG:terminated_at": null,
+ "accessIPv4": "1.2.3.4",
+ "accessIPv6": "80fe::",
+ "addresses": {
+ "private": [
+ {
+ "OS-EXT-IPS-MAC:mac_addr": "00:0c:29:0d:11:74",
+ "OS-EXT-IPS:type": "fixed",
+ "addr": "192.168.1.30",
+ "version": 4
+ }
+ ]
+ },
+ "config_drive": "",
+ "created": "2019-04-23T15:19:14Z",
+ "description": null,
+ "flavor": {
+ "disk": 1,
+ "ephemeral": 0,
+ "extra_specs": {},
+ "original_name": "m1.tiny",
+ "ram": 512,
+ "swap": 0,
+ "vcpus": 1
+ },
+ "hostId": "2091634baaccdc4c5a1d57069c833e402921df696b7f970791b12ec6",
+ "host_status": "UP",
+ "id": "2ce4c5b3-2866-4972-93ce-77a2ea46a7f9",
+ "image": {
+ "id": "70a599e0-31e7-49b7-b260-868f441e862b",
+ "links": [
+ {
+ "href": "http://openstack.example.com/6f70656e737461636b20342065766572/images/70a599e0-31e7-49b7-b260-868f441e862b",
+ "rel": "bookmark"
+ }
+ ]
+ },
+ "key_name": null,
+ "links": [
+ {
+ "href": "http://openstack.example.com/v2.1/6f70656e737461636b20342065766572/servers/2ce4c5b3-2866-4972-93ce-77a2ea46a7f9",
+ "rel": "self"
+ },
+ {
+ "href": "http://openstack.example.com/6f70656e737461636b20342065766572/servers/2ce4c5b3-2866-4972-93ce-77a2ea46a7f9",
+ "rel": "bookmark"
+ }
+ ],
+ "locked": true,
+ "locked_reason": "I don't want to work",
+ "metadata": {
+ "My Server Name": "Apache1"
+ },
+ "name": "new-server-test",
+ "os-extended-volumes:volumes_attached": [],
+ "progress": 0,
+ "security_groups": [
+ {
+ "name": "default"
+ }
+ ],
+ "status": "ACTIVE",
+ "tags": [],
+ "tenant_id": "6f70656e737461636b20342065766572",
+ "trusted_image_certificates": null,
+ "updated": "2019-04-23T15:19:15Z",
+ "user_id": "admin"
+ }
+ ]
+}
\ No newline at end of file
diff --git a/doc/api_samples/servers/v2.74/server-create-req-with-host-and-node.json b/doc/api_samples/servers/v2.74/server-create-req-with-host-and-node.json
new file mode 100644
index 00000000000..43552ed6385
--- /dev/null
+++ b/doc/api_samples/servers/v2.74/server-create-req-with-host-and-node.json
@@ -0,0 +1,23 @@
+{
+ "server" : {
+ "adminPass": "MySecretPass",
+ "accessIPv4": "1.2.3.4",
+ "accessIPv6": "80fe::",
+ "name" : "new-server-test",
+ "imageRef" : "70a599e0-31e7-49b7-b260-868f441e862b",
+ "flavorRef" : "6",
+ "OS-DCF:diskConfig": "AUTO",
+ "metadata" : {
+ "My Server Name" : "Apache1"
+ },
+ "security_groups": [
+ {
+ "name": "default"
+ }
+ ],
+ "user_data" : "IyEvYmluL2Jhc2gKL2Jpbi9zdQplY2hvICJJIGFtIGluIHlvdSEiCg==",
+ "networks": "auto",
+ "host": "openstack-node-01",
+ "hypervisor_hostname": "openstack-node-01"
+ }
+}
\ No newline at end of file
diff --git a/doc/api_samples/servers/v2.74/server-create-req-with-only-host.json b/doc/api_samples/servers/v2.74/server-create-req-with-only-host.json
new file mode 100644
index 00000000000..aa0dc613b12
--- /dev/null
+++ b/doc/api_samples/servers/v2.74/server-create-req-with-only-host.json
@@ -0,0 +1,22 @@
+{
+ "server" : {
+ "adminPass": "MySecretPass",
+ "accessIPv4": "1.2.3.4",
+ "accessIPv6": "80fe::",
+ "name" : "new-server-test",
+ "imageRef" : "70a599e0-31e7-49b7-b260-868f441e862b",
+ "flavorRef" : "6",
+ "OS-DCF:diskConfig": "AUTO",
+ "metadata" : {
+ "My Server Name" : "Apache1"
+ },
+ "security_groups": [
+ {
+ "name": "default"
+ }
+ ],
+ "user_data" : "IyEvYmluL2Jhc2gKL2Jpbi9zdQplY2hvICJJIGFtIGluIHlvdSEiCg==",
+ "networks": "auto",
+ "host": "openstack-node-01"
+ }
+}
\ No newline at end of file
diff --git a/doc/api_samples/servers/v2.74/server-create-req-with-only-node.json b/doc/api_samples/servers/v2.74/server-create-req-with-only-node.json
new file mode 100644
index 00000000000..ab9ec85350f
--- /dev/null
+++ b/doc/api_samples/servers/v2.74/server-create-req-with-only-node.json
@@ -0,0 +1,22 @@
+{
+ "server" : {
+ "adminPass": "MySecretPass",
+ "accessIPv4": "1.2.3.4",
+ "accessIPv6": "80fe::",
+ "name" : "new-server-test",
+ "imageRef" : "70a599e0-31e7-49b7-b260-868f441e862b",
+ "flavorRef" : "6",
+ "OS-DCF:diskConfig": "AUTO",
+ "metadata" : {
+ "My Server Name" : "Apache1"
+ },
+ "security_groups": [
+ {
+ "name": "default"
+ }
+ ],
+ "user_data" : "IyEvYmluL2Jhc2gKL2Jpbi9zdQplY2hvICJJIGFtIGluIHlvdSEiCg==",
+ "networks": "auto",
+ "hypervisor_hostname": "openstack-node-01"
+ }
+}
\ No newline at end of file
diff --git a/doc/api_samples/servers/v2.74/server-create-resp.json b/doc/api_samples/servers/v2.74/server-create-resp.json
new file mode 100644
index 00000000000..7ebe2e20a2d
--- /dev/null
+++ b/doc/api_samples/servers/v2.74/server-create-resp.json
@@ -0,0 +1,22 @@
+{
+ "server": {
+ "OS-DCF:diskConfig": "AUTO",
+ "adminPass": "DB2bQBhxvq8a",
+ "id": "84e2b49d-39a9-4d32-9100-e62161c236db",
+ "links": [
+ {
+ "href": "http://openstack.example.com/v2.1/6f70656e737461636b20342065766572/servers/84e2b49d-39a9-4d32-9100-e62161c236db",
+ "rel": "self"
+ },
+ {
+ "href": "http://openstack.example.com/6f70656e737461636b20342065766572/servers/84e2b49d-39a9-4d32-9100-e62161c236db",
+ "rel": "bookmark"
+ }
+ ],
+ "security_groups": [
+ {
+ "name": "default"
+ }
+ ]
+ }
+}
\ No newline at end of file
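
The three request variants above illustrate microversion 2.74, which lets a suitably privileged user (typically admin-only by policy) pin the scheduling target at boot via ``host`` and/or ``hypervisor_hostname`` directly in the server body. A hedged sketch of the call, again with placeholder endpoint and token:

```python
import requests

headers = {
    "X-Auth-Token": "<admin-token>",  # placeholder
    "X-OpenStack-Nova-API-Version": "2.74",
}
body = {
    "server": {
        "name": "new-server-test",
        "imageRef": "70a599e0-31e7-49b7-b260-868f441e862b",
        "flavorRef": "6",
        "networks": "auto",
        # Either or both may be given, per the three sample variants.
        "host": "openstack-node-01",
        "hypervisor_hostname": "openstack-node-01",
    }
}
resp = requests.post("http://openstack.example.com/v2.1/servers",
                     json=body, headers=headers)
resp.raise_for_status()
```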
diff --git a/doc/api_samples/servers/v2.75/server-action-rebuild-resp.json b/doc/api_samples/servers/v2.75/server-action-rebuild-resp.json
new file mode 100644
index 00000000000..a4421b85e89
--- /dev/null
+++ b/doc/api_samples/servers/v2.75/server-action-rebuild-resp.json
@@ -0,0 +1,89 @@
+{
+ "server": {
+ "OS-DCF:diskConfig": "AUTO",
+ "OS-EXT-AZ:availability_zone": "us-west",
+ "OS-EXT-SRV-ATTR:host": "compute",
+ "OS-EXT-SRV-ATTR:hostname": "new-server-test",
+ "OS-EXT-SRV-ATTR:hypervisor_hostname": "fake-mini",
+ "OS-EXT-SRV-ATTR:instance_name": "instance-00000001",
+ "OS-EXT-SRV-ATTR:kernel_id": "",
+ "OS-EXT-SRV-ATTR:launch_index": 0,
+ "OS-EXT-SRV-ATTR:ramdisk_id": "",
+ "OS-EXT-SRV-ATTR:reservation_id": "r-t61j9da6",
+ "OS-EXT-SRV-ATTR:root_device_name": "/dev/sda",
+ "OS-EXT-STS:power_state": 1,
+ "OS-EXT-STS:task_state": null,
+ "OS-EXT-STS:vm_state": "active",
+ "OS-SRV-USG:launched_at": "2019-04-23T15:19:10.855016",
+ "OS-SRV-USG:terminated_at": null,
+ "accessIPv4": "1.2.3.4",
+ "accessIPv6": "80fe::",
+ "addresses": {
+ "private": [
+ {
+ "OS-EXT-IPS-MAC:mac_addr": "00:0c:29:0d:11:74",
+ "OS-EXT-IPS:type": "fixed",
+ "addr": "192.168.1.30",
+ "version": 4
+ }
+ ]
+ },
+ "adminPass": "seekr3t",
+ "config_drive": "",
+ "created": "2019-04-23T17:10:22Z",
+ "description": null,
+ "flavor": {
+ "disk": 1,
+ "ephemeral": 0,
+ "extra_specs": {},
+ "original_name": "m1.tiny",
+ "ram": 512,
+ "swap": 0,
+ "vcpus": 1
+ },
+ "hostId": "2091634baaccdc4c5a1d57069c833e402921df696b7f970791b12ec6",
+ "host_status": "UP",
+ "id": "0c37a84a-c757-4f22-8c7f-0bf8b6970886",
+ "image": {
+ "id": "70a599e0-31e7-49b7-b260-868f441e862b",
+ "links": [
+ {
+ "href": "http://openstack.example.com/6f70656e737461636b20342065766572/images/70a599e0-31e7-49b7-b260-868f441e862b",
+ "rel": "bookmark"
+ }
+ ]
+ },
+ "key_name": null,
+ "links": [
+ {
+ "href": "http://openstack.example.com/v2.1/6f70656e737461636b20342065766572/servers/0c37a84a-c757-4f22-8c7f-0bf8b6970886",
+ "rel": "self"
+ },
+ {
+ "href": "http://openstack.example.com/6f70656e737461636b20342065766572/servers/0c37a84a-c757-4f22-8c7f-0bf8b6970886",
+ "rel": "bookmark"
+ }
+ ],
+ "locked": false,
+ "locked_reason": null,
+ "metadata": {
+ "meta_var": "meta_val"
+ },
+ "name": "foobar",
+ "os-extended-volumes:volumes_attached": [],
+ "progress": 0,
+ "security_groups": [
+ {
+ "name": "default"
+ }
+ ],
+ "server_groups": [],
+ "status": "ACTIVE",
+ "tags": [],
+ "tenant_id": "6f70656e737461636b20342065766572",
+ "trusted_image_certificates": null,
+ "updated": "2019-04-23T17:10:24Z",
+ "user_data": "ZWNobyAiaGVsbG8gd29ybGQi",
+ "user_id": "admin"
+ }
+}
diff --git a/doc/api_samples/servers/v2.75/server-action-rebuild.json b/doc/api_samples/servers/v2.75/server-action-rebuild.json
new file mode 100644
index 00000000000..f1431a05062
--- /dev/null
+++ b/doc/api_samples/servers/v2.75/server-action-rebuild.json
@@ -0,0 +1,14 @@
+{
+ "rebuild" : {
+ "accessIPv4" : "1.2.3.4",
+ "accessIPv6" : "80fe::",
+ "OS-DCF:diskConfig": "AUTO",
+ "imageRef" : "70a599e0-31e7-49b7-b260-868f441e862b",
+ "name" : "foobar",
+ "adminPass" : "seekr3t",
+ "metadata" : {
+ "meta_var" : "meta_val"
+ },
+ "user_data": "ZWNobyAiaGVsbG8gd29ybGQi"
+ }
+}
\ No newline at end of file
diff --git a/doc/api_samples/servers/v2.75/server-update-req.json b/doc/api_samples/servers/v2.75/server-update-req.json
new file mode 100644
index 00000000000..1341355ce52
--- /dev/null
+++ b/doc/api_samples/servers/v2.75/server-update-req.json
@@ -0,0 +1,9 @@
+{
+ "server": {
+ "accessIPv4": "1.2.3.4",
+ "accessIPv6": "80fe::",
+ "OS-DCF:diskConfig": "AUTO",
+ "name": "new-server-test",
+ "description": "Sample description"
+ }
+}
diff --git a/doc/api_samples/servers/v2.75/server-update-resp.json b/doc/api_samples/servers/v2.75/server-update-resp.json
new file mode 100644
index 00000000000..0fc5cf237bd
--- /dev/null
+++ b/doc/api_samples/servers/v2.75/server-update-resp.json
@@ -0,0 +1,88 @@
+{
+ "server": {
+ "OS-DCF:diskConfig": "AUTO",
+ "OS-EXT-AZ:availability_zone": "us-west",
+ "OS-EXT-SRV-ATTR:host": "compute",
+ "OS-EXT-SRV-ATTR:hostname": "new-server-test",
+ "OS-EXT-SRV-ATTR:hypervisor_hostname": "fake-mini",
+ "OS-EXT-SRV-ATTR:instance_name": "instance-00000001",
+ "OS-EXT-SRV-ATTR:kernel_id": "",
+ "OS-EXT-SRV-ATTR:launch_index": 0,
+ "OS-EXT-SRV-ATTR:ramdisk_id": "",
+ "OS-EXT-SRV-ATTR:reservation_id": "r-t61j9da6",
+ "OS-EXT-SRV-ATTR:root_device_name": "/dev/sda",
+ "OS-EXT-SRV-ATTR:user_data": "IyEvYmluL2Jhc2gKL2Jpbi9zdQplY2hvICJJIGFtIGluIHlvdSEiCg==",
+ "OS-EXT-STS:power_state": 1,
+ "OS-EXT-STS:task_state": null,
+ "OS-EXT-STS:vm_state": "active",
+ "OS-SRV-USG:launched_at": "2019-04-23T15:19:10.855016",
+ "OS-SRV-USG:terminated_at": null,
+ "accessIPv4": "1.2.3.4",
+ "accessIPv6": "80fe::",
+ "addresses": {
+ "private": [
+ {
+ "OS-EXT-IPS-MAC:mac_addr": "00:0c:29:0d:11:74",
+ "OS-EXT-IPS:type": "fixed",
+ "addr": "192.168.1.30",
+ "version": 4
+ }
+ ]
+ },
+ "config_drive": "",
+ "created": "2012-12-02T02:11:57Z",
+ "description": "Sample description",
+ "flavor": {
+ "disk": 1,
+ "ephemeral": 0,
+ "extra_specs": {},
+ "original_name": "m1.tiny",
+ "ram": 512,
+ "swap": 0,
+ "vcpus": 1
+ },
+ "hostId": "6e84af987b4e7ec1c039b16d21f508f4a505672bd94fb0218b668d07",
+ "host_status": "UP",
+ "id": "324dfb7d-f4a9-419a-9a19-237df04b443b",
+ "image": {
+ "id": "70a599e0-31e7-49b7-b260-868f441e862b",
+ "links": [
+ {
+ "href": "http://openstack.example.com/6f70656e737461636b20342065766572/images/70a599e0-31e7-49b7-b260-868f441e862b",
+ "rel": "bookmark"
+ }
+ ]
+ },
+ "key_name": null,
+ "links": [
+ {
+ "href": "http://openstack.example.com/v2/6f70656e737461636b20342065766572/servers/324dfb7d-f4a9-419a-9a19-237df04b443b",
+ "rel": "self"
+ },
+ {
+ "href": "http://openstack.example.com/6f70656e737461636b20342065766572/servers/324dfb7d-f4a9-419a-9a19-237df04b443b",
+ "rel": "bookmark"
+ }
+ ],
+ "locked": false,
+ "locked_reason": null,
+ "metadata": {
+ "My Server Name": "Apache1"
+ },
+ "name": "new-server-test",
+ "os-extended-volumes:volumes_attached": [],
+ "progress": 0,
+ "security_groups": [
+ {
+ "name": "default"
+ }
+ ],
+ "server_groups": [],
+ "status": "ACTIVE",
+ "tags": [],
+ "tenant_id": "6f70656e737461636b20342065766572",
+ "trusted_image_certificates": null,
+ "updated": "2012-12-02T02:11:58Z",
+ "user_id": "admin"
+ }
+}
diff --git a/doc/api_samples/servers/v2.9/server-get-resp.json b/doc/api_samples/servers/v2.9/server-get-resp.json
index c92ff85fd58..25ac6ae3052 100644
--- a/doc/api_samples/servers/v2.9/server-get-resp.json
+++ b/doc/api_samples/servers/v2.9/server-get-resp.json
@@ -5,8 +5,8 @@
"addresses": {
"private": [
{
- "addr": "192.168.0.3",
- "OS-EXT-IPS-MAC:mac_addr": "aa:bb:cc:dd:ee:ff",
+ "addr": "192.168.1.30",
+ "OS-EXT-IPS-MAC:mac_addr": "00:0c:29:0d:11:74",
"OS-EXT-IPS:type": "fixed",
"version": 4
}
@@ -50,7 +50,7 @@
"name": "new-server-test",
"config_drive": "",
"OS-DCF:diskConfig": "AUTO",
- "OS-EXT-AZ:availability_zone": "nova",
+ "OS-EXT-AZ:availability_zone": "us-west",
"OS-EXT-SRV-ATTR:host": "b8b357f7100d4391828f2177c922ef93",
"OS-EXT-SRV-ATTR:hypervisor_hostname": "fake-mini",
"OS-EXT-SRV-ATTR:instance_name": "instance-00000001",
@@ -85,7 +85,7 @@
"status": "ACTIVE",
"tenant_id": "6f70656e737461636b20342065766572",
"updated": "2013-09-03T04:01:33Z",
- "user_id": "fake",
+ "user_id": "admin",
"locked": false
}
}
diff --git a/doc/api_samples/servers/v2.9/servers-details-resp.json b/doc/api_samples/servers/v2.9/servers-details-resp.json
index 0ca874f3332..84cb44c1b27 100644
--- a/doc/api_samples/servers/v2.9/servers-details-resp.json
+++ b/doc/api_samples/servers/v2.9/servers-details-resp.json
@@ -6,8 +6,8 @@
"addresses": {
"private": [
{
- "addr": "192.168.0.3",
- "OS-EXT-IPS-MAC:mac_addr": "aa:bb:cc:dd:ee:ff",
+ "addr": "192.168.1.30",
+ "OS-EXT-IPS-MAC:mac_addr": "00:0c:29:0d:11:74",
"OS-EXT-IPS:type": "fixed",
"version": 4
}
@@ -51,7 +51,7 @@
"name": "new-server-test",
"config_drive": "",
"OS-DCF:diskConfig": "AUTO",
- "OS-EXT-AZ:availability_zone": "nova",
+ "OS-EXT-AZ:availability_zone": "us-west",
"OS-EXT-SRV-ATTR:host": "c3f14e9812ad496baf92ccfb3c61e15f",
"OS-EXT-SRV-ATTR:hypervisor_hostname": "fake-mini",
"OS-EXT-SRV-ATTR:instance_name": "instance-00000001",
@@ -86,7 +86,7 @@
"status": "ACTIVE",
"tenant_id": "6f70656e737461636b20342065766572",
"updated": "2013-09-03T04:01:32Z",
- "user_id": "fake",
+ "user_id": "admin",
"locked": false
}
],
diff --git a/doc/api_samples/servers/v2.90/server-action-rebuild-resp.json b/doc/api_samples/servers/v2.90/server-action-rebuild-resp.json
new file mode 100644
index 00000000000..d701b55c0a0
--- /dev/null
+++ b/doc/api_samples/servers/v2.90/server-action-rebuild-resp.json
@@ -0,0 +1,80 @@
+{
+ "server": {
+ "OS-DCF:diskConfig": "AUTO",
+ "OS-EXT-AZ:availability_zone": "us-west",
+ "OS-EXT-SRV-ATTR:hostname": "updated-hostname",
+ "OS-EXT-STS:power_state": 1,
+ "OS-EXT-STS:task_state": null,
+ "OS-EXT-STS:vm_state": "active",
+ "OS-SRV-USG:launched_at": "2021-08-19T15:16:22.177882",
+ "OS-SRV-USG:terminated_at": null,
+ "accessIPv4": "1.2.3.4",
+ "accessIPv6": "80fe::",
+ "addresses": {
+ "private": [
+ {
+ "OS-EXT-IPS-MAC:mac_addr": "00:0c:29:0d:11:74",
+ "OS-EXT-IPS:type": "fixed",
+ "addr": "192.168.1.30",
+ "version": 4
+ }
+ ]
+ },
+ "adminPass": "seekr3t",
+ "config_drive": "",
+ "created": "2019-04-23T17:10:22Z",
+ "description": null,
+ "flavor": {
+ "disk": 1,
+ "ephemeral": 0,
+ "extra_specs": {},
+ "original_name": "m1.tiny",
+ "ram": 512,
+ "swap": 0,
+ "vcpus": 1
+ },
+ "hostId": "2091634baaccdc4c5a1d57069c833e402921df696b7f970791b12ec6",
+ "id": "0c37a84a-c757-4f22-8c7f-0bf8b6970886",
+ "image": {
+ "id": "70a599e0-31e7-49b7-b260-868f441e862b",
+ "links": [
+ {
+ "href": "http://openstack.example.com/6f70656e737461636b20342065766572/images/70a599e0-31e7-49b7-b260-868f441e862b",
+ "rel": "bookmark"
+ }
+ ]
+ },
+ "key_name": null,
+ "links": [
+ {
+ "href": "http://openstack.example.com/v2.1/6f70656e737461636b20342065766572/servers/0c37a84a-c757-4f22-8c7f-0bf8b6970886",
+ "rel": "self"
+ },
+ {
+ "href": "http://openstack.example.com/6f70656e737461636b20342065766572/servers/0c37a84a-c757-4f22-8c7f-0bf8b6970886",
+ "rel": "bookmark"
+ }
+ ],
+ "locked": false,
+ "locked_reason": null,
+ "metadata": {
+ "meta_var": "meta_val"
+ },
+ "name": "foobar",
+ "os-extended-volumes:volumes_attached": [],
+ "progress": 0,
+ "security_groups": [
+ {
+ "name": "default"
+ }
+ ],
+ "server_groups": [],
+ "status": "ACTIVE",
+ "tags": [],
+ "tenant_id": "6f70656e737461636b20342065766572",
+ "trusted_image_certificates": null,
+ "updated": "2019-04-23T17:10:24Z",
+ "user_data": "ZWNobyAiaGVsbG8gd29ybGQi",
+ "user_id": "fake"
+ }
+}
diff --git a/doc/api_samples/servers/v2.90/server-action-rebuild.json b/doc/api_samples/servers/v2.90/server-action-rebuild.json
new file mode 100644
index 00000000000..32148a45be3
--- /dev/null
+++ b/doc/api_samples/servers/v2.90/server-action-rebuild.json
@@ -0,0 +1,15 @@
+{
+ "rebuild" : {
+ "accessIPv4" : "1.2.3.4",
+ "accessIPv6" : "80fe::",
+ "OS-DCF:diskConfig": "AUTO",
+ "imageRef" : "70a599e0-31e7-49b7-b260-868f441e862b",
+ "name" : "foobar",
+ "adminPass" : "seekr3t",
+ "hostname": "custom-hostname",
+ "metadata" : {
+ "meta_var" : "meta_val"
+ },
+ "user_data": "ZWNobyAiaGVsbG8gd29ybGQi"
+ }
+}
diff --git a/doc/api_samples/servers/v2.90/server-create-req.json b/doc/api_samples/servers/v2.90/server-create-req.json
new file mode 100644
index 00000000000..c0818fd5262
--- /dev/null
+++ b/doc/api_samples/servers/v2.90/server-create-req.json
@@ -0,0 +1,30 @@
+{
+ "server" : {
+ "accessIPv4": "1.2.3.4",
+ "accessIPv6": "80fe::",
+ "name" : "new-server-test",
+ "imageRef" : "70a599e0-31e7-49b7-b260-868f441e862b",
+ "flavorRef" : "1",
+ "availability_zone": "us-west",
+ "OS-DCF:diskConfig": "AUTO",
+ "hostname": "custom-hostname",
+ "metadata" : {
+ "My Server Name" : "Apache1"
+ },
+ "personality": [
+ {
+ "path": "/etc/banner.txt",
+ "contents": "ICAgICAgDQoiQSBjbG91ZCBkb2VzIG5vdCBrbm93IHdoeSBp dCBtb3ZlcyBpbiBqdXN0IHN1Y2ggYSBkaXJlY3Rpb24gYW5k IGF0IHN1Y2ggYSBzcGVlZC4uLkl0IGZlZWxzIGFuIGltcHVs c2lvbi4uLnRoaXMgaXMgdGhlIHBsYWNlIHRvIGdvIG5vdy4g QnV0IHRoZSBza3kga25vd3MgdGhlIHJlYXNvbnMgYW5kIHRo ZSBwYXR0ZXJucyBiZWhpbmQgYWxsIGNsb3VkcywgYW5kIHlv dSB3aWxsIGtub3csIHRvbywgd2hlbiB5b3UgbGlmdCB5b3Vy c2VsZiBoaWdoIGVub3VnaCB0byBzZWUgYmV5b25kIGhvcml6 b25zLiINCg0KLVJpY2hhcmQgQmFjaA=="
+ }
+ ],
+ "security_groups": [
+ {
+ "name": "default"
+ }
+ ],
+ "user_data" : "IyEvYmluL2Jhc2gKL2Jpbi9zdQplY2hvICJJIGFtIGluIHlvdSEiCg=="
+ },
+ "OS-SCH-HNT:scheduler_hints": {
+ "same_host": "48e6a9f6-30af-47e0-bc04-acaed113bb4e"
+ }
+}
diff --git a/doc/api_samples/servers/v2.90/server-create-resp.json b/doc/api_samples/servers/v2.90/server-create-resp.json
new file mode 100644
index 00000000000..f50e29dd8be
--- /dev/null
+++ b/doc/api_samples/servers/v2.90/server-create-resp.json
@@ -0,0 +1,22 @@
+{
+ "server": {
+ "OS-DCF:diskConfig": "AUTO",
+ "adminPass": "6NpUwoz2QDRN",
+ "id": "f5dc173b-6804-445a-a6d8-c705dad5b5eb",
+ "links": [
+ {
+ "href": "http://openstack.example.com/v2/6f70656e737461636b20342065766572/servers/f5dc173b-6804-445a-a6d8-c705dad5b5eb",
+ "rel": "self"
+ },
+ {
+ "href": "http://openstack.example.com/6f70656e737461636b20342065766572/servers/f5dc173b-6804-445a-a6d8-c705dad5b5eb",
+ "rel": "bookmark"
+ }
+ ],
+ "security_groups": [
+ {
+ "name": "default"
+ }
+ ]
+ }
+}
diff --git a/doc/api_samples/servers/v2.90/server-get-resp.json b/doc/api_samples/servers/v2.90/server-get-resp.json
new file mode 100644
index 00000000000..063bdbce78b
--- /dev/null
+++ b/doc/api_samples/servers/v2.90/server-get-resp.json
@@ -0,0 +1,81 @@
+{
+ "server": {
+ "accessIPv4": "1.2.3.4",
+ "accessIPv6": "80fe::",
+ "addresses": {
+ "private": [
+ {
+ "addr": "192.168.1.30",
+ "OS-EXT-IPS-MAC:mac_addr": "00:0c:29:0d:11:74",
+ "OS-EXT-IPS:type": "fixed",
+ "version": 4
+ }
+ ]
+ },
+ "created": "2013-09-03T04:01:32Z",
+ "description": null,
+ "locked": false,
+ "locked_reason": null,
+ "flavor": {
+ "disk": 1,
+ "ephemeral": 0,
+ "extra_specs": {},
+ "original_name": "m1.tiny",
+ "ram": 512,
+ "swap": 0,
+ "vcpus": 1
+ },
+ "hostId": "92154fab69d5883ba2c8622b7e65f745dd33257221c07af363c51b29",
+ "id": "0e44cc9c-e052-415d-afbf-469b0d384170",
+ "image": {
+ "id": "70a599e0-31e7-49b7-b260-868f441e862b",
+ "links": [
+ {
+ "href": "http://openstack.example.com/6f70656e737461636b20342065766572/images/70a599e0-31e7-49b7-b260-868f441e862b",
+ "rel": "bookmark"
+ }
+ ]
+ },
+ "key_name": null,
+ "links": [
+ {
+ "href": "http://openstack.example.com/v2/6f70656e737461636b20342065766572/servers/0e44cc9c-e052-415d-afbf-469b0d384170",
+ "rel": "self"
+ },
+ {
+ "href": "http://openstack.example.com/6f70656e737461636b20342065766572/servers/0e44cc9c-e052-415d-afbf-469b0d384170",
+ "rel": "bookmark"
+ }
+ ],
+ "metadata": {
+ "My Server Name": "Apache1"
+ },
+ "name": "new-server-test",
+ "config_drive": "",
+ "OS-DCF:diskConfig": "AUTO",
+ "OS-EXT-AZ:availability_zone": "nova",
+ "OS-EXT-SRV-ATTR:hostname": "custom-hostname",
+ "OS-EXT-STS:power_state": 1,
+ "OS-EXT-STS:task_state": null,
+ "OS-EXT-STS:vm_state": "active",
+ "os-extended-volumes:volumes_attached": [
+ {"id": "volume_id1", "delete_on_termination": false},
+ {"id": "volume_id2", "delete_on_termination": false}
+ ],
+ "OS-SRV-USG:launched_at": "2013-09-23T13:37:00.880302",
+ "OS-SRV-USG:terminated_at": null,
+ "progress": 0,
+ "security_groups": [
+ {
+ "name": "default"
+ }
+ ],
+ "server_groups": [],
+ "status": "ACTIVE",
+ "tags": [],
+ "tenant_id": "6f70656e737461636b20342065766572",
+ "trusted_image_certificates": null,
+ "updated": "2013-09-03T04:01:33Z",
+ "user_id": "fake"
+ }
+}
diff --git a/doc/api_samples/servers/v2.90/server-update-req.json b/doc/api_samples/servers/v2.90/server-update-req.json
new file mode 100644
index 00000000000..348f926cdf8
--- /dev/null
+++ b/doc/api_samples/servers/v2.90/server-update-req.json
@@ -0,0 +1,8 @@
+{
+ "server": {
+ "accessIPv4": "4.3.2.1",
+ "accessIPv6": "80fe::",
+ "OS-DCF:diskConfig": "AUTO",
+ "hostname" : "new-server-hostname"
+ }
+}
diff --git a/doc/api_samples/servers/v2.90/server-update-resp.json b/doc/api_samples/servers/v2.90/server-update-resp.json
new file mode 100644
index 00000000000..0dcba8328e7
--- /dev/null
+++ b/doc/api_samples/servers/v2.90/server-update-resp.json
@@ -0,0 +1,78 @@
+{
+ "server": {
+ "accessIPv4": "1.2.3.4",
+ "accessIPv6": "80fe::",
+ "addresses": {
+ "private": [
+ {
+ "addr": "192.168.1.30",
+ "OS-EXT-IPS-MAC:mac_addr": "00:0c:29:0d:11:74",
+ "OS-EXT-IPS:type": "fixed",
+ "version": 4
+ }
+ ]
+ },
+ "created": "2013-09-03T04:01:32Z",
+ "description": null,
+ "locked": false,
+ "locked_reason": null,
+ "flavor": {
+ "disk": 1,
+ "ephemeral": 0,
+ "extra_specs": {},
+ "original_name": "m1.tiny",
+ "ram": 512,
+ "swap": 0,
+ "vcpus": 1
+ },
+ "hostId": "92154fab69d5883ba2c8622b7e65f745dd33257221c07af363c51b29",
+ "id": "0e44cc9c-e052-415d-afbf-469b0d384170",
+ "image": {
+ "id": "70a599e0-31e7-49b7-b260-868f441e862b",
+ "links": [
+ {
+ "href": "http://openstack.example.com/6f70656e737461636b20342065766572/images/70a599e0-31e7-49b7-b260-868f441e862b",
+ "rel": "bookmark"
+ }
+ ]
+ },
+ "key_name": null,
+ "links": [
+ {
+ "href": "http://openstack.example.com/v2/6f70656e737461636b20342065766572/servers/0e44cc9c-e052-415d-afbf-469b0d384170",
+ "rel": "self"
+ },
+ {
+ "href": "http://openstack.example.com/6f70656e737461636b20342065766572/servers/0e44cc9c-e052-415d-afbf-469b0d384170",
+ "rel": "bookmark"
+ }
+ ],
+ "metadata": {
+ "My Server Name": "Apache1"
+ },
+ "name": "new-server-test",
+ "config_drive": "",
+ "OS-DCF:diskConfig": "AUTO",
+ "OS-EXT-AZ:availability_zone": "us-west",
+ "OS-EXT-SRV-ATTR:hostname": "new-server-hostname",
+ "OS-EXT-STS:power_state": 1,
+ "OS-EXT-STS:task_state": null,
+ "OS-EXT-STS:vm_state": "active",
+ "os-extended-volumes:volumes_attached": [],
+ "OS-SRV-USG:launched_at": "2013-09-23T13:37:00.880302",
+ "OS-SRV-USG:terminated_at": null,
+ "progress": 0,
+ "security_groups": [
+ {
+ "name": "default"
+ }
+ ],
+ "server_groups": [],
+ "status": "ACTIVE",
+ "tags": [],
+ "tenant_id": "6f70656e737461636b20342065766572",
+ "trusted_image_certificates": null,
+ "updated": "2013-09-03T04:01:33Z",
+ "user_id": "fake"
+ }
+}
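
Microversion 2.90 exposes a ``hostname`` field on create, rebuild and update, reflected back as ``OS-EXT-SRV-ATTR:hostname`` in the response. A sketch of the update call shown in these samples, with placeholder endpoint and token:

```python
import requests

headers = {
    "X-Auth-Token": "<token>",  # placeholder
    "X-OpenStack-Nova-API-Version": "2.90",
}
body = {"server": {"hostname": "new-server-hostname"}}
resp = requests.put(
    "http://openstack.example.com/v2.1/servers/"
    "0e44cc9c-e052-415d-afbf-469b0d384170",
    json=body, headers=headers)
print(resp.json()["server"]["OS-EXT-SRV-ATTR:hostname"])
```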
diff --git a/doc/api_samples/servers/v2.90/servers-details-resp.json b/doc/api_samples/servers/v2.90/servers-details-resp.json
new file mode 100644
index 00000000000..14cb7708eb7
--- /dev/null
+++ b/doc/api_samples/servers/v2.90/servers-details-resp.json
@@ -0,0 +1,88 @@
+{
+ "servers": [
+ {
+ "accessIPv4": "1.2.3.4",
+ "accessIPv6": "80fe::",
+ "addresses": {
+ "private": [
+ {
+ "addr": "192.168.1.30",
+ "OS-EXT-IPS-MAC:mac_addr": "00:0c:29:0d:11:74",
+ "OS-EXT-IPS:type": "fixed",
+ "version": 4
+ }
+ ]
+ },
+ "created": "2013-09-03T04:01:32Z",
+ "description": "",
+ "flavor": {
+ "disk": 1,
+ "ephemeral": 0,
+ "extra_specs": {},
+ "original_name": "m1.tiny",
+ "ram": 512,
+ "swap": 0,
+ "vcpus": 1
+ },
+ "hostId": "bcf92836fc9ed4203a75cb0337afc7f917d2be504164b995c2334b25",
+ "id": "f5dc173b-6804-445a-a6d8-c705dad5b5eb",
+ "image": {
+ "id": "70a599e0-31e7-49b7-b260-868f441e862b",
+ "links": [
+ {
+ "href": "http://openstack.example.com/6f70656e737461636b20342065766572/images/70a599e0-31e7-49b7-b260-868f441e862b",
+ "rel": "bookmark"
+ }
+ ]
+ },
+ "key_name": null,
+ "links": [
+ {
+ "href": "http://openstack.example.com/v2/6f70656e737461636b20342065766572/servers/f5dc173b-6804-445a-a6d8-c705dad5b5eb",
+ "rel": "self"
+ },
+ {
+ "href": "http://openstack.example.com/6f70656e737461636b20342065766572/servers/f5dc173b-6804-445a-a6d8-c705dad5b5eb",
+ "rel": "bookmark"
+ }
+ ],
+ "metadata": {
+ "My Server Name": "Apache1"
+ },
+ "name": "new-server-test",
+ "config_drive": "",
+ "locked": false,
+ "locked_reason": "",
+ "OS-DCF:diskConfig": "AUTO",
+ "OS-EXT-AZ:availability_zone": "nova",
+ "OS-EXT-SRV-ATTR:hostname": "custom-hostname",
+ "OS-EXT-STS:power_state": 1,
+ "OS-EXT-STS:task_state": null,
+ "OS-EXT-STS:vm_state": "active",
+ "os-extended-volumes:volumes_attached": [
+ {"id": "volume_id1", "delete_on_termination": false},
+ {"id": "volume_id2", "delete_on_termination": false}
+ ],
+ "OS-SRV-USG:launched_at": "2013-09-23T13:53:12.774549",
+ "OS-SRV-USG:terminated_at": null,
+ "progress": 0,
+ "security_groups": [
+ {
+ "name": "default"
+ }
+ ],
+ "status": "ACTIVE",
+ "tags": [],
+ "tenant_id": "6f70656e737461636b20342065766572",
+ "trusted_image_certificates": null,
+ "updated": "2013-09-03T04:01:32Z",
+ "user_id": "fake"
+ }
+ ],
+ "servers_links": [
+ {
+ "href": "http://openstack.example.com/v2.1/6f70656e737461636b20342065766572/servers/detail?limit=1&marker=f5dc173b-6804-445a-a6d8-c705dad5b5eb",
+ "rel": "next"
+ }
+ ]
+}
diff --git a/doc/api_samples/servers/v2.90/servers-list-resp.json b/doc/api_samples/servers/v2.90/servers-list-resp.json
new file mode 100644
index 00000000000..799ef9ba44b
--- /dev/null
+++ b/doc/api_samples/servers/v2.90/servers-list-resp.json
@@ -0,0 +1,24 @@
+{
+ "servers": [
+ {
+ "id": "22c91117-08de-4894-9aa9-6ef382400985",
+ "links": [
+ {
+ "href": "http://openstack.example.com/v2/6f70656e737461636b20342065766572/servers/22c91117-08de-4894-9aa9-6ef382400985",
+ "rel": "self"
+ },
+ {
+ "href": "http://openstack.example.com/6f70656e737461636b20342065766572/servers/22c91117-08de-4894-9aa9-6ef382400985",
+ "rel": "bookmark"
+ }
+ ],
+ "name": "new-server-test"
+ }
+ ],
+ "servers_links": [
+ {
+ "href": "http://openstack.example.com/v2.1/6f70656e737461636b20342065766572/servers?limit=1&marker=22c91117-08de-4894-9aa9-6ef382400985",
+ "rel": "next"
+ }
+ ]
+}
\ No newline at end of file
diff --git a/doc/api_samples/versions/v21-version-get-resp.json b/doc/api_samples/versions/v21-version-get-resp.json
index 7e2434ee332..f976225f9c4 100644
--- a/doc/api_samples/versions/v21-version-get-resp.json
+++ b/doc/api_samples/versions/v21-version-get-resp.json
@@ -19,7 +19,7 @@
}
],
"status": "CURRENT",
- "version": "2.65",
+ "version": "2.90",
"min_version": "2.1",
"updated": "2013-07-23T11:33:21Z"
}
diff --git a/doc/api_samples/versions/versions-get-resp.json b/doc/api_samples/versions/versions-get-resp.json
index 8fb2d2430cf..327dbd82d66 100644
--- a/doc/api_samples/versions/versions-get-resp.json
+++ b/doc/api_samples/versions/versions-get-resp.json
@@ -22,7 +22,7 @@
}
],
"status": "CURRENT",
- "version": "2.65",
+ "version": "2.90",
"min_version": "2.1",
"updated": "2013-07-23T11:33:21Z"
}
diff --git a/doc/api_schemas/config_drive.json b/doc/api_schemas/config_drive.json
new file mode 100644
index 00000000000..d4ba5e7d267
--- /dev/null
+++ b/doc/api_schemas/config_drive.json
@@ -0,0 +1,30 @@
+{
+ "anyOf": [
+ {
+ "type": "object",
+ "properties": {
+ "meta_data": {
+ "type": "object"
+ },
+ "network_data": {
+ "type": "object"
+ },
+ "user_data": {
+ "type": [
+ "object",
+ "array",
+ "string",
+ "null"
+ ]
+ }
+ },
+ "additionalProperties": false
+ },
+ {
+ "type": [
+ "string",
+ "null"
+ ]
+ }
+ ]
+}
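
Because of the top-level ``anyOf``, both the structured object form and an opaque string (or null) validate against this schema. A quick check with the ``jsonschema`` package (an assumption; it is not a stated dependency here), run from the repository root:

```python
import json

import jsonschema  # assumption: available in the environment

with open("doc/api_schemas/config_drive.json") as f:
    schema = json.load(f)

# The structured form: all three keys are optional, extras are rejected.
jsonschema.validate(
    {"meta_data": {}, "network_data": {}, "user_data": None}, schema)
# The opaque form: a plain string (or null) also satisfies the anyOf.
jsonschema.validate("opaque config drive contents", schema)
```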
diff --git a/doc/api_schemas/network_data.json b/doc/api_schemas/network_data.json
new file mode 100644
index 00000000000..f980973d753
--- /dev/null
+++ b/doc/api_schemas/network_data.json
@@ -0,0 +1,580 @@
+{
+ "$schema": "http://openstack.org/nova/network_data.json#",
+ "id": "http://openstack.org/nova/network_data.json",
+ "type": "object",
+ "title": "OpenStack Nova network metadata schema",
+ "description": "Schema of Nova instance network configuration information",
+ "required": [
+ "links",
+ "networks",
+ "services"
+ ],
+ "properties": {
+ "links": {
+ "$id": "#/properties/links",
+ "type": "array",
+ "title": "L2 interfaces settings",
+ "items": {
+ "$id": "#/properties/links/items",
+ "oneOf": [
+ {
+ "$ref": "#/definitions/l2_link"
+ },
+ {
+ "$ref": "#/definitions/l2_bond"
+ },
+ {
+ "$ref": "#/definitions/l2_vlan"
+ }
+ ]
+ }
+ },
+ "networks": {
+ "$id": "#/properties/networks",
+ "type": "array",
+ "title": "L3 networks",
+ "items": {
+ "$id": "#/properties/networks/items",
+ "oneOf": [
+ {
+ "$ref": "#/definitions/l3_ipv4_network"
+ },
+ {
+ "$ref": "#/definitions/l3_ipv6_network"
+ }
+ ]
+ }
+ },
+ "services": {
+ "$ref": "#/definitions/services"
+ }
+ },
+ "definitions": {
+ "l2_address": {
+ "$id": "#/definitions/l2_address",
+ "type": "string",
+ "pattern": "(?i)^([0-9A-F]{2}[:-]){5}([0-9A-F]{2})$",
+ "title": "L2 interface address",
+ "examples": [
+ "fa:16:3e:9c:bf:3d"
+ ]
+ },
+ "l2_id": {
+ "$id": "#/definitions/l2_id",
+ "type": "string",
+ "title": "L2 interface ID",
+ "examples": [
+ "eth0"
+ ]
+ },
+ "l2_mtu": {
+ "$id": "#/definitions/l2_mtu",
+ "title": "L2 interface MTU",
+ "anyOf": [
+ {
+ "type": "number",
+ "minimum": 1,
+ "maximum": 65535
+ },
+ {
+ "type": "null"
+ }
+ ],
+ "examples": [
+ 1500
+ ]
+ },
+ "l2_vif_id": {
+ "$id": "#/definitions/l2_vif_id",
+ "type": "string",
+ "title": "Virtual interface ID",
+ "examples": [
+ "cd9f6d46-4a3a-43ab-a466-994af9db96fc"
+ ]
+ },
+ "l2_link": {
+ "$id": "#/definitions/l2_link",
+ "type": "object",
+ "title": "L2 interface configuration settings",
+ "required": [
+ "ethernet_mac_address",
+ "id",
+ "type"
+ ],
+ "properties": {
+ "id": {
+ "$ref": "#/definitions/l2_id"
+ },
+ "ethernet_mac_address": {
+ "$ref": "#/definitions/l2_address"
+ },
+ "mtu": {
+ "$ref": "#/definitions/l2_mtu"
+ },
+ "type": {
+ "$id": "#/definitions/l2_link/properties/type",
+ "type": "string",
+ "enum": [
+ "bridge",
+ "dvs",
+ "hw_veb",
+ "hyperv",
+ "ovs",
+ "tap",
+ "vhostuser",
+ "vif",
+ "phy"
+ ],
+ "title": "Interface type",
+ "examples": [
+ "bridge"
+ ]
+ },
+ "vif_id": {
+ "$ref": "#/definitions/l2_vif_id"
+ }
+ }
+ },
+ "l2_bond": {
+ "$id": "#/definitions/l2_bond",
+ "type": "object",
+ "title": "L2 bonding interface configuration settings",
+ "required": [
+ "ethernet_mac_address",
+ "id",
+ "type",
+ "bond_mode",
+ "bond_links"
+ ],
+ "properties": {
+ "id": {
+ "$ref": "#/definitions/l2_id"
+ },
+ "ethernet_mac_address": {
+ "$ref": "#/definitions/l2_address"
+ },
+ "mtu": {
+ "$ref": "#/definitions/l2_mtu"
+ },
+ "type": {
+ "$id": "#/definitions/l2_bond/properties/type",
+ "type": "string",
+ "enum": [
+ "bond"
+ ],
+ "title": "Interface type",
+ "examples": [
+ "bond"
+ ]
+ },
+ "vif_id": {
+ "$ref": "#/definitions/l2_vif_id"
+ },
+ "bond_mode": {
+ "$id": "#/definitions/bond/properties/bond_mode",
+ "type": "string",
+ "title": "Port bonding type",
+ "enum": [
+ "802.3ad",
+ "balance-rr",
+ "active-backup",
+ "balance-xor",
+ "broadcast",
+ "balance-tlb",
+ "balance-alb"
+ ],
+ "examples": [
+ "802.3ad"
+ ]
+ },
+ "bond_links": {
+ "$id": "#/definitions/bond/properties/bond_links",
+ "type": "array",
+ "title": "Port bonding links",
+ "items": {
+ "$id": "#/definitions/bond/properties/bond_links/items",
+ "type": "string"
+ }
+ }
+ }
+ },
+ "l2_vlan": {
+ "$id": "#/definitions/l2_vlan",
+ "type": "object",
+ "title": "L2 VLAN interface configuration settings",
+ "required": [
+ "vlan_mac_address",
+ "id",
+ "type",
+ "vlan_link",
+ "vlan_id"
+ ],
+ "properties": {
+ "id": {
+ "$ref": "#/definitions/l2_id"
+ },
+ "vlan_mac_address": {
+ "$ref": "#/definitions/l2_address"
+ },
+ "mtu": {
+ "$ref": "#/definitions/l2_mtu"
+ },
+ "type": {
+ "$id": "#/definitions/l2_vlan/properties/type",
+ "type": "string",
+ "enum": [
+ "vlan"
+ ],
+ "title": "VLAN interface type",
+ "examples": [
+ "vlan"
+ ]
+ },
+ "vif_id": {
+ "$ref": "#/definitions/l2_vif_id"
+ },
+ "vlan_id": {
+ "$id": "#/definitions/l2_vlan/properties/vlan_id",
+ "type": "integer",
+ "title": "VLAN ID"
+ },
+ "vlan_link": {
+ "$id": "#/definitions/l2_vlan/properties/vlan_link",
+ "type": "string",
+ "title": "VLAN link name"
+ }
+ }
+ },
+ "l3_id": {
+ "$id": "#/definitions/l3_id",
+ "type": "string",
+ "title": "Network name",
+ "examples": [
+ "network0"
+ ]
+ },
+ "l3_link": {
+ "$id": "#/definitions/l3_link",
+ "type": "string",
+ "title": "L2 network link to use for L3 interface",
+ "examples": [
+ "99e88329-f20d-4741-9593-25bf07847b16"
+ ]
+ },
+ "l3_network_id": {
+ "$id": "#/definitions/l3_network_id",
+ "type": "string",
+ "title": "Network ID",
+ "examples": [
+ "99e88329-f20d-4741-9593-25bf07847b16"
+ ]
+ },
+ "l3_ipv4_type": {
+ "$id": "#/definitions/l3_ipv4_type",
+ "type": "string",
+ "enum": [
+ "ipv4",
+ "ipv4_dhcp"
+ ],
+ "title": "L3 IPv4 network type",
+ "examples": [
+ "ipv4_dhcp"
+ ]
+ },
+ "l3_ipv6_type": {
+ "$id": "#/definitions/l3_ipv6_type",
+ "type": "string",
+ "enum": [
+ "ipv6",
+ "ipv6_dhcp",
+ "ipv6_slaac"
+ ],
+ "title": "L3 IPv6 network type",
+ "examples": [
+ "ipv6_dhcp"
+ ]
+ },
+ "l3_ipv4_host": {
+ "$id": "#/definitions/l3_ipv4_host",
+ "type": "string",
+ "pattern": "^(?:(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\\.){3}(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)$",
+ "title": "L3 IPv4 host address",
+ "examples": [
+ "192.168.81.99"
+ ]
+ },
+ "l3_ipv6_host": {
+ "$id": "#/definitions/l3_ipv6_host",
+ "type": "string",
+ "pattern": "^(([0-9a-fA-F]{1,4}:){7,7}[0-9a-fA-F]{1,4}|([0-9a-fA-F]{1,4}:){1,7}:|([0-9a-fA-F]{1,4}:){1,6}:[0-9a-fA-F]{1,4}|([0-9a-fA-F]{1,4}:){1,5}(:[0-9a-fA-F]{1,4}){1,2}|([0-9a-fA-F]{1,4}:){1,4}(:[0-9a-fA-F]{1,4}){1,3}|([0-9a-fA-F]{1,4}:){1,3}(:[0-9a-fA-F]{1,4}){1,4}|([0-9a-fA-F]{1,4}:){1,2}(:[0-9a-fA-F]{1,4}){1,5}|[0-9a-fA-F]{1,4}:((:[0-9a-fA-F]{1,4}){1,6})|:((:[0-9a-fA-F]{1,4}){1,7}|:)|fe80:(:[0-9a-fA-F]{0,4}){0,4}%[0-9a-zA-Z]{1,}|::(ffff(:0{1,4}){0,1}:){0,1}((25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9])\\.){3,3}(25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9])|([0-9a-fA-F]{1,4}:){1,4}:((25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9])\\.){3,3}(25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9]))(/[0-9]{1,2})?$",
+ "title": "L3 IPv6 host address",
+ "examples": [
+ "2001:db8:3:4::192.168.81.99"
+ ]
+ },
+ "l3_ipv4_netmask": {
+ "$id": "#/definitions/l3_ipv4_netmask",
+ "type": "string",
+ "pattern": "^(254|252|248|240|224|192|128|0)\\.0\\.0\\.0|255\\.(254|252|248|240|224|192|128|0)\\.0\\.0|255\\.255\\.(254|252|248|240|224|192|128|0)\\.0|255\\.255\\.255\\.(254|252|248|240|224|192|128|0)$",
+ "title": "L3 IPv4 network mask",
+ "examples": [
+ "255.255.252.0"
+ ]
+ },
+ "l3_ipv6_netmask": {
+ "$id": "#/definitions/l3_ipv6_netmask",
+ "type": "string",
+ "pattern": "^(([0-9a-fA-F]{1,4}:){7,7}[0-9a-fA-F]{1,4}|([0-9a-fA-F]{1,4}:){1,7}:|([0-9a-fA-F]{1,4}:){1,6}:[0-9a-fA-F]{1,4}|([0-9a-fA-F]{1,4}:){1,5}(:[0-9a-fA-F]{1,4}){1,2}|([0-9a-fA-F]{1,4}:){1,4}(:[0-9a-fA-F]{1,4}){1,3}|([0-9a-fA-F]{1,4}:){1,3}(:[0-9a-fA-F]{1,4}){1,4}|([0-9a-fA-F]{1,4}:){1,2}(:[0-9a-fA-F]{1,4}){1,5}|[0-9a-fA-F]{1,4}:((:[0-9a-fA-F]{1,4}){1,6})|:((:[0-9a-fA-F]{1,4}){1,7})|(::))$",
+ "title": "L3 IPv6 network mask",
+ "examples": [
+ "ffff:ffff:ffff:ffff::"
+ ]
+ },
+ "l3_ipv4_nw": {
+ "$id": "#/definitions/l3_ipv4_nw",
+ "type": "string",
+ "pattern": "^(([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])\\.){3}([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])$",
+ "title": "L3 IPv4 network address",
+ "examples": [
+ "0.0.0.0"
+ ]
+ },
+ "l3_ipv6_nw": {
+ "$id": "#/definitions/l3_ipv6_nw",
+ "type": "string",
+ "pattern": "^(([0-9a-fA-F]{1,4}:){7,7}[0-9a-fA-F]{1,4}|([0-9a-fA-F]{1,4}:){1,7}:|([0-9a-fA-F]{1,4}:){1,6}:[0-9a-fA-F]{1,4}|([0-9a-fA-F]{1,4}:){1,5}(:[0-9a-fA-F]{1,4}){1,2}|([0-9a-fA-F]{1,4}:){1,4}(:[0-9a-fA-F]{1,4}){1,3}|([0-9a-fA-F]{1,4}:){1,3}(:[0-9a-fA-F]{1,4}){1,4}|([0-9a-fA-F]{1,4}:){1,2}(:[0-9a-fA-F]{1,4}){1,5}|[0-9a-fA-F]{1,4}:((:[0-9a-fA-F]{1,4}){1,6})|:((:[0-9a-fA-F]{1,4}){1,7})|(::))$",
+ "title": "L3 IPv6 network address",
+ "examples": [
+ "8000::"
+ ]
+ },
+ "l3_ipv4_gateway": {
+ "$id": "#/definitions/l3_ipv4_gateway",
+ "type": "string",
+ "pattern": "^(?:(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\\.){3}(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)$",
+ "title": "L3 IPv4 gateway address",
+ "examples": [
+ "192.168.200.1"
+ ]
+ },
+ "l3_ipv6_gateway": {
+ "$id": "#/definitions/l3_ipv6_gateway",
+ "type": "string",
+ "pattern": "^(([0-9a-fA-F]{1,4}:){7,7}[0-9a-fA-F]{1,4}|([0-9a-fA-F]{1,4}:){1,7}:|([0-9a-fA-F]{1,4}:){1,6}:[0-9a-fA-F]{1,4}|([0-9a-fA-F]{1,4}:){1,5}(:[0-9a-fA-F]{1,4}){1,2}|([0-9a-fA-F]{1,4}:){1,4}(:[0-9a-fA-F]{1,4}){1,3}|([0-9a-fA-F]{1,4}:){1,3}(:[0-9a-fA-F]{1,4}){1,4}|([0-9a-fA-F]{1,4}:){1,2}(:[0-9a-fA-F]{1,4}){1,5}|[0-9a-fA-F]{1,4}:((:[0-9a-fA-F]{1,4}){1,6})|:((:[0-9a-fA-F]{1,4}){1,7}|:)|fe80:(:[0-9a-fA-F]{0,4}){0,4}%[0-9a-zA-Z]{1,}|::(ffff(:0{1,4}){0,1}:){0,1}((25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9])\\.){3,3}(25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9])|([0-9a-fA-F]{1,4}:){1,4}:((25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9])\\.){3,3}(25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9]))$",
+ "title": "L3 IPv6 gateway address",
+ "examples": [
+ "2001:db8:3:4::192.168.81.99"
+ ]
+ },
+ "l3_ipv4_network_route": {
+ "$id": "#/definitions/l3_ipv4_network_route",
+ "type": "object",
+ "title": "L3 IPv4 routing configuration item",
+ "required": [
+ "gateway",
+ "netmask",
+ "network"
+ ],
+ "properties": {
+ "network": {
+ "$ref": "#/definitions/l3_ipv4_nw"
+ },
+ "netmask": {
+ "$ref": "#/definitions/l3_ipv4_netmask"
+ },
+ "gateway": {
+ "$ref": "#/definitions/l3_ipv4_gateway"
+ },
+ "services": {
+ "$ref": "#/definitions/ipv4_services"
+ }
+ }
+ },
+ "l3_ipv6_network_route": {
+ "$id": "#/definitions/l3_ipv6_network_route",
+ "type": "object",
+ "title": "L3 IPv6 routing configuration item",
+ "required": [
+ "gateway",
+ "netmask",
+ "network"
+ ],
+ "properties": {
+ "network": {
+ "$ref": "#/definitions/l3_ipv6_nw"
+ },
+ "netmask": {
+ "$ref": "#/definitions/l3_ipv6_netmask"
+ },
+ "gateway": {
+ "$ref": "#/definitions/l3_ipv6_gateway"
+ },
+ "services": {
+ "$ref": "#/definitions/ipv6_services"
+ }
+ }
+ },
+ "l3_ipv4_network": {
+ "$id": "#/definitions/l3_ipv4_network",
+ "type": "object",
+ "title": "L3 IPv4 network configuration",
+ "required": [
+ "id",
+ "link",
+ "network_id",
+ "type"
+ ],
+ "properties": {
+ "id": {
+ "$ref": "#/definitions/l3_id"
+ },
+ "link": {
+ "$ref": "#/definitions/l3_link"
+ },
+ "network_id": {
+ "$ref": "#/definitions/l3_network_id"
+ },
+ "type": {
+ "$ref": "#/definitions/l3_ipv4_type"
+ },
+ "ip_address": {
+ "$ref": "#/definitions/l3_ipv4_host"
+ },
+ "netmask": {
+ "$ref": "#/definitions/l3_ipv4_netmask"
+ },
+ "routes": {
+ "$id": "#/definitions/l3_ipv4_network/routes",
+ "type": "array",
+ "title": "L3 IPv4 network routes",
+ "items": {
+ "$ref": "#/definitions/l3_ipv4_network_route"
+ }
+ }
+ }
+ },
+ "l3_ipv6_network": {
+ "$id": "#/definitions/l3_ipv6_network",
+ "type": "object",
+ "title": "L3 IPv6 network configuration",
+ "required": [
+ "id",
+ "link",
+ "network_id",
+ "type"
+ ],
+ "properties": {
+ "id": {
+ "$ref": "#/definitions/l3_id"
+ },
+ "link": {
+ "$ref": "#/definitions/l3_link"
+ },
+ "network_id": {
+ "$ref": "#/definitions/l3_network_id"
+ },
+ "type": {
+ "$ref": "#/definitions/l3_ipv6_type"
+ },
+ "ip_address": {
+ "$ref": "#/definitions/l3_ipv6_host"
+ },
+ "netmask": {
+ "$ref": "#/definitions/l3_ipv6_netmask"
+ },
+ "routes": {
+ "$id": "#/definitions/properties/l3_ipv6_network/routes",
+ "type": "array",
+ "title": "L3 IPv6 network routes",
+ "items": {
+ "$ref": "#/definitions/l3_ipv6_network_route"
+ }
+ }
+ }
+ },
+ "ipv4_service": {
+ "$id": "#/definitions/ipv4_service",
+ "type": "object",
+ "title": "Service on a IPv4 network",
+ "required": [
+ "address",
+ "type"
+ ],
+ "properties": {
+ "address": {
+ "$ref": "#/definitions/l3_ipv4_host"
+ },
+ "type": {
+ "$id": "#/definitions/ipv4_service/properties/type",
+ "type": "string",
+ "enum": [
+ "dns"
+ ],
+ "title": "Service type",
+ "examples": [
+ "dns"
+ ]
+ }
+ }
+ },
+ "ipv6_service": {
+ "$id": "#/definitions/ipv6_service",
+ "type": "object",
+ "title": "Service on a IPv6 network",
+ "required": [
+ "address",
+ "type"
+ ],
+ "properties": {
+ "address": {
+ "$ref": "#/definitions/l3_ipv6_host"
+ },
+ "type": {
+ "$id": "#/definitions/ipv4_service/properties/type",
+ "type": "string",
+ "enum": [
+ "dns"
+ ],
+ "title": "Service type",
+ "examples": [
+ "dns"
+ ]
+ }
+ }
+ },
+ "ipv4_services": {
+ "$id": "#/definitions/ipv4_services",
+ "type": "array",
+ "title": "Network services on IPv4 network",
+ "items": {
+ "$id": "#/definitions/ipv4_services/items",
+ "$ref": "#/definitions/ipv4_service"
+ }
+ },
+ "ipv6_services": {
+ "$id": "#/definitions/ipv6_services",
+ "type": "array",
+ "title": "Network services on IPv6 network",
+ "items": {
+ "$id": "#/definitions/ipv6_services/items",
+ "$ref": "#/definitions/ipv6_service"
+ }
+ },
+ "services": {
+ "$id": "#/definitions/services",
+ "type": "array",
+ "title": "Network services",
+ "items": {
+ "$id": "#/definitions/services/items",
+ "anyOf": [
+ {
+ "$ref": "#/definitions/ipv4_service"
+ },
+ {
+ "$ref": "#/definitions/ipv6_service"
+ }
+ ]
+ }
+ }
+ }
+}
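
The address patterns defined above can be exercised directly with the
``jsonschema`` library. A minimal sketch, reusing the ``l3_ipv4_gateway``
pattern (the snippet is illustrative only and not part of the patch):

    import jsonschema

    # Pattern copied from the l3_ipv4_gateway definition above.
    gateway_schema = {
        "type": "string",
        "pattern": "^(?:(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\\.){3}"
                   "(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)$",
    }

    jsonschema.validate("192.168.200.1", gateway_schema)  # passes, returns None

    try:
        jsonschema.validate("192.168.200.256", gateway_schema)  # octet > 255
    except jsonschema.ValidationError as exc:
        print(exc.message)
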
diff --git a/doc/ext/extra_specs.py b/doc/ext/extra_specs.py
new file mode 100644
index 00000000000..534f5fa969e
--- /dev/null
+++ b/doc/ext/extra_specs.py
@@ -0,0 +1,239 @@
+# Copyright 2020, Red Hat, Inc. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""Display extra specs in documentation.
+
+Provides a single directive that can be used to list all extra specs validators
+and, thus, document all extra specs that nova recognizes and supports.
+"""
+
+import typing as ty
+
+from docutils import nodes
+from docutils.parsers import rst
+from docutils.parsers.rst import directives
+from docutils import statemachine
+from sphinx import addnodes
+from sphinx import directives as sphinx_directives
+from sphinx import domains
+from sphinx import roles
+from sphinx.util import logging
+from sphinx.util import nodes as sphinx_nodes
+
+from nova.api.validation.extra_specs import base
+from nova.api.validation.extra_specs import validators
+
+LOG = logging.getLogger(__name__)
+
+
+class ExtraSpecXRefRole(roles.XRefRole):
+ """Cross reference a extra spec.
+
+ Example::
+
+ :nova:extra-spec:`hw:cpu_policy`
+ """
+
+ def __init__(self):
+ super(ExtraSpecXRefRole, self).__init__(
+ warn_dangling=True,
+ )
+
+ def process_link(self, env, refnode, has_explicit_title, title, target):
+ # The anchor for the extra spec link is the extra spec name
+ return target, target
+
+
+class ExtraSpecDirective(sphinx_directives.ObjectDescription):
+ """Document an individual extra spec.
+
+ Accepts one required argument - the extra spec name, including the group.
+
+ Example::
+
+ .. extra-spec:: hw:cpu_policy
+ """
+
+ def handle_signature(self, sig, signode):
+ """Transform an option description into RST nodes."""
+ # Insert a node into the output showing the extra spec name
+ signode += addnodes.desc_name(sig, sig)
+ signode['allnames'] = [sig]
+ return sig
+
+ def add_target_and_index(self, firstname, sig, signode):
+ cached_options = self.env.domaindata['nova']['extra_specs']
+ signode['ids'].append(sig)
+ self.state.document.note_explicit_target(signode)
+ # Store the location of the option definition for later use in
+ # resolving cross-references
+ cached_options[sig] = self.env.docname
+
+
+def _indent(text, count=1):
+ if not text:
+ return text
+
+ padding = ' ' * (4 * count)
+ return padding + text
+
+
+def _format_validator_group_help(
+ validators: ty.Dict[str, base.ExtraSpecValidator],
+ summary: bool,
+):
+ """Generate reStructuredText snippets for a group of validators."""
+ for validator in validators.values():
+ for line in _format_validator_help(validator, summary):
+ yield line
+
+
+def _format_validator_help(
+ validator: base.ExtraSpecValidator,
+ summary: bool,
+):
+ """Generate reStucturedText snippets for the provided validator.
+
+ :param validator: A validator to document.
+ :type validator: nova.api.validation.extra_specs.base.ExtraSpecValidator
+ """
+ yield f'.. nova:extra-spec:: {validator.name}'
+ yield ''
+
+ # NOTE(stephenfin): We don't print the pattern, if present, since it's too
+ # internal. Instead, the description should provide this information in a
+ # human-readable format
+ yield _indent(f':Type: {validator.value["type"].__name__}')
+
+ if validator.value.get('min') is not None:
+ yield _indent(f':Min: {validator.value["min"]}')
+
+ if validator.value.get('max') is not None:
+ yield _indent(f':Max: {validator.value["max"]}')
+
+ yield ''
+
+ if not summary:
+ for line in validator.description.splitlines():
+ yield _indent(line)
+
+ yield ''
+
+ if validator.deprecated:
+ yield _indent('.. warning::')
+ yield _indent(
+ 'This extra spec has been deprecated and should not be used.', 2
+ )
+ yield ''
+
+
+class ExtraSpecGroupDirective(rst.Directive):
+ """Document extra specs belonging to the specified group.
+
+ Accepts one optional argument - the extra spec group - and one option -
+ whether to show a summary view only (omit descriptions). Example::
+
+ .. extra-specs:: hw_rng
+ :summary:
+ """
+
+ required_arguments = 0
+ optional_arguments = 1
+ option_spec = {
+ 'summary': directives.flag,
+ }
+ has_content = False
+
+ def run(self):
+ result = statemachine.ViewList()
+ source_name = self.state.document.current_source
+
+ group = self.arguments[0] if self.arguments else None
+ summary = self.options.get('summary', False)
+
+ if group:
+ group_validators = {
+ n.split(':', 1)[1]: v for n, v in validators.VALIDATORS.items()
+ if ':' in n and n.split(':', 1)[0].split('{')[0] == group
+ }
+ else:
+ group_validators = {
+ n: v for n, v in validators.VALIDATORS.items()
+ if ':' not in n
+ }
+
+ if not group_validators:
+ LOG.warning("No validators found for group '%s'", group or '')
+
+ for count, line in enumerate(
+ _format_validator_group_help(group_validators, summary)
+ ):
+ result.append(line, source_name, count)
+ LOG.debug('%5d%s%s', count, ' ' if line else '', line)
+
+ node = nodes.section()
+ node.document = self.state.document
+
+ sphinx_nodes.nested_parse_with_titles(self.state, result, node)
+
+ return node.children
+
+
+class NovaDomain(domains.Domain):
+ """nova domain."""
+ name = 'nova'
+ label = 'nova'
+ object_types = {
+ 'configoption': domains.ObjType(
+ 'extra spec', 'spec',
+ ),
+ }
+ directives = {
+ 'extra-spec': ExtraSpecDirective,
+ }
+ roles = {
+ 'extra-spec': ExtraSpecXRefRole(),
+ }
+ initial_data = {
+ 'extra_specs': {},
+ }
+
+ def resolve_xref(
+ self, env, fromdocname, builder, typ, target, node, contnode,
+ ):
+ """Resolve cross-references"""
+ if typ == 'extra-spec':
+ return sphinx_nodes.make_refnode(
+ builder,
+ fromdocname,
+ env.domaindata['nova']['extra_specs'][target],
+ target,
+ contnode,
+ target,
+ )
+ return None
+
+ def merge_domaindata(self, docnames, otherdata):
+ for target, docname in otherdata['extra_specs'].items():
+ if docname in docnames:
+ self.data['extra_specs'][target] = docname
+
+
+def setup(app):
+ app.add_domain(NovaDomain)
+ app.add_directive('extra-specs', ExtraSpecGroupDirective)
+ return {
+ 'parallel_read_safe': True,
+ 'parallel_write_safe': True,
+ }
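
To get a feel for the reST that ``_format_validator_help`` emits, the
following sketch drives it with a stand-in object (``SimpleNamespace`` is a
hypothetical substitute for ``base.ExtraSpecValidator``; run it with the
function above in scope):

    from types import SimpleNamespace

    fake = SimpleNamespace(
        name='hw:numa_nodes',
        description='Number of NUMA nodes to expose to the guest.',
        deprecated=False,
        value={'type': int, 'min': 1, 'max': None},
    )

    print('\n'.join(_format_validator_help(fake, summary=False)))
    # .. nova:extra-spec:: hw:numa_nodes
    #
    #     :Type: int
    #     :Min: 1
    #
    #     Number of NUMA nodes to expose to the guest.
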
diff --git a/doc/ext/feature_matrix.py b/doc/ext/feature_matrix.py
index 4934e5894f1..62f4ec0b943 100644
--- a/doc/ext/feature_matrix.py
+++ b/doc/ext/feature_matrix.py
@@ -20,10 +20,8 @@
"""
+import configparser
import re
-import sys
-
-from six.moves import configparser
from docutils import nodes
from docutils.parsers import rst
@@ -159,16 +157,12 @@ def _load_feature_matrix(self):
:returns: Matrix instance
"""
- # SafeConfigParser was deprecated in Python 3.2
- if sys.version_info >= (3, 2):
- cfg = configparser.ConfigParser()
- else:
- cfg = configparser.SafeConfigParser()
+ cfg = configparser.ConfigParser()
env = self.state.document.settings.env
filename = self.arguments[0]
rel_fpath, fpath = env.relfn2path(filename)
with open(fpath) as fp:
- cfg.readfp(fp)
+ cfg.read_file(fp)
# This ensures that the docs are rebuilt whenever the
# .ini file changes
@@ -576,4 +570,8 @@ def _create_notes_paragraph(self, notes):
def setup(app):
app.add_directive('feature_matrix', FeatureMatrixDirective)
- app.add_stylesheet('feature-matrix.css')
+ app.add_css_file('feature-matrix.css')
+ return {
+ 'parallel_read_safe': True,
+ 'parallel_write_safe': True,
+ }
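
The replacement ``configparser`` calls behave as in this small standalone
example (the section name is made up for illustration):

    import configparser
    import io

    cfg = configparser.ConfigParser()
    # read_file() is the Python 3 spelling of the deprecated readfp().
    cfg.read_file(io.StringIO("[driver.libvirt]\ntitle = Libvirt\n"))
    print(cfg.get('driver.libvirt', 'title'))  # -> Libvirt
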
diff --git a/doc/ext/versioned_notifications.py b/doc/ext/versioned_notifications.py
index 7972b872770..244e0783b8e 100644
--- a/doc/ext/versioned_notifications.py
+++ b/doc/ext/versioned_notifications.py
@@ -61,6 +61,10 @@ def _import_all_notification_packages(self):
pkgutil.iter_modules(nova.notifications.objects.__path__))))
def _collect_notifications(self):
+        # If you do not see your notification sample showing up in the docs,
+ # be sure that the sample filename matches what is registered on the
+ # versioned notification object class using the
+ # @base.notification_sample decorator.
self._import_all_notification_packages()
base.NovaObjectRegistry.register_notification_objects()
notifications = {}
@@ -157,5 +161,9 @@ def _build_markup(self, notifications):
def setup(app):
- app.add_directive('versioned_notifications',
- VersionedNotificationDirective)
+ app.add_directive(
+ 'versioned_notifications', VersionedNotificationDirective)
+ return {
+ 'parallel_read_safe': True,
+ 'parallel_write_safe': True,
+ }
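
The new comment refers to the sample registration mechanism. Roughly, a
notification class is bound to its sample files as in the following abridged
sketch (modelled on nova.notifications.objects.instance; the field list is
shortened):

    from nova.notifications.objects import base
    from nova.objects import base as nova_base
    from nova.objects import fields


    @base.notification_sample('instance-lock.json')
    @base.notification_sample('instance-lock-with-reason.json')
    @nova_base.NovaObjectRegistry.register_notification
    class InstanceActionNotification(base.NotificationBase):
        # The decorated filenames must match files under
        # doc/notification_samples/ or the extension will not render them.
        VERSION = '1.0'
        fields = {
            'payload': fields.ObjectField('InstanceActionPayload')
        }
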
diff --git a/doc/notification_samples/aggregate-cache_images-end.json b/doc/notification_samples/aggregate-cache_images-end.json
new file mode 100644
index 00000000000..4c41e0add2f
--- /dev/null
+++ b/doc/notification_samples/aggregate-cache_images-end.json
@@ -0,0 +1,11 @@
+{
+ "priority": "INFO",
+ "payload": {
+ "$ref": "common_payloads/AggregatePayload.json#",
+ "nova_object.data": {
+ "hosts": ["compute"]
+ }
+ },
+ "event_type": "aggregate.cache_images.end",
+ "publisher_id": "nova-api:fake-mini"
+}
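
Samples like the one above reference shared payload documents with ``$ref``
and override individual fields via sibling keys such as ``nova_object.data``.
A self-contained sketch of those merge semantics (not nova's actual helper,
which lives in nova/tests/json_ref.py):

    import json
    import os


    def _merge(base, override):
        # Sibling keys beside "$ref" deep-merge over the referenced document.
        if isinstance(base, dict) and isinstance(override, dict):
            merged = dict(base)
            for key, value in override.items():
                merged[key] = _merge(base.get(key), value)
            return merged
        return override


    def resolve_refs(node, base_dir):
        if isinstance(node, list):
            return [resolve_refs(item, base_dir) for item in node]
        if not isinstance(node, dict):
            return node
        if '$ref' in node:
            path = os.path.join(base_dir, node['$ref'].rstrip('#'))
            with open(path) as fp:
                target = resolve_refs(json.load(fp), os.path.dirname(path))
            overrides = {k: resolve_refs(v, base_dir)
                         for k, v in node.items() if k != '$ref'}
            return _merge(target, overrides)
        return {k: resolve_refs(v, base_dir) for k, v in node.items()}

Applied to the sample above, the overriding ``hosts`` list replaces whatever
``common_payloads/AggregatePayload.json`` supplies for that key, while the
rest of the referenced payload is kept as-is.
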
diff --git a/doc/notification_samples/aggregate-cache_images-progress.json b/doc/notification_samples/aggregate-cache_images-progress.json
new file mode 100644
index 00000000000..f5eaee34476
--- /dev/null
+++ b/doc/notification_samples/aggregate-cache_images-progress.json
@@ -0,0 +1,20 @@
+{
+ "priority": "INFO",
+ "payload": {
+ "nova_object.version": "1.0",
+ "nova_object.namespace": "nova",
+ "nova_object.name": "AggregateCachePayload",
+ "nova_object.data": {
+ "name": "my-aggregate",
+ "uuid": "788608ec-ebdc-45c5-bc7f-e5f24ab92c80",
+ "host": "compute",
+ "total": 1,
+ "index": 1,
+ "images_cached": ["155d900f-4e14-4e4c-a73d-069cbf4541e6"],
+ "images_failed": [],
+ "id": 1
+ }
+ },
+ "event_type": "aggregate.cache_images.progress",
+ "publisher_id": "nova-conductor:fake-mini"
+}
diff --git a/doc/notification_samples/aggregate-cache_images-start.json b/doc/notification_samples/aggregate-cache_images-start.json
new file mode 100644
index 00000000000..98f38c97664
--- /dev/null
+++ b/doc/notification_samples/aggregate-cache_images-start.json
@@ -0,0 +1,11 @@
+{
+ "priority": "INFO",
+ "payload": {
+ "$ref": "common_payloads/AggregatePayload.json#",
+ "nova_object.data": {
+ "hosts": ["compute"]
+ }
+ },
+ "event_type": "aggregate.cache_images.start",
+ "publisher_id": "nova-api:fake-mini"
+}
diff --git a/doc/notification_samples/common_payloads/BandwidthPayload.json b/doc/notification_samples/common_payloads/BandwidthPayload.json
deleted file mode 100644
index dd1733c464f..00000000000
--- a/doc/notification_samples/common_payloads/BandwidthPayload.json
+++ /dev/null
@@ -1,10 +0,0 @@
-{
- "nova_object.data": {
- "network_name": "private-network",
- "out_bytes": 0,
- "in_bytes": 0
- },
- "nova_object.name": "BandwidthPayload",
- "nova_object.namespace": "nova",
- "nova_object.version": "1.0"
-}
diff --git a/doc/notification_samples/common_payloads/ComputeTaskPayload.json b/doc/notification_samples/common_payloads/ComputeTaskPayload.json
new file mode 100644
index 00000000000..cecddffb6fe
--- /dev/null
+++ b/doc/notification_samples/common_payloads/ComputeTaskPayload.json
@@ -0,0 +1,25 @@
+{
+ "nova_object.version": "1.0",
+ "nova_object.namespace": "nova",
+ "nova_object.name": "ComputeTaskPayload",
+ "nova_object.data": {
+ "instance_uuid": "d5e6a7b7-80e5-4166-85a3-cd6115201082",
+ "reason": {"$ref": "ExceptionPayload.json#"},
+ "request_spec": {
+ "$ref": "RequestSpecPayload.json#",
+ "nova_object.data": {
+ "flavor": {
+ "nova_object.data": {
+ "extra_specs": {
+ "hw:numa_cpus.0": "0",
+ "hw:numa_mem.0": "512",
+ "hw:numa_nodes": "1"
+ }
+ }
+ },
+ "numa_topology": {"$ref": "InstanceNUMATopologyPayload.json#"}
+ }
+ },
+ "state": "error"
+ }
+}
diff --git a/doc/notification_samples/common_payloads/ExceptionPayload.json b/doc/notification_samples/common_payloads/ExceptionPayload.json
new file mode 100644
index 00000000000..c9dd8150473
--- /dev/null
+++ b/doc/notification_samples/common_payloads/ExceptionPayload.json
@@ -0,0 +1,12 @@
+{
+ "nova_object.version": "1.1",
+ "nova_object.namespace": "nova",
+ "nova_object.name": "ExceptionPayload",
+ "nova_object.data": {
+ "function_name": "_schedule_instances",
+ "module_name": "nova.conductor.manager",
+ "exception": "NoValidHost",
+ "exception_message": "No valid host was found. There are not enough hosts available.",
+ "traceback": "Traceback (most recent call last):\n File \"nova/conductor/manager.py\", line ..."
+ }
+}
diff --git a/doc/notification_samples/common_payloads/ImageMetaPayload.json b/doc/notification_samples/common_payloads/ImageMetaPayload.json
new file mode 100644
index 00000000000..5ea3ed7f5ad
--- /dev/null
+++ b/doc/notification_samples/common_payloads/ImageMetaPayload.json
@@ -0,0 +1,28 @@
+{
+ "nova_object.namespace": "nova",
+ "nova_object.data": {
+ "checksum": null,
+ "container_format": "raw",
+ "created_at": "2011-01-01T01:02:03Z",
+ "direct_url": null,
+ "disk_format": "raw",
+ "id": "155d900f-4e14-4e4c-a73d-069cbf4541e6",
+ "min_disk": 0,
+ "min_ram": 0,
+ "name": "fakeimage123456",
+ "owner": null,
+ "properties": {"$ref":"ImageMetaPropsPayload.json#"},
+ "protected": false,
+ "size": 25165824,
+ "status": "active",
+ "tags": [
+ "tag1",
+ "tag2"
+ ],
+ "updated_at": "2011-01-01T01:02:03Z",
+ "virtual_size": null,
+ "visibility": "public"
+ },
+ "nova_object.name": "ImageMetaPayload",
+ "nova_object.version": "1.0"
+}
diff --git a/doc/notification_samples/common_payloads/ImageMetaPropsPayload.json b/doc/notification_samples/common_payloads/ImageMetaPropsPayload.json
new file mode 100644
index 00000000000..ef9d49647db
--- /dev/null
+++ b/doc/notification_samples/common_payloads/ImageMetaPropsPayload.json
@@ -0,0 +1,8 @@
+{
+ "nova_object.namespace": "nova",
+ "nova_object.data": {
+ "hw_architecture": "x86_64"
+ },
+ "nova_object.name": "ImageMetaPropsPayload",
+ "nova_object.version": "1.8"
+}
diff --git a/doc/notification_samples/common_payloads/InstanceActionPayload.json b/doc/notification_samples/common_payloads/InstanceActionPayload.json
index c0886a65939..4906f1428ed 100644
--- a/doc/notification_samples/common_payloads/InstanceActionPayload.json
+++ b/doc/notification_samples/common_payloads/InstanceActionPayload.json
@@ -5,5 +5,5 @@
},
"nova_object.name":"InstanceActionPayload",
"nova_object.namespace":"nova",
- "nova_object.version":"1.7"
+ "nova_object.version":"1.8"
}
diff --git a/doc/notification_samples/common_payloads/InstanceActionRebuildPayload.json b/doc/notification_samples/common_payloads/InstanceActionRebuildPayload.json
index 35cf2646478..2d05adadedf 100644
--- a/doc/notification_samples/common_payloads/InstanceActionRebuildPayload.json
+++ b/doc/notification_samples/common_payloads/InstanceActionRebuildPayload.json
@@ -9,5 +9,5 @@
]
},
"nova_object.name": "InstanceActionRebuildPayload",
- "nova_object.version": "1.8"
+ "nova_object.version": "1.9"
}
diff --git a/doc/notification_samples/common_payloads/InstanceActionRescuePayload.json b/doc/notification_samples/common_payloads/InstanceActionRescuePayload.json
index e70dd84b9f8..69703722256 100644
--- a/doc/notification_samples/common_payloads/InstanceActionRescuePayload.json
+++ b/doc/notification_samples/common_payloads/InstanceActionRescuePayload.json
@@ -4,5 +4,5 @@
"rescue_image_ref": "a2459075-d96c-40d5-893e-577ff92e721c"
},
"nova_object.name": "InstanceActionRescuePayload",
- "nova_object.version": "1.2"
+ "nova_object.version": "1.3"
}
diff --git a/doc/notification_samples/common_payloads/InstanceActionResizePrepPayload.json b/doc/notification_samples/common_payloads/InstanceActionResizePrepPayload.json
index cf7146b125b..9c32576d69e 100644
--- a/doc/notification_samples/common_payloads/InstanceActionResizePrepPayload.json
+++ b/doc/notification_samples/common_payloads/InstanceActionResizePrepPayload.json
@@ -27,5 +27,5 @@
"task_state": "resize_prep"
},
"nova_object.name": "InstanceActionResizePrepPayload",
- "nova_object.version": "1.2"
+ "nova_object.version": "1.3"
}
diff --git a/doc/notification_samples/common_payloads/InstanceActionSnapshotPayload.json b/doc/notification_samples/common_payloads/InstanceActionSnapshotPayload.json
index 371e1de3ae0..d0dd7b7f5b5 100644
--- a/doc/notification_samples/common_payloads/InstanceActionSnapshotPayload.json
+++ b/doc/notification_samples/common_payloads/InstanceActionSnapshotPayload.json
@@ -5,5 +5,5 @@
},
"nova_object.name":"InstanceActionSnapshotPayload",
"nova_object.namespace":"nova",
- "nova_object.version":"1.8"
+ "nova_object.version":"1.9"
}
diff --git a/doc/notification_samples/common_payloads/InstanceActionVolumePayload.json b/doc/notification_samples/common_payloads/InstanceActionVolumePayload.json
index 289fd3218ed..50108e82157 100644
--- a/doc/notification_samples/common_payloads/InstanceActionVolumePayload.json
+++ b/doc/notification_samples/common_payloads/InstanceActionVolumePayload.json
@@ -5,5 +5,5 @@
},
"nova_object.name": "InstanceActionVolumePayload",
"nova_object.namespace": "nova",
- "nova_object.version": "1.5"
+ "nova_object.version": "1.6"
}
\ No newline at end of file
diff --git a/doc/notification_samples/common_payloads/InstanceActionVolumeSwapPayload.json b/doc/notification_samples/common_payloads/InstanceActionVolumeSwapPayload.json
index e0445b4375b..ac56306a742 100644
--- a/doc/notification_samples/common_payloads/InstanceActionVolumeSwapPayload.json
+++ b/doc/notification_samples/common_payloads/InstanceActionVolumeSwapPayload.json
@@ -6,5 +6,5 @@
},
"nova_object.name": "InstanceActionVolumeSwapPayload",
"nova_object.namespace": "nova",
- "nova_object.version": "1.7"
+ "nova_object.version": "1.8"
}
diff --git a/doc/notification_samples/common_payloads/InstanceCreatePayload.json b/doc/notification_samples/common_payloads/InstanceCreatePayload.json
index 3586c9166ed..c7e6adc981a 100644
--- a/doc/notification_samples/common_payloads/InstanceCreatePayload.json
+++ b/doc/notification_samples/common_payloads/InstanceCreatePayload.json
@@ -20,8 +20,9 @@
"trusted_image_certificates": [
"cert-id-1",
"cert-id-2"
- ]
+ ],
+ "instance_name": "instance-00000001"
},
"nova_object.name":"InstanceCreatePayload",
- "nova_object.version": "1.10"
+ "nova_object.version": "1.12"
}
diff --git a/doc/notification_samples/common_payloads/InstanceExistsPayload.json b/doc/notification_samples/common_payloads/InstanceExistsPayload.json
index 735a8ce206f..d045286feb0 100644
--- a/doc/notification_samples/common_payloads/InstanceExistsPayload.json
+++ b/doc/notification_samples/common_payloads/InstanceExistsPayload.json
@@ -2,11 +2,9 @@
"$ref": "InstancePayload.json",
"nova_object.data":{
"audit_period": {"$ref": "AuditPeriodPayload.json#"},
- "bandwidth": [
- {"$ref": "BandwidthPayload.json#"}
- ]
+ "bandwidth": []
},
"nova_object.name":"InstanceExistsPayload",
"nova_object.namespace":"nova",
- "nova_object.version":"1.1"
+ "nova_object.version":"1.2"
}
diff --git a/doc/notification_samples/common_payloads/InstanceNUMACellPayload.json b/doc/notification_samples/common_payloads/InstanceNUMACellPayload.json
new file mode 100644
index 00000000000..221d0d1b9f1
--- /dev/null
+++ b/doc/notification_samples/common_payloads/InstanceNUMACellPayload.json
@@ -0,0 +1,17 @@
+{
+ "nova_object.version": "1.2",
+ "nova_object.namespace": "nova",
+ "nova_object.name": "InstanceNUMACellPayload",
+ "nova_object.data": {
+ "cpu_pinning_raw": null,
+ "cpu_policy": null,
+ "cpu_thread_policy": null,
+ "cpu_topology": null,
+ "cpuset": [0],
+ "pcpuset": [],
+ "cpuset_reserved": null,
+ "id": 0,
+ "memory": 512,
+ "pagesize": null
+ }
+}
diff --git a/doc/notification_samples/common_payloads/InstanceNUMATopologyPayload.json b/doc/notification_samples/common_payloads/InstanceNUMATopologyPayload.json
new file mode 100644
index 00000000000..cf28b2f4332
--- /dev/null
+++ b/doc/notification_samples/common_payloads/InstanceNUMATopologyPayload.json
@@ -0,0 +1,12 @@
+{
+ "nova_object.version": "1.0",
+ "nova_object.namespace": "nova",
+ "nova_object.name": "InstanceNUMATopologyPayload",
+ "nova_object.data": {
+ "cells": [
+ {"$ref": "InstanceNUMACellPayload.json#"}
+ ],
+ "emulator_threads_policy": null,
+ "instance_uuid": "75cab9f7-57e2-4bd1-984f-a0383d9ee60e"
+ }
+}
diff --git a/doc/notification_samples/common_payloads/InstancePCIRequestsPayload.json b/doc/notification_samples/common_payloads/InstancePCIRequestsPayload.json
new file mode 100644
index 00000000000..3ab04139f1c
--- /dev/null
+++ b/doc/notification_samples/common_payloads/InstancePCIRequestsPayload.json
@@ -0,0 +1,9 @@
+{
+ "nova_object.version": "1.0",
+ "nova_object.namespace": "nova",
+ "nova_object.name": "InstancePCIRequestsPayload",
+ "nova_object.data":{
+ "instance_uuid": "d5e6a7b7-80e5-4166-85a3-cd6115201082",
+ "requests": []
+ }
+}
diff --git a/doc/notification_samples/common_payloads/InstancePayload.json b/doc/notification_samples/common_payloads/InstancePayload.json
index f92c6b43b08..9053ba76d26 100644
--- a/doc/notification_samples/common_payloads/InstancePayload.json
+++ b/doc/notification_samples/common_payloads/InstancePayload.json
@@ -37,9 +37,10 @@
"uuid":"178b0921-8f85-4257-88b6-2e743b5a975c",
"request_id": "req-5b6c791d-5709-4f36-8fbe-c3e02869e35d",
"action_initiator_user": "fake",
- "action_initiator_project": "6f70656e737461636b20342065766572"
+ "action_initiator_project": "6f70656e737461636b20342065766572",
+ "locked_reason": null
},
"nova_object.name":"InstancePayload",
"nova_object.namespace":"nova",
- "nova_object.version":"1.7"
+ "nova_object.version":"1.8"
}
diff --git a/doc/notification_samples/common_payloads/InstanceUpdatePayload.json b/doc/notification_samples/common_payloads/InstanceUpdatePayload.json
index 39072fa1b74..2c1c63d7a65 100644
--- a/doc/notification_samples/common_payloads/InstanceUpdatePayload.json
+++ b/doc/notification_samples/common_payloads/InstanceUpdatePayload.json
@@ -29,5 +29,5 @@
},
"nova_object.name": "InstanceUpdatePayload",
"nova_object.namespace": "nova",
- "nova_object.version": "1.8"
+ "nova_object.version": "1.9"
}
\ No newline at end of file
diff --git a/doc/notification_samples/common_payloads/IpPayload.json b/doc/notification_samples/common_payloads/IpPayload.json
index d1f108e8171..bf651c27962 100644
--- a/doc/notification_samples/common_payloads/IpPayload.json
+++ b/doc/notification_samples/common_payloads/IpPayload.json
@@ -8,7 +8,7 @@
"port_uuid": "ce531f90-199f-48c0-816c-13e38010b442",
"meta": {},
"version": 4,
- "label": "private-network",
+ "label": "private",
"device_name": "tapce531f90-19"
}
}
diff --git a/doc/notification_samples/common_payloads/RequestSpecPayload.json b/doc/notification_samples/common_payloads/RequestSpecPayload.json
new file mode 100644
index 00000000000..3301c18c589
--- /dev/null
+++ b/doc/notification_samples/common_payloads/RequestSpecPayload.json
@@ -0,0 +1,24 @@
+{
+ "nova_object.namespace": "nova",
+ "nova_object.data": {
+ "availability_zone": null,
+ "flavor": {"$ref": "FlavorPayload.json#"},
+ "ignore_hosts": null,
+ "image": {"$ref": "ImageMetaPayload.json#"},
+ "instance_uuid": "d5e6a7b7-80e5-4166-85a3-cd6115201082",
+ "num_instances": 1,
+ "numa_topology": null,
+ "pci_requests": {"$ref": "InstancePCIRequestsPayload.json#"},
+ "project_id": "6f70656e737461636b20342065766572",
+ "scheduler_hints": {},
+ "security_groups": ["default"],
+ "force_hosts": null,
+ "force_nodes": null,
+ "instance_group": null,
+ "requested_destination": null,
+ "retry": null,
+ "user_id": "fake"
+ },
+ "nova_object.name": "RequestSpecPayload",
+ "nova_object.version": "1.1"
+}
diff --git a/doc/notification_samples/compute_task-build_instances-error.json b/doc/notification_samples/compute_task-build_instances-error.json
new file mode 100644
index 00000000000..e904e8c1981
--- /dev/null
+++ b/doc/notification_samples/compute_task-build_instances-error.json
@@ -0,0 +1,6 @@
+{
+ "event_type": "compute_task.build_instances.error",
+ "payload": {"$ref":"common_payloads/ComputeTaskPayload.json#"},
+ "priority": "ERROR",
+ "publisher_id": "nova-conductor:fake-mini"
+}
diff --git a/doc/notification_samples/compute_task-migrate_server-error.json b/doc/notification_samples/compute_task-migrate_server-error.json
new file mode 100644
index 00000000000..848b4da37f9
--- /dev/null
+++ b/doc/notification_samples/compute_task-migrate_server-error.json
@@ -0,0 +1,11 @@
+{
+ "event_type": "compute_task.migrate_server.error",
+ "payload": {
+ "$ref":"common_payloads/ComputeTaskPayload.json#",
+ "nova_object.data":{
+ "state": "active"
+ }
+ },
+ "priority": "ERROR",
+ "publisher_id": "nova-conductor:fake-mini"
+}
diff --git a/doc/notification_samples/compute_task-rebuild_server-error.json b/doc/notification_samples/compute_task-rebuild_server-error.json
new file mode 100644
index 00000000000..398600560b7
--- /dev/null
+++ b/doc/notification_samples/compute_task-rebuild_server-error.json
@@ -0,0 +1,8 @@
+{
+ "event_type": "compute_task.rebuild_server.error",
+ "payload": {
+ "$ref": "common_payloads/ComputeTaskPayload.json#"
+ },
+ "priority": "ERROR",
+ "publisher_id": "nova-conductor:fake-mini"
+}
diff --git a/doc/notification_samples/flavor-update.json b/doc/notification_samples/flavor-update.json
index e6af0d70c57..9b2a719f5fd 100644
--- a/doc/notification_samples/flavor-update.json
+++ b/doc/notification_samples/flavor-update.json
@@ -11,8 +11,7 @@
"disabled": false,
"vcpus": 2,
"extra_specs": {
- "key1": "value1",
- "key2": "value2"
+ "hw:numa_nodes": "2"
},
"projects": ["fake_tenant"],
"swap": 0,
diff --git a/doc/notification_samples/instance-delete-end_compute_down.json b/doc/notification_samples/instance-delete-end_compute_down.json
new file mode 100644
index 00000000000..d346095eb34
--- /dev/null
+++ b/doc/notification_samples/instance-delete-end_compute_down.json
@@ -0,0 +1,15 @@
+{
+ "event_type":"instance.delete.end",
+ "payload":{
+ "$ref":"common_payloads/InstanceActionPayload.json#",
+ "nova_object.data":{
+ "block_devices":[],
+ "deleted_at":"2012-10-29T13:42:11Z",
+ "ip_addresses":[],
+ "state":"deleted",
+ "terminated_at":"2012-10-29T13:42:11Z"
+ }
+ },
+ "priority":"INFO",
+ "publisher_id":"nova-api:fake-mini"
+}
diff --git a/doc/notification_samples/instance-delete-end_not_scheduled.json b/doc/notification_samples/instance-delete-end_not_scheduled.json
new file mode 100644
index 00000000000..1fd3c6959f8
--- /dev/null
+++ b/doc/notification_samples/instance-delete-end_not_scheduled.json
@@ -0,0 +1,20 @@
+{
+ "event_type":"instance.delete.end",
+ "payload":{
+ "$ref":"common_payloads/InstanceActionPayload.json#",
+ "nova_object.data":{
+ "availability_zone": null,
+ "block_devices":[],
+ "deleted_at":"2012-10-29T13:42:11Z",
+ "host":null,
+ "ip_addresses":[],
+ "launched_at":null,
+ "node":null,
+ "power_state":"pending",
+ "state":"deleted",
+ "terminated_at":"2012-10-29T13:42:11Z"
+ }
+ },
+ "priority":"INFO",
+ "publisher_id":"nova-api:fake-mini"
+}
diff --git a/doc/notification_samples/instance-delete-start_compute_down.json b/doc/notification_samples/instance-delete-start_compute_down.json
new file mode 100644
index 00000000000..e3ceaf56691
--- /dev/null
+++ b/doc/notification_samples/instance-delete-start_compute_down.json
@@ -0,0 +1,11 @@
+{
+ "event_type":"instance.delete.start",
+ "payload":{
+ "$ref":"common_payloads/InstanceActionPayload.json#",
+ "nova_object.data":{
+ "task_state":"deleting"
+ }
+ },
+ "priority":"INFO",
+ "publisher_id":"nova-api:fake-mini"
+}
diff --git a/doc/notification_samples/instance-delete-start_not_scheduled.json b/doc/notification_samples/instance-delete-start_not_scheduled.json
new file mode 100644
index 00000000000..60597f9e851
--- /dev/null
+++ b/doc/notification_samples/instance-delete-start_not_scheduled.json
@@ -0,0 +1,19 @@
+{
+ "event_type":"instance.delete.start",
+ "payload":{
+ "$ref":"common_payloads/InstanceActionPayload.json#",
+ "nova_object.data":{
+ "availability_zone": null,
+ "block_devices":[],
+ "host":null,
+ "ip_addresses":[],
+ "launched_at":null,
+ "node":null,
+ "power_state":"pending",
+ "state":"error",
+ "task_state":"deleting"
+ }
+ },
+ "priority":"INFO",
+ "publisher_id":"nova-api:fake-mini"
+}
diff --git a/doc/notification_samples/instance-evacuate.json b/doc/notification_samples/instance-evacuate.json
index c3251182c41..57f603de093 100644
--- a/doc/notification_samples/instance-evacuate.json
+++ b/doc/notification_samples/instance-evacuate.json
@@ -5,7 +5,6 @@
"nova_object.data": {
"host": "host2",
"node": "host2",
- "power_state": "pending",
"task_state": "rebuilding",
"action_initiator_user": "admin"
}
diff --git a/doc/notification_samples/instance-interface_attach-end.json b/doc/notification_samples/instance-interface_attach-end.json
index 273cd96f0e0..8fb0ffb6f63 100644
--- a/doc/notification_samples/instance-interface_attach-end.json
+++ b/doc/notification_samples/instance-interface_attach-end.json
@@ -11,7 +11,7 @@
"device_name": "tapce531f90-19",
"address": "192.168.1.3",
"version": 4,
- "label": "private-network",
+ "label": "private",
"port_uuid": "ce531f90-199f-48c0-816c-13e38010b442",
"mac": "fa:16:3e:4c:2c:30",
"meta": {}
@@ -25,7 +25,7 @@
"device_name": "tap88dae9fa-0d",
"address": "192.168.1.30",
"version": 4,
- "label": "private-network",
+ "label": "private",
"port_uuid": "88dae9fa-0dc6-49e3-8c29-3abc41e99ac9",
"mac": "00:0c:29:0d:11:74",
"meta": {}
diff --git a/doc/notification_samples/instance-interface_detach-start.json b/doc/notification_samples/instance-interface_detach-start.json
index 6de3067dfe6..591b34c58a5 100644
--- a/doc/notification_samples/instance-interface_detach-start.json
+++ b/doc/notification_samples/instance-interface_detach-start.json
@@ -10,7 +10,7 @@
"device_name": "tapce531f90-19",
"address": "192.168.1.3",
"version": 4,
- "label": "private-network",
+ "label": "private",
"port_uuid": "ce531f90-199f-48c0-816c-13e38010b442",
"mac": "fa:16:3e:4c:2c:30",
"meta": {}
@@ -24,7 +24,7 @@
"device_name": "tap88dae9fa-0d",
"address": "192.168.1.30",
"version": 4,
- "label": "private-network",
+ "label": "private",
"port_uuid": "88dae9fa-0dc6-49e3-8c29-3abc41e99ac9",
"mac": "00:0c:29:0d:11:74",
"meta": {}
diff --git a/doc/notification_samples/instance-live_migration_post_dest-end.json b/doc/notification_samples/instance-live_migration_post_dest-end.json
index fe3e08bf6ed..e98d2dabd7c 100644
--- a/doc/notification_samples/instance-live_migration_post_dest-end.json
+++ b/doc/notification_samples/instance-live_migration_post_dest-end.json
@@ -5,7 +5,6 @@
"nova_object.data":{
"host": "host2",
"node": "host2",
- "power_state": "pending",
"action_initiator_user": "admin"
}
},
diff --git a/doc/notification_samples/instance-live_migration_rollback-start.json b/doc/notification_samples/instance-live_migration_rollback-start.json
index 5bffa6057e6..148958d3d59 100644
--- a/doc/notification_samples/instance-live_migration_rollback-start.json
+++ b/doc/notification_samples/instance-live_migration_rollback-start.json
@@ -3,7 +3,8 @@
"payload":{
"$ref":"common_payloads/InstanceActionPayload.json#",
"nova_object.data":{
- "action_initiator_user": "admin"
+ "action_initiator_user": "admin",
+ "task_state": "migrating"
}
},
"priority":"INFO",
diff --git a/doc/notification_samples/instance-live_migration_rollback_dest-end.json b/doc/notification_samples/instance-live_migration_rollback_dest-end.json
index fe36e55be32..745c6990cae 100644
--- a/doc/notification_samples/instance-live_migration_rollback_dest-end.json
+++ b/doc/notification_samples/instance-live_migration_rollback_dest-end.json
@@ -3,7 +3,8 @@
"payload": {
"$ref": "common_payloads/InstanceActionPayload.json#",
"nova_object.data": {
- "action_initiator_user": "admin"
+ "action_initiator_user": "admin",
+ "task_state": "migrating"
}
},
"priority": "INFO",
diff --git a/doc/notification_samples/instance-live_migration_rollback_dest-start.json b/doc/notification_samples/instance-live_migration_rollback_dest-start.json
index 422f7914d35..32858d32526 100644
--- a/doc/notification_samples/instance-live_migration_rollback_dest-start.json
+++ b/doc/notification_samples/instance-live_migration_rollback_dest-start.json
@@ -3,7 +3,8 @@
"payload": {
"$ref": "common_payloads/InstanceActionPayload.json#",
"nova_object.data": {
- "action_initiator_user": "admin"
+ "action_initiator_user": "admin",
+ "task_state": "migrating"
}
},
"priority": "INFO",
diff --git a/doc/notification_samples/instance-lock-with-reason.json b/doc/notification_samples/instance-lock-with-reason.json
new file mode 100644
index 00000000000..45a6847f54f
--- /dev/null
+++ b/doc/notification_samples/instance-lock-with-reason.json
@@ -0,0 +1,12 @@
+{
+ "event_type":"instance.lock",
+ "payload":{
+ "$ref": "common_payloads/InstanceActionPayload.json#",
+ "nova_object.data":{
+ "locked":true,
+ "locked_reason":"global warming"
+ }
+ },
+ "priority":"INFO",
+ "publisher_id":"nova-api:fake-mini"
+}
diff --git a/doc/notification_samples/instance-lock.json b/doc/notification_samples/instance-lock.json
index d542d947360..568f68b99fe 100644
--- a/doc/notification_samples/instance-lock.json
+++ b/doc/notification_samples/instance-lock.json
@@ -3,7 +3,8 @@
"payload":{
"$ref": "common_payloads/InstanceActionPayload.json#",
"nova_object.data":{
- "locked":true
+ "locked":true,
+ "locked_reason": null
}
},
"priority":"INFO",
diff --git a/doc/notification_samples/instance-shelve_offload-end.json b/doc/notification_samples/instance-shelve_offload-end.json
index d7a563557cf..05df5ddd617 100644
--- a/doc/notification_samples/instance-shelve_offload-end.json
+++ b/doc/notification_samples/instance-shelve_offload-end.json
@@ -3,6 +3,7 @@
"payload":{
"$ref": "common_payloads/InstanceActionPayload.json#",
"nova_object.data":{
+ "availability_zone": null,
"state": "shelved_offloaded",
"power_state": "shutdown",
"host": null,
diff --git a/doc/notification_samples/instance-soft_delete-end.json b/doc/notification_samples/instance-soft_delete-end.json
index 632a18369d0..eb25097b686 100644
--- a/doc/notification_samples/instance-soft_delete-end.json
+++ b/doc/notification_samples/instance-soft_delete-end.json
@@ -8,5 +8,5 @@
}
},
"priority":"INFO",
- "publisher_id":"nova-compute:compute"
+ "publisher_id":"nova-compute:fake-mini"
}
diff --git a/doc/notification_samples/instance-soft_delete-start.json b/doc/notification_samples/instance-soft_delete-start.json
index 8557860aa81..d60e3dfd002 100644
--- a/doc/notification_samples/instance-soft_delete-start.json
+++ b/doc/notification_samples/instance-soft_delete-start.json
@@ -8,5 +8,5 @@
}
},
"priority":"INFO",
- "publisher_id":"nova-compute:compute"
+ "publisher_id":"nova-compute:fake-mini"
}
diff --git a/doc/notification_samples/libvirt-connect-error.json b/doc/notification_samples/libvirt-connect-error.json
new file mode 100644
index 00000000000..1d29ac5fa5a
--- /dev/null
+++ b/doc/notification_samples/libvirt-connect-error.json
@@ -0,0 +1,25 @@
+{
+ "event_type": "libvirt.connect.error",
+ "payload": {
+ "nova_object.data": {
+ "reason": {
+ "nova_object.data": {
+ "exception": "libvirtError",
+ "exception_message": "Sample exception for versioned notification test.",
+ "function_name": "_get_connection",
+ "module_name": "nova.virt.libvirt.host",
+ "traceback": "Traceback (most recent call last):\n File \"nova/virt/libvirt/host.py\", line ..."
+ },
+ "nova_object.name": "ExceptionPayload",
+ "nova_object.namespace": "nova",
+ "nova_object.version": "1.1"
+ },
+ "ip": "10.0.2.15"
+ },
+ "nova_object.name": "LibvirtErrorPayload",
+ "nova_object.namespace": "nova",
+ "nova_object.version": "1.0"
+ },
+ "priority": "ERROR",
+ "publisher_id": "nova-compute:compute"
+}
diff --git a/doc/notification_samples/scheduler-select_destinations-end.json b/doc/notification_samples/scheduler-select_destinations-end.json
new file mode 100644
index 00000000000..76535710f1a
--- /dev/null
+++ b/doc/notification_samples/scheduler-select_destinations-end.json
@@ -0,0 +1,6 @@
+{
+ "priority": "INFO",
+ "payload": {"$ref": "common_payloads/RequestSpecPayload.json#"},
+ "event_type": "scheduler.select_destinations.end",
+ "publisher_id": "nova-scheduler:fake-mini"
+}
diff --git a/doc/notification_samples/scheduler-select_destinations-start.json b/doc/notification_samples/scheduler-select_destinations-start.json
new file mode 100644
index 00000000000..b5cacc141ba
--- /dev/null
+++ b/doc/notification_samples/scheduler-select_destinations-start.json
@@ -0,0 +1,6 @@
+{
+ "priority": "INFO",
+ "payload": {"$ref": "common_payloads/RequestSpecPayload.json#"},
+ "event_type": "scheduler.select_destinations.start",
+ "publisher_id": "nova-scheduler:fake-mini"
+}
diff --git a/doc/notification_samples/volume-usage.json b/doc/notification_samples/volume-usage.json
new file mode 100644
index 00000000000..03b89d34d6b
--- /dev/null
+++ b/doc/notification_samples/volume-usage.json
@@ -0,0 +1,22 @@
+{
+ "event_type": "volume.usage",
+ "payload": {
+ "nova_object.data": {
+ "availability_zone": "nova",
+ "instance_uuid": "88fde343-13a8-4047-84fb-2657d5e702f9",
+ "last_refreshed": "2012-10-29T13:42:11Z",
+ "project_id": "6f70656e737461636b20342065766572",
+ "read_bytes": 0,
+ "reads": 0,
+ "user_id": "fake",
+ "volume_id": "a07f71dc-8151-4e7d-a0cc-cd24a3f11113",
+ "write_bytes": 0,
+ "writes": 0
+ },
+ "nova_object.name": "VolumeUsagePayload",
+ "nova_object.namespace": "nova",
+ "nova_object.version": "1.0"
+ },
+ "priority": "INFO",
+ "publisher_id": "nova-compute:compute"
+}
diff --git a/doc/requirements.txt b/doc/requirements.txt
index fc1af5c4c94..df112fe733f 100644
--- a/doc/requirements.txt
+++ b/doc/requirements.txt
@@ -1,15 +1,16 @@
# The order of packages is significant, because pip processes them in the order
# of appearance. Changing the order has an impact on the overall integration
# process, which may cause wedges in the gate later.
-sphinx!=1.6.6,!=1.6.7,>=1.6.2 # BSD
+sphinx>=2.0.0,!=2.1.0 # BSD
sphinxcontrib-actdiag>=0.8.5 # BSD
sphinxcontrib-seqdiag>=0.8.4 # BSD
-sphinx-feature-classification>=0.2.0 # Apache-2.0
+sphinxcontrib-svg2pdfconverter>=0.1.0 # BSD
+sphinx-feature-classification>=1.1.0 # Apache-2.0
os-api-ref>=1.4.0 # Apache-2.0
-openstackdocstheme>=1.19.0 # Apache-2.0
+openstackdocstheme>=2.2.0 # Apache-2.0
# releasenotes
-reno>=2.5.0 # Apache-2.0
+reno>=3.1.0 # Apache-2.0
# redirect tests in docs
whereto>=0.3.0 # Apache-2.0
diff --git a/doc/source/_extra/.htaccess b/doc/source/_extra/.htaccess
index 529f1ba33e2..5d361fc91c9 100644
--- a/doc/source/_extra/.htaccess
+++ b/doc/source/_extra/.htaccess
@@ -1,13 +1,7 @@
-# The following is generated with:
-#
-# git log --follow --name-status --format='%H' 2d0dfc632f.. -- doc/source | \
-# grep ^R | grep .rst | cut -f2- | \
-# sed -e 's|doc/source/|redirectmatch 301 ^/nova/([^/]+)/|' -e 's|doc/source/|/nova/$1/|' -e 's/.rst/.html$/' -e 's/.rst/.html/' | \
-# sort
-
redirectmatch 301 ^/nova/([^/]+)/addmethod.openstackapi.html$ /nova/$1/contributor/api-2.html
redirectmatch 301 ^/nova/([^/]+)/admin/flavors2.html$ /nova/$1/admin/flavors.html
redirectmatch 301 ^/nova/([^/]+)/admin/numa.html$ /nova/$1/admin/cpu-topologies.html
+redirectmatch 301 ^/nova/([^/]+)/admin/quotas2.html$ /nova/$1/admin/quotas.html
redirectmatch 301 ^/nova/([^/]+)/aggregates.html$ /nova/$1/user/aggregates.html
redirectmatch 301 ^/nova/([^/]+)/api_microversion_dev.html$ /nova/$1/contributor/microversions.html
redirectmatch 301 ^/nova/([^/]+)/api_microversion_history.html$ /nova/$1/reference/api-microversion-history.html
@@ -21,12 +15,12 @@ redirectmatch 301 ^/nova/([^/]+)/conductor.html$ /nova/$1/user/conductor.html
redirectmatch 301 ^/nova/([^/]+)/development.environment.html$ /nova/$1/contributor/development-environment.html
redirectmatch 301 ^/nova/([^/]+)/devref/api.html /nova/$1/contributor/api.html
redirectmatch 301 ^/nova/([^/]+)/devref/cells.html /nova/$1/user/cells.html
-redirectmatch 301 ^/nova/([^/]+)/devref/filter_scheduler.html /nova/$1/user/filter-scheduler.html
+redirectmatch 301 ^/nova/([^/]+)/devref/filter_scheduler.html /nova/$1/admin/scheduling.html
# catch all, if we hit something in devref assume it moved to
# reference unless we have already triggered a hit above.
redirectmatch 301 ^/nova/([^/]+)/devref/([^/]+).html /nova/$1/reference/$2.html
redirectmatch 301 ^/nova/([^/]+)/feature_classification.html$ /nova/$1/user/feature-classification.html
-redirectmatch 301 ^/nova/([^/]+)/filter_scheduler.html$ /nova/$1/user/filter-scheduler.html
+redirectmatch 301 ^/nova/([^/]+)/filter_scheduler.html$ /nova/$1/admin/scheduling.html
redirectmatch 301 ^/nova/([^/]+)/gmr.html$ /nova/$1/reference/gmr.html
redirectmatch 301 ^/nova/([^/]+)/how_to_get_involved.html$ /nova/$1/contributor/how-to-get-involved.html
redirectmatch 301 ^/nova/([^/]+)/i18n.html$ /nova/$1/reference/i18n.html
@@ -38,10 +32,7 @@ redirectmatch 301 ^/nova/([^/]+)/man/nova-cells.html$ /nova/$1/cli/nova-cells.ht
# this is gone and never coming back, indicate that to the end users
redirectmatch 301 ^/nova/([^/]+)/man/nova-compute.html$ /nova/$1/cli/nova-compute.html
redirectmatch 301 ^/nova/([^/]+)/man/nova-conductor.html$ /nova/$1/cli/nova-conductor.html
-redirectmatch 301 ^/nova/([^/]+)/man/nova-console.html$ /nova/$1/cli/nova-console.html
-redirectmatch 301 ^/nova/([^/]+)/man/nova-consoleauth.html$ /nova/$1/cli/nova-consoleauth.html
redirectmatch 301 ^/nova/([^/]+)/man/nova-dhcpbridge.html$ /nova/$1/cli/nova-dhcpbridge.html
-redirectmatch 301 ^/nova/([^/]+)/man/nova-idmapshift.html$ /nova/$1/cli/nova-idmapshift.html
redirectmatch 301 ^/nova/([^/]+)/man/nova-manage.html$ /nova/$1/cli/nova-manage.html
redirectmatch 301 ^/nova/([^/]+)/man/nova-network.html$ /nova/$1/cli/nova-network.html
redirectmatch 301 ^/nova/([^/]+)/man/nova-novncproxy.html$ /nova/$1/cli/nova-novncproxy.html
@@ -50,7 +41,6 @@ redirectmatch 301 ^/nova/([^/]+)/man/nova-scheduler.html$ /nova/$1/cli/nova-sche
redirectmatch 301 ^/nova/([^/]+)/man/nova-serialproxy.html$ /nova/$1/cli/nova-serialproxy.html
redirectmatch 301 ^/nova/([^/]+)/man/nova-spicehtml5proxy.html$ /nova/$1/cli/nova-spicehtml5proxy.html
redirectmatch 301 ^/nova/([^/]+)/man/nova-status.html$ /nova/$1/cli/nova-status.html
-redirectmatch 301 ^/nova/([^/]+)/man/nova-xvpvncproxy.html$ /nova/$1/cli/nova-xvpvncproxy.html
redirectmatch 301 ^/nova/([^/]+)/notifications.html$ /nova/$1/reference/notifications.html
redirectmatch 301 ^/nova/([^/]+)/placement.html$ /nova/$1/user/placement.html
redirectmatch 301 ^/nova/([^/]+)/placement_dev.html$ /nova/$1/contributor/placement.html
@@ -72,8 +62,22 @@ redirectmatch 301 ^/nova/([^/]+)/testing/libvirt-numa.html$ /nova/$1/contributor
redirectmatch 301 ^/nova/([^/]+)/testing/serial-console.html$ /nova/$1/contributor/testing/serial-console.html
redirectmatch 301 ^/nova/([^/]+)/testing/zero-downtime-upgrade.html$ /nova/$1/contributor/testing/zero-downtime-upgrade.html
redirectmatch 301 ^/nova/([^/]+)/threading.html$ /nova/$1/reference/threading.html
-redirectmatch 301 ^/nova/([^/]+)/upgrade.html$ /nova/$1/user/upgrade.html
-redirectmatch 301 ^/nova/([^/]+)/vendordata.html$ /nova/$1/user/vendordata.html
+redirectmatch 301 ^/nova/([^/]+)/upgrade.html$ /nova/$1/admin/upgrades.html
+redirectmatch 301 ^/nova/([^/]+)/user/aggregates.html$ /nova/$1/admin/aggregates.html
+redirectmatch 301 ^/nova/([^/]+)/user/cellsv2_layout.html$ /nova/$1/user/cellsv2-layout.html
+redirectmatch 301 ^/nova/([^/]+)/user/config-drive.html$ /nova/$1/user/metadata.html
+redirectmatch 301 ^/nova/([^/]+)/user/filter-scheduler.html$ /nova/$1/admin/scheduling.html
+redirectmatch 301 ^/nova/([^/]+)/user/metadata-service.html$ /nova/$1/user/metadata.html
+redirectmatch 301 ^/nova/([^/]+)/user/placement.html$ /placement/$1/
+redirectmatch 301 ^/nova/([^/]+)/user/upgrade.html$ /nova/$1/admin/upgrades.html
+redirectmatch 301 ^/nova/([^/]+)/user/user-data.html$ /nova/$1/user/metadata.html
+redirectmatch 301 ^/nova/([^/]+)/user/vendordata.html$ /nova/$1/user/metadata.html
+redirectmatch 301 ^/nova/([^/]+)/vendordata.html$ /nova/$1/user/metadata.html
redirectmatch 301 ^/nova/([^/]+)/vmstates.html$ /nova/$1/reference/vm-states.html
redirectmatch 301 ^/nova/([^/]+)/wsgi.html$ /nova/$1/user/wsgi.html
-redirectmatch 301 ^/nova/([^/]+)/user/cellsv2_layout.html$ /nova/$1/user/cellsv2-layout.html
+redirectmatch 301 ^/nova/([^/]+)/admin/adv-config.html$ /nova/$1/admin/index.html
+redirectmatch 301 ^/nova/([^/]+)/admin/configuration/schedulers.html$ /nova/$1/admin/scheduling.html
+redirectmatch 301 ^/nova/([^/]+)/admin/system-admin.html$ /nova/$1/admin/index.html
+redirectmatch 301 ^/nova/([^/]+)/admin/port_with_resource_request.html$ /nova/$1/admin/ports-with-resource-requests.html
+redirectmatch 301 ^/nova/([^/]+)/admin/manage-users.html$ /nova/$1/admin/arch.html
+redirectmatch 301 ^/nova/([^/]+)/admin/mitigation-for-Intel-MDS-security-flaws.html /nova/$1/admin/cpu-models.html
diff --git a/doc/source/figures/nova-weighting-hosts.png b/doc/source/_static/images/nova-weighting-hosts.png
similarity index 100%
rename from doc/source/figures/nova-weighting-hosts.png
rename to doc/source/_static/images/nova-weighting-hosts.png
diff --git a/doc/source/_static/images/traits-taxonomy.svg b/doc/source/_static/images/traits-taxonomy.svg
new file mode 100644
index 00000000000..e05884a20f6
--- /dev/null
+++ b/doc/source/_static/images/traits-taxonomy.svg
@@ -0,0 +1,330 @@
+
+
diff --git a/doc/source/figures/vmware-nova-driver-architecture.jpg b/doc/source/_static/images/vmware-nova-driver-architecture.jpg
similarity index 100%
rename from doc/source/figures/vmware-nova-driver-architecture.jpg
rename to doc/source/_static/images/vmware-nova-driver-architecture.jpg
diff --git a/doc/source/figures/xenserver_architecture.png b/doc/source/_static/images/xenserver_architecture.png
similarity index 100%
rename from doc/source/figures/xenserver_architecture.png
rename to doc/source/_static/images/xenserver_architecture.png
diff --git a/doc/source/admin/admin-password-injection.rst b/doc/source/admin/admin-password-injection.rst
index dbd0081da9d..278f7b02a8c 100644
--- a/doc/source/admin/admin-password-injection.rst
+++ b/doc/source/admin/admin-password-injection.rst
@@ -10,18 +10,9 @@ command. You can also view and set the admin password from the dashboard.
.. rubric:: Password injection using the dashboard
-By default, the dashboard will display the ``admin`` password and allow the
-user to modify it.
-
-If you do not want to support password injection, disable the password fields
-by editing the dashboard's ``local_settings.py`` file.
-
-.. code-block:: none
-
- OPENSTACK_HYPERVISOR_FEATURES = {
- ...
- 'can_set_password': False,
- }
+For password injection display in the dashboard, please refer to the setting of
+``can_set_password`` in :horizon-doc:`Horizon doc
+`
.. rubric:: Password injection on libvirt-based hypervisors
@@ -45,16 +36,13 @@ the ``/etc/shadow`` file inside the virtual machine instance.
Users can only use :command:`ssh` to access the instance by using the admin
password if the virtual machine image is a Linux distribution, and it has
- been configured to allow users to use :command:`ssh` as the root user. This
- is not the case for `Ubuntu cloud images `_
+ been configured to allow users to use :command:`ssh` as the root user with
+  password authentication. This is not the case for
+ `Ubuntu cloud images `_
which, by default, does not allow users to use :command:`ssh` to access the
- root account.
-
-.. rubric:: Password injection and XenAPI (XenServer/XCP)
-
-When using the XenAPI hypervisor back end, Compute uses the XenAPI agent to
-inject passwords into guests. The virtual machine image must be configured with
-the agent for password injection to work.
+ root account, or
+ `CentOS cloud images `_ which, by default,
+  does not allow :command:`ssh` access to the instance with a password.
.. rubric:: Password injection and Windows images (all hypervisors)
diff --git a/doc/source/admin/adv-config.rst b/doc/source/admin/adv-config.rst
deleted file mode 100644
index 82e6be6b32d..00000000000
--- a/doc/source/admin/adv-config.rst
+++ /dev/null
@@ -1,30 +0,0 @@
-======================
-Advanced configuration
-======================
-
-OpenStack clouds run on platforms that differ greatly in the capabilities that
-they provide. By default, the Compute service seeks to abstract the underlying
-hardware that it runs on, rather than exposing specifics about the underlying
-host platforms. This abstraction manifests itself in many ways. For example,
-rather than exposing the types and topologies of CPUs running on hosts, the
-service exposes a number of generic CPUs (virtual CPUs, or vCPUs) and allows
-for overcommitting of these. In a similar manner, rather than exposing the
-individual types of network devices available on hosts, generic
-software-powered network ports are provided. These features are designed to
-allow high resource utilization and allows the service to provide a generic
-cost-effective and highly scalable cloud upon which to build applications.
-
-This abstraction is beneficial for most workloads. However, there are some
-workloads where determinism and per-instance performance are important, if not
-vital. In these cases, instances can be expected to deliver near-native
-performance. The Compute service provides features to improve individual
-instance for these kind of workloads.
-
-.. toctree::
- :maxdepth: 2
-
- pci-passthrough
- cpu-topologies
- huge-pages
- virtual-gpu
- file-backed-memory
diff --git a/doc/source/admin/aggregates.rst b/doc/source/admin/aggregates.rst
new file mode 100644
index 00000000000..621af8caa42
--- /dev/null
+++ b/doc/source/admin/aggregates.rst
@@ -0,0 +1,394 @@
+===============
+Host aggregates
+===============
+
+Host aggregates are a mechanism for partitioning hosts in an OpenStack cloud,
+or a region of an OpenStack cloud, based on arbitrary characteristics.
+For example, an administrator may want to group together hosts that share
+additional hardware or performance characteristics.
+
+Host aggregates started out as a way to use Xen hypervisor resource pools, but
+have been generalized to provide a mechanism to allow administrators to assign
+key-value pairs to groups of machines. Each node can belong to multiple
+aggregates, each aggregate can have multiple key-value pairs, and the same
+key-value pair can be assigned to multiple aggregates.
+scheduler to enable advanced scheduling, to set up Xen hypervisor resource
+pools or to define logical groups for migration.
+
+Host aggregates are not explicitly exposed to users. Instead, administrators
+map flavors to host aggregates by setting metadata on a
+host aggregate, and matching flavor extra specifications. The scheduler then
+endeavors to match user requests for instances of the given flavor to a host
+aggregate with the same key-value pair in its metadata. Compute nodes can be in
+more than one host aggregate. Weight multipliers can be controlled on a
+per-aggregate basis by setting the desired ``xxx_weight_multiplier`` aggregate
+metadata.
+
+Administrators can optionally expose a host aggregate as an
+:term:`Availability Zone`. Availability zones are different from host
+aggregates in that they are explicitly exposed to the user, and hosts can only
+be in a single availability zone. Administrators can configure a default
+availability zone where instances will be scheduled when the user fails to
+specify one. For more information on how to do this, refer to
+:doc:`/admin/availability-zones`.
+
+
+.. _config-sch-for-aggs:
+
+Configure scheduler to support host aggregates
+----------------------------------------------
+
+One common use case for host aggregates is when you want to support scheduling
+instances to a subset of compute hosts because they have a specific capability.
+For example, you may want to allow users to request compute hosts that have SSD
+drives if they need access to faster disk I/O, or access to compute hosts that
+have GPU cards to take advantage of GPU-accelerated code.
+
+To configure the scheduler to support host aggregates, the
+:oslo.config:option:`filter_scheduler.enabled_filters` configuration option
+must contain the ``AggregateInstanceExtraSpecsFilter`` in addition to the other
+filters used by the scheduler. Add the following line to ``nova.conf`` on the
+host that runs the ``nova-scheduler`` service to enable host aggregates
+filtering, as well as the other filters that are typically enabled:
+
+.. code-block:: ini
+
+ [filter_scheduler]
+ enabled_filters=...,AggregateInstanceExtraSpecsFilter
+
+Example: Specify compute hosts with SSDs
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+This example configures the Compute service to enable users to request nodes
+that have solid-state drives (SSDs). You create a ``fast-io`` host aggregate in
+the ``nova`` availability zone and you add the ``ssd=true`` key-value pair to
+the aggregate. Then, you add the ``node1`` and ``node2`` compute nodes to it.
+
+.. code-block:: console
+
+ $ openstack aggregate create --zone nova fast-io
+ +-------------------+----------------------------+
+ | Field | Value |
+ +-------------------+----------------------------+
+ | availability_zone | nova |
+ | created_at | 2016-12-22T07:31:13.013466 |
+ | deleted | False |
+ | deleted_at | None |
+ | id | 1 |
+ | name | fast-io |
+ | updated_at | None |
+ +-------------------+----------------------------+
+
+ $ openstack aggregate set --property ssd=true 1
+ +-------------------+----------------------------+
+ | Field | Value |
+ +-------------------+----------------------------+
+ | availability_zone | nova |
+ | created_at | 2016-12-22T07:31:13.000000 |
+ | deleted | False |
+ | deleted_at | None |
+ | hosts | [] |
+ | id | 1 |
+ | name | fast-io |
+ | properties | ssd='true' |
+ | updated_at | None |
+ +-------------------+----------------------------+
+
+ $ openstack aggregate add host 1 node1
+ +-------------------+--------------------------------------------------+
+ | Field | Value |
+ +-------------------+--------------------------------------------------+
+ | availability_zone | nova |
+ | created_at | 2016-12-22T07:31:13.000000 |
+ | deleted | False |
+ | deleted_at | None |
+ | hosts | [u'node1'] |
+ | id | 1 |
+ | metadata | {u'ssd': u'true', u'availability_zone': u'nova'} |
+ | name | fast-io |
+ | updated_at | None |
+ +-------------------+--------------------------------------------------+
+
+ $ openstack aggregate add host 1 node2
+ +-------------------+--------------------------------------------------+
+ | Field | Value |
+ +-------------------+--------------------------------------------------+
+ | availability_zone | nova |
+ | created_at | 2016-12-22T07:31:13.000000 |
+ | deleted | False |
+ | deleted_at | None |
+ | hosts | [u'node1', u'node2'] |
+ | id | 1 |
+ | metadata | {u'ssd': u'true', u'availability_zone': u'nova'} |
+ | name | fast-io |
+ | updated_at | None |
+ +-------------------+--------------------------------------------------+
+
+Use the :command:`openstack flavor create` command to create the ``ssd.large``
+flavor with an ID of 6, 8 GB of RAM, an 80 GB root disk, and 4 vCPUs.
+
+.. code-block:: console
+
+ $ openstack flavor create --id 6 --ram 8192 --disk 80 --vcpus 4 ssd.large
+ +----------------------------+-----------+
+ | Field | Value |
+ +----------------------------+-----------+
+ | OS-FLV-DISABLED:disabled | False |
+ | OS-FLV-EXT-DATA:ephemeral | 0 |
+ | disk | 80 |
+ | id | 6 |
+ | name | ssd.large |
+ | os-flavor-access:is_public | True |
+ | ram | 8192 |
+ | rxtx_factor | 1.0 |
+ | swap | |
+ | vcpus | 4 |
+ +----------------------------+-----------+
+
+Once the flavor is created, specify one or more key-value pairs that match the
+key-value pairs on the host aggregates, using the
+``aggregate_instance_extra_specs`` scope. In this case, that is the
+``aggregate_instance_extra_specs:ssd=true`` key-value pair. Setting a
+key-value pair on a flavor is done using the :command:`openstack flavor set`
+command.
+
+.. code-block:: console
+
+ $ openstack flavor set \
+ --property aggregate_instance_extra_specs:ssd=true ssd.large
+
+Once it is set, you should see the ``extra_specs`` property of the
+``ssd.large`` flavor populated with a key of ``ssd`` and a corresponding value
+of ``true``.
+
+.. code-block:: console
+
+ $ openstack flavor show ssd.large
+ +----------------------------+-------------------------------------------+
+ | Field | Value |
+ +----------------------------+-------------------------------------------+
+ | OS-FLV-DISABLED:disabled | False |
+ | OS-FLV-EXT-DATA:ephemeral | 0 |
+ | disk | 80 |
+ | id | 6 |
+ | name | ssd.large |
+ | os-flavor-access:is_public | True |
+ | properties | aggregate_instance_extra_specs:ssd='true' |
+ | ram | 8192 |
+ | rxtx_factor | 1.0 |
+ | swap | |
+ | vcpus | 4 |
+ +----------------------------+-------------------------------------------+
+
+Now, when a user requests an instance with the ``ssd.large`` flavor,
+the scheduler only considers hosts with the ``ssd=true`` key-value pair.
+In this example, these are ``node1`` and ``node2``.
+
+
+Aggregates in Placement
+-----------------------
+
+Aggregates also exist in placement and are not the same thing as host
+aggregates in nova. These aggregates are defined purely as groupings of
+related resource providers. Since compute nodes in nova are represented in
+placement as resource providers, they can be added to a placement aggregate as
+well. For example, get the UUID of the compute node using :command:`openstack
+hypervisor list` and add it to an aggregate in placement using
+:command:`openstack resource provider aggregate set`.
+
+.. code-block:: console
+
+ $ openstack --os-compute-api-version=2.53 hypervisor list
+ +--------------------------------------+---------------------+-----------------+-----------------+-------+
+ | ID | Hypervisor Hostname | Hypervisor Type | Host IP | State |
+ +--------------------------------------+---------------------+-----------------+-----------------+-------+
+ | 815a5634-86fb-4e1e-8824-8a631fee3e06 | node1 | QEMU | 192.168.1.123 | up |
+ +--------------------------------------+---------------------+-----------------+-----------------+-------+
+
+ $ openstack --os-placement-api-version=1.2 resource provider aggregate set \
+ --aggregate df4c74f3-d2c4-4991-b461-f1a678e1d161 \
+ 815a5634-86fb-4e1e-8824-8a631fee3e06
+
+Some scheduling filter operations, such as tenant isolation and availability
+zone enforcement, can be performed by placement for increased speed and
+efficiency.
+
+.. note::
+
+ The nova-api service attempts (as of nova 18.0.0) to automatically mirror
+ the association of a compute host with an aggregate when an administrator
+   adds or removes a host to/from a nova host aggregate. This should remove
+ the need to manually create those association records in the placement API
+ using the ``openstack resource provider aggregate set`` CLI invocation.
+
+
+.. _tenant-isolation-with-placement:
+
+Tenant Isolation with Placement
+-------------------------------
+
+In order to use placement to isolate tenants, there must be placement
+aggregates that match the membership and UUID of nova host aggregates that you
+want to use for isolation. The same key pattern in aggregate metadata used by
+the :ref:`AggregateMultiTenancyIsolation` filter controls this function, and is
+enabled by setting
+:oslo.config:option:`scheduler.limit_tenants_to_placement_aggregate` to
+``True``.
+
+.. code-block:: console
+
+ $ openstack --os-compute-api-version=2.53 aggregate create myagg
+ +-------------------+--------------------------------------+
+ | Field | Value |
+ +-------------------+--------------------------------------+
+ | availability_zone | None |
+ | created_at | 2018-03-29T16:22:23.175884 |
+ | deleted | False |
+ | deleted_at | None |
+ | id | 4 |
+ | name | myagg |
+ | updated_at | None |
+ | uuid | 019e2189-31b3-49e1-aff2-b220ebd91c24 |
+ +-------------------+--------------------------------------+
+
+ $ openstack --os-compute-api-version=2.53 aggregate add host myagg node1
+ +-------------------+--------------------------------------+
+ | Field | Value |
+ +-------------------+--------------------------------------+
+ | availability_zone | None |
+ | created_at | 2018-03-29T16:22:23.175884 |
+ | deleted | False |
+ | deleted_at | None |
+ | hosts | [u'node1'] |
+ | id | 4 |
+ | name | myagg |
+ | updated_at | None |
+ | uuid | 019e2189-31b3-49e1-aff2-b220ebd91c24 |
+ +-------------------+--------------------------------------+
+
+ $ openstack project list -f value | grep 'demo'
+ 9691591f913949818a514f95286a6b90 demo
+
+ $ openstack aggregate set \
+ --property filter_tenant_id=9691591f913949818a514f95286a6b90 myagg
+
+ $ openstack --os-placement-api-version=1.2 resource provider aggregate set \
+ --aggregate 019e2189-31b3-49e1-aff2-b220ebd91c24 \
+ 815a5634-86fb-4e1e-8824-8a631fee3e06
+
+Note that the ``filter_tenant_id`` metadata key can optionally be suffixed
+with any string to support multiple tenants, such as
+``filter_tenant_id3=$tenantid``.
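+
+For example, to isolate the aggregate for a second tenant as well, a suffixed
+key can be added. A sketch, where the tenant ID is illustrative:
+
+.. code-block:: console
+
+   $ openstack aggregate set \
+       --property filter_tenant_id2=b75a4b7d69d94f9e8cbf3a0c0e9b2f31 myagg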
+
+
+Usage
+-----
+
+Much of the configuration of host aggregates is driven from the API or
+command-line clients. For example, to create a new aggregate and add hosts to
+it using the :command:`openstack` client, run:
+
+.. code-block:: console
+
+ $ openstack aggregate create my-aggregate
+ $ openstack aggregate add host my-aggregate my-host
+
+To list all aggregates and show information about a specific aggregate, run:
+
+.. code-block:: console
+
+ $ openstack aggregate list
+ $ openstack aggregate show my-aggregate
+
+To set and unset a property on the aggregate, run:
+
+.. code-block:: console
+
+   $ openstack aggregate set --property pinned=true my-aggregate
+ $ openstack aggregate unset --property pinned my-aggregate
+
+To rename the aggregate, run:
+
+.. code-block:: console
+
+ $ openstack aggregate set --name my-awesome-aggregate my-aggregate
+
+To remove a host from an aggregate and delete the aggregate, run:
+
+.. code-block:: console
+
+ $ openstack aggregate remove host my-aggregate my-host
+ $ openstack aggregate delete my-aggregate
+
+For more information, refer to the :python-openstackclient-doc:`OpenStack
+Client documentation `.
+
+
+Configuration
+-------------
+
+In addition to CRUD operations enabled by the API and clients, the following
+configuration options can be used to configure how host aggregates and the
+related availability zones feature operate under the hood:
+
+- :oslo.config:option:`default_schedule_zone`
+- :oslo.config:option:`scheduler.limit_tenants_to_placement_aggregate`
+- :oslo.config:option:`cinder.cross_az_attach`
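+
+For example, a deployment that wants a default zone, placement-enforced tenant
+isolation, and compute/block storage affinity might set something like the
+following in ``nova.conf`` (the values shown are an illustrative sketch only):
+
+.. code-block:: ini
+
+   [DEFAULT]
+   default_schedule_zone = az1
+
+   [scheduler]
+   limit_tenants_to_placement_aggregate = True
+
+   [cinder]
+   cross_az_attach = False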
+
+Finally, as discussed previously, there are a number of host aggregate-specific
+scheduler filters. These are:
+
+- :ref:`AggregateImagePropertiesIsolation`
+- :ref:`AggregateInstanceExtraSpecsFilter`
+- :ref:`AggregateIoOpsFilter`
+- :ref:`AggregateMultiTenancyIsolation`
+- :ref:`AggregateNumInstancesFilter`
+- :ref:`AggregateTypeAffinityFilter`
+
+The following configuration options are also applicable to the scheduler:
+
+- :oslo.config:option:`cpu_allocation_ratio`
+- :oslo.config:option:`ram_allocation_ratio`
+- :oslo.config:option:`filter_scheduler.max_instances_per_host`
+- :oslo.config:option:`filter_scheduler.aggregate_image_properties_isolation_separator`
+- :oslo.config:option:`filter_scheduler.aggregate_image_properties_isolation_namespace`
+
+.. _image-caching-aggregates:
+
+Image Caching
+-------------
+
+Aggregates can be used to target multiple compute nodes when requesting that
+images be pre-cached for performance reasons.
+
+.. note::
+
+ `Some of the virt drivers`_ provide image caching support, which improves performance
+ of second-and-later boots of the same image by keeping the base image in an on-disk
+ cache. This avoids the need to re-download the image from Glance, which reduces
+ network utilization and time-to-boot latency. Image pre-caching is the act of priming
+ that cache with images ahead of time to improve performance of the first boot.
+
+.. _Some of the virt drivers: https://docs.openstack.org/nova/latest/user/support-matrix.html#operation_cache_images
+
+Assuming an aggregate called ``my-aggregate`` where two images should
+be pre-cached, running the following command will initiate the
+request:
+
+.. code-block:: console
+
+ $ nova aggregate-cache-images my-aggregate image1 image2
+
+Note that image pre-caching happens asynchronously in a best-effort
+manner. The images and aggregate provided are checked by the server
+when the command is run, but the compute nodes are not checked to see
+if they support image caching until the process runs. Progress and
+results are logged by each compute, and the process sends
+``aggregate.cache_images.start``, ``aggregate.cache_images.progress``,
+and ``aggregate.cache_images.end`` notifications, which may be useful
+for monitoring the operation externally.
+
+References
+----------
+
+- `Curse your bones, Availability Zones! (Openstack Summit Vancouver 2018)
+ `__
diff --git a/doc/source/admin/arch.rst b/doc/source/admin/arch.rst
index c7fe4d28b3c..c141fabcbdf 100644
--- a/doc/source/admin/arch.rst
+++ b/doc/source/admin/arch.rst
@@ -43,7 +43,7 @@ Compute controls hypervisors through an API server. Selecting the best
hypervisor to use can be difficult, and you must take budget, resource
constraints, supported features, and required technical specifications into
account. However, the majority of OpenStack development is done on systems
-using KVM and Xen-based hypervisors. For a detailed list of features and
+using KVM-based hypervisors. For a detailed list of features and
support across different hypervisors, see :doc:`/user/support-matrix`.
You can also orchestrate clouds using multiple hypervisors in different
@@ -51,24 +51,25 @@ availability zones. Compute supports the following hypervisors:
- :ironic-doc:`Baremetal <>`
-- `Docker `__
-
- `Hyper-V
- `__
+ `__
- `Kernel-based Virtual Machine (KVM)
- `__
+ `__
+
+- `Linux Containers (LXC) `__
-- `Linux Containers (LXC) `__
+- `PowerVM `__
-- `Quick Emulator (QEMU) `__
+- `Quick Emulator (QEMU) `__
-- `User Mode Linux (UML) `__
+- `Virtuozzo `__
- `VMware vSphere
`__
-- `Xen `__
+
+- `zVM `__
For more information about hypervisors, see
:doc:`/admin/configuration/hypervisors`
@@ -77,6 +78,9 @@ section in the Nova Configuration Reference.
Projects, users, and roles
~~~~~~~~~~~~~~~~~~~~~~~~~~
+To begin using Compute, you must create a user with the
+:keystone-doc:`Identity service <>`.
+
The Compute system is designed to be used by different consumers in the form of
projects on a shared system, and role-based access assignments. Roles control
the actions that a user is allowed to perform.
@@ -103,7 +107,7 @@ For projects, you can use quota controls to limit the:
Roles control the actions a user is allowed to perform. By default, most
actions do not require a particular role, but you can configure them by editing
-the ``policy.json`` file for user roles. For example, a rule can be defined so
+the ``policy.yaml`` file for user roles. For example, a rule can be defined so
that a user must have the ``admin`` role in order to be able to allocate a
public IP address.
@@ -228,7 +232,7 @@ The displayed image attributes are:
Virtual hardware templates are called ``flavors``. By default, these are
configurable by admin users, however that behavior can be changed by redefining
the access controls for ``compute_extension:flavormanage`` in
-``/etc/nova/policy.json`` on the ``compute-api`` server.
+``/etc/nova/policy.yaml`` on the ``compute-api`` server.
For more information, refer to :doc:`/configuration/policy`.
For a list of flavors that are available on your system:
diff --git a/doc/source/admin/availability-zones.rst b/doc/source/admin/availability-zones.rst
index dc4f8963344..678aff2c5a5 100644
--- a/doc/source/admin/availability-zones.rst
+++ b/doc/source/admin/availability-zones.rst
@@ -1,70 +1,284 @@
-=========================================
-Select hosts where instances are launched
-=========================================
+==================
+Availability Zones
+==================
-With the appropriate permissions, you can select which host instances are
-launched on and which roles can boot instances on this host.
+.. note::
-#. To select the host where instances are launched, use the
- ``--availability-zone ZONE:HOST:NODE`` parameter on the :command:`openstack
- server create` command.
+ This section provides deployment and admin-user usage information about the
+ availability zone feature. For end-user information about availability
+ zones, refer to the :doc:`user guide `.
- For example:
+Availability Zones are an end-user visible logical abstraction for partitioning
+a cloud without knowing the physical infrastructure. Availability zones are not
+modeled in the database; rather, they are defined by attaching specific
+metadata information to an :doc:`aggregate `. The addition of
+this specific metadata to an aggregate makes the aggregate visible from an
+end-user perspective and consequently allows users to schedule instances to a
+specific set of hosts, the ones belonging to the aggregate.
- .. code-block:: console
+However, despite their similarities, there are a few additional differences to
+note when comparing availability zones and host aggregates:
- $ openstack server create --image IMAGE --flavor m1.tiny \
- --key-name KEY --availability-zone ZONE:HOST:NODE \
- --nic net-id=UUID SERVER
+- A host can be part of multiple aggregates but it can only be in one
+ availability zone.
- .. note::
+- By default a host is part of a default availability zone even if it doesn't
+ belong to an aggregate. The name of this default availability zone can be
+  configured using the :oslo.config:option:`default_availability_zone`
+  config option.
- HOST and NODE are optional parameters. In such cases, use the
- ``--availability-zone ZONE::NODE``, ``--availability-zone ZONE:HOST`` or
- ``--availability-zone ZONE``.
+ .. warning::
-#. To specify which roles can launch an instance on a specified host, enable
- the ``create:forced_host`` option in the ``policy.json`` file. By default,
- this option is enabled for only the admin role. If you see ``Forbidden (HTTP
- 403)`` in return, then you are not using admin credentials.
+      The use of the default availability zone name in requests can be very
+      error-prone. Since the user can see the list of availability zones, they
+      have no way to know whether the default availability zone name
+      (currently ``nova``) is provided because a host belongs to an aggregate
+      whose AZ metadata key is set to ``nova``, or because there is at least
+      one host not belonging to any aggregate. Consequently, it is highly
+      recommended that users never boot an instance by specifying an explicit
+      AZ named ``nova`` and that operators never set the AZ metadata for an
+      aggregate to ``nova``. Doing so can cause problems because the instance
+      AZ information is then explicitly attached to ``nova``, which could
+      break later move operations when either the host is moved to another
+      aggregate or the user wants to migrate the instance.
-#. To view the list of valid zones, use the :command:`openstack availability
- zone list` command.
+ .. note::
- .. code-block:: console
+ Availability zone names must NOT contain ``:`` since it is used by admin
+ users to specify hosts where instances are launched in server creation.
+ See `Using availability zones to select hosts`_ for more information.
- $ openstack availability zone list
- +-----------+-------------+
- | Zone Name | Zone Status |
- +-----------+-------------+
- | zone1 | available |
- | zone2 | available |
- +-----------+-------------+
+In addition, other services, such as the :neutron-doc:`networking service <>`
+and the :cinder-doc:`block storage service <>`, also provide an availability
+zone feature. However, the implementation of these features differs vastly
+between these different services. Consult the documentation for these other
+services for more information on their implementation of this feature.
-#. To view the list of valid compute hosts, use the :command:`openstack host
- list` command.
- .. code-block:: console
+.. _availability-zones-with-placement:
- $ openstack host list
- +----------------+-------------+----------+
- | Host Name | Service | Zone |
- +----------------+-------------+----------+
- | compute01 | compute | nova |
- | compute02 | compute | nova |
- +----------------+-------------+----------+
+Availability Zones with Placement
+---------------------------------
+In order to use placement to honor availability zone requests, there must be
+placement aggregates that match the membership and UUID of nova host aggregates
+that you assign as availability zones. The same key in aggregate metadata used
+by the `AvailabilityZoneFilter` filter controls this function, and is enabled by
+setting :oslo.config:option:`scheduler.query_placement_for_availability_zone`
+to ``True``. As of 24.0.0 (Xena), this is the default.
-#. To view the list of valid compute nodes, use the :command:`openstack
- hypervisor list` command.
+.. code-block:: console
- .. code-block:: console
+ $ openstack --os-compute-api-version=2.53 aggregate create myaz
+ +-------------------+--------------------------------------+
+ | Field | Value |
+ +-------------------+--------------------------------------+
+ | availability_zone | None |
+ | created_at | 2018-03-29T16:22:23.175884 |
+ | deleted | False |
+ | deleted_at | None |
+ | id | 4 |
+ | name | myaz |
+ | updated_at | None |
+ | uuid | 019e2189-31b3-49e1-aff2-b220ebd91c24 |
+ +-------------------+--------------------------------------+
- $ openstack hypervisor list
- +----+---------------------+
- | ID | Hypervisor Hostname |
- +----+---------------------+
- | 1 | server2 |
- | 2 | server3 |
- | 3 | server4 |
- +----+---------------------+
+ $ openstack --os-compute-api-version=2.53 aggregate add host myaz node1
+ +-------------------+--------------------------------------+
+ | Field | Value |
+ +-------------------+--------------------------------------+
+ | availability_zone | None |
+ | created_at | 2018-03-29T16:22:23.175884 |
+ | deleted | False |
+ | deleted_at | None |
+ | hosts | [u'node1'] |
+ | id | 4 |
+   | name              | myaz                                 |
+ | updated_at | None |
+ | uuid | 019e2189-31b3-49e1-aff2-b220ebd91c24 |
+ +-------------------+--------------------------------------+
+
+ $ openstack aggregate set --property availability_zone=az002 myaz
+
+   $ openstack --os-placement-api-version=1.2 resource provider aggregate set \
+     --aggregate 019e2189-31b3-49e1-aff2-b220ebd91c24 \
+     815a5634-86fb-4e1e-8824-8a631fee3e06
+
+Without the above configuration, the `AvailabilityZoneFilter` filter must be
+enabled in :oslo.config:option:`filter_scheduler.enabled_filters` to retain
+proper behavior.
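+
+A sketch of enabling the filter in ``nova.conf``:
+
+.. code-block:: ini
+
+   [filter_scheduler]
+   enabled_filters = ...,AvailabilityZoneFilter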
+
+Implications for moving servers
+-------------------------------
+
+There are several ways to move a server to another host: evacuate, resize,
+cold migrate, live migrate, and unshelve. Move operations typically go through
+the scheduler to pick the target host *unless* a target host is specified and
+the request forces the server to that host by bypassing the scheduler. Only
+evacuate and live migrate can forcefully bypass the scheduler and move a
+server to a specified host; even then, it is highly recommended *not* to
+force the host and bypass the scheduler.
+
+With respect to availability zones, a server is restricted to a zone if:
+
+1. The server was created in a specific zone with the ``POST /servers`` request
+ containing the ``availability_zone`` parameter.
+
+2. If the server create request did not contain the ``availability_zone``
+ parameter but the API service is configured for
+ :oslo.config:option:`default_schedule_zone` then by default the server will
+ be scheduled to that zone.
+
+3. The shelved offloaded server was unshelved by specifying the
+ ``availability_zone`` with the ``POST /servers/{server_id}/action`` request
+ using microversion 2.77 or greater.
+
+4. :oslo.config:option:`cinder.cross_az_attach` is False,
+   :oslo.config:option:`default_schedule_zone` is None, and
+ the server is created without an explicit zone but with pre-existing volume
+ block device mappings. In that case the server will be created in the same
+ zone as the volume(s) if the volume zone is not the same as
+ :oslo.config:option:`default_availability_zone`. See `Resource affinity`_
+ for details.
+
+If the server was not created in a specific zone then it is free to be moved
+to other zones, i.e. the :ref:`AvailabilityZoneFilter `
+is a no-op.
+
+Knowing this, it is dangerous to force a server to another host with evacuate
+or live migrate if the server is restricted to a zone and is then forced to
+move to a host in another zone, because that will create an inconsistency in
+the internal tracking of where that server should live and may require manually
+updating the database for that server. For example, if a user creates a server
+in zone A and then the admin force live migrates the server to zone B, and then
+the user resizes the server, the scheduler will try to move it back to zone A
+which may or may not work, e.g. if the admin deleted or renamed zone A in the
+interim.
+
+Resource affinity
+~~~~~~~~~~~~~~~~~
+
+The :oslo.config:option:`cinder.cross_az_attach` configuration option can be
+used to restrict servers and the volumes attached to servers to the same
+availability zone.
+
+A typical use case for setting ``cross_az_attach=False`` is to enforce compute
+and block storage affinity, for example in a High Performance Compute cluster.
+
+By default ``cross_az_attach`` is True meaning that the volumes attached to
+a server can be in a different availability zone than the server. If set to
+False, then when creating a server with pre-existing volumes or attaching a
+volume to a server, the server and volume zone must match otherwise the
+request will fail. In addition, if the nova-compute service creates the volumes
+to attach to the server during server create, it will request that those
+volumes are created in the same availability zone as the server, which must
+exist in the block storage (cinder) service.
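+
+A minimal ``nova.conf`` sketch enforcing this affinity:
+
+.. code-block:: ini
+
+   [cinder]
+   cross_az_attach = False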
+
+As noted in the `Implications for moving servers`_ section, forcefully moving
+a server to another zone could also break affinity with attached volumes.
+
+.. note::
+
+ ``cross_az_attach=False`` is not widely used nor tested extensively and
+ thus suffers from some known issues:
+
+ * `Bug 1694844 `_. This is
+ fixed in the 21.0.0 (Ussuri) release by using the volume zone for the
+ server being created if the server is created without an explicit zone,
+ :oslo.config:option:`default_schedule_zone` is None, and the volume zone
+ does not match the value of
+ :oslo.config:option:`default_availability_zone`.
+ * `Bug 1781421 `_
+
+
+.. _using-availability-zones-to-select-hosts:
+
+Using availability zones to select hosts
+----------------------------------------
+
+We can combine availability zones with a specific host and/or node to select
+where an instance is launched. For example:
+
+.. code-block:: console
+
+ $ openstack server create --availability-zone ZONE:HOST:NODE ... SERVER
+
+.. note::
+
+ It is possible to use ``ZONE``, ``ZONE:HOST``, and ``ZONE::NODE``.
+
+.. note::
+
+ This is an admin-only operation by default, though you can modify this
+ behavior using the ``os_compute_api:servers:create:forced_host`` rule in
+ ``policy.yaml``.
+
+However, as discussed `previously `_, when
+launching instances in this manner the scheduler filters are not run. For this
+reason, this behavior is considered legacy behavior and, starting with the 2.74
+microversion, it is now possible to specify a host or node explicitly. For
+example:
+
+.. code-block:: console
+
+ $ openstack --os-compute-api-version 2.74 server create \
+ --host HOST --hypervisor-hostname HYPERVISOR ... SERVER
+
+.. note::
+
+ This is an admin-only operation by default, though you can modify this
+ behavior using the ``compute:servers:create:requested_destination`` rule in
+ ``policy.yaml``.
+
+This avoids the need to explicitly select an availability zone and ensures the
+scheduler filters are not bypassed.
+
+
+Usage
+-----
+
+Creating an availability zone (AZ) is done by associating metadata with a
+:doc:`host aggregate `. For this reason, the
+:command:`openstack` client provides the ability to create a host aggregate and
+associate it with an AZ in one command. For example, to create a new
+aggregate, associate it with an AZ, and add a host to it using the
+:command:`openstack` client, run:
+
+.. code-block:: console
+
+ $ openstack aggregate create --zone my-availability-zone my-aggregate
+ $ openstack aggregate add host my-aggregate my-host
+
+.. note::
+
+ While it is possible to add a host to multiple host aggregates, it is not
+   possible to add it to multiple availability zones. Attempting to add a
+ host to multiple host aggregates associated with differing availability
+ zones will result in a failure.
+
+Alternatively, you can set this metadata manually for an existing host
+aggregate. For example:
+
+.. code-block:: console
+
+ $ openstack aggregate set \
+ --property availability_zone=my-availability-zone my-aggregate
+
+To list all host aggregates and show information about a specific aggregate, in
+order to determine which AZ the host aggregate(s) belong to, run:
+
+.. code-block:: console
+
+ $ openstack aggregate list --long
+ $ openstack aggregate show my-aggregate
+
+Finally, to disassociate a host aggregate from an availability zone, run:
+
+.. code-block:: console
+
+ $ openstack aggregate unset --property availability_zone my-aggregate
+
+
+Configuration
+-------------
+
+Refer to :doc:`/admin/aggregates` for information on configuring both host
+aggregates and availability zones.
diff --git a/doc/source/admin/cells.rst b/doc/source/admin/cells.rst
new file mode 100644
index 00000000000..92f336a4e40
--- /dev/null
+++ b/doc/source/admin/cells.rst
@@ -0,0 +1,92 @@
+==================
+CellsV2 Management
+==================
+
+This section describes the various recommended practices/tips for running and
+maintaining CellsV2 for admins and operators. For more details regarding the
+basic concept of CellsV2 and its layout please see the main :doc:`/user/cellsv2-layout`
+page.
+
+.. _handling-cell-failures:
+
+Handling cell failures
+----------------------
+
+For an explanation of how ``nova-api`` handles cell failures, please see the
+`Handling Down Cells `__
+section of the Compute API guide. Below, you can find some recommended practices and
+considerations for effectively tolerating cell failure situations.
+
+Configuration considerations
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Since whether a cell is reachable or not is determined through timeouts, it is
+suggested to provide suitable values for the following settings based on your
+requirements.
+
+#. :oslo.config:option:`database.max_retries` is 10 by default, meaning that
+   each time a cell becomes unreachable, nova retries the connection 10 times
+   before declaring the cell "down".
+#. :oslo.config:option:`database.retry_interval` is 10 seconds and
+   :oslo.config:option:`oslo_messaging_rabbit.rabbit_retry_interval` is 1
+   second by default, meaning that each time a cell becomes unreachable, nova
+   retries every 10 seconds or every 1 second depending on whether it is a
+   database or a message queue problem.
+#. Nova also has a timeout value called ``CELL_TIMEOUT``, which is hardcoded
+   to 60 seconds; this is the total time that nova-api waits before returning
+   partial results for the "down" cells.
+
+The values of the above settings will affect the time required for nova to decide
+if a cell is unreachable and then take the necessary actions like returning
+partial results.
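+
+For example, to have nova declare a cell "down" more quickly, an operator
+might lower the retry settings in ``nova.conf``. The values below are an
+illustrative sketch only, not a recommendation:
+
+.. code-block:: ini
+
+   [database]
+   max_retries = 5
+   retry_interval = 2
+
+   [oslo_messaging_rabbit]
+   rabbit_retry_interval = 1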
+
+The operator can also control the results of certain actions, like listing
+servers and services, through the
+:oslo.config:option:`api.list_records_by_skipping_down_cells` config option.
+If this option is true, the results from the unreachable cells are skipped;
+if it is false, the request fails with an API error in situations where
+partial results cannot be computed.
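+
+For example, to skip unreachable cells when listing records rather than
+failing the request:
+
+.. code-block:: ini
+
+   [api]
+   list_records_by_skipping_down_cells = true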
+
+Disabling down cells
+~~~~~~~~~~~~~~~~~~~~
+
+While the temporary outage in the infrastructure is being fixed, the affected
+cells can be disabled so that they are removed from being scheduling candidates.
+To enable or disable a cell, use :command:`nova-manage cell_v2 update_cell
+--cell_uuid <cell_uuid> --disable`. See the :ref:`man-page-cells-v2` man page
+for details on command usage.
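+
+For example, assuming the UUID of the affected cell is known (the UUID shown
+is illustrative):
+
+.. code-block:: console
+
+   # nova-manage cell_v2 update_cell \
+       --cell_uuid 9b53c4a9-24c1-4b0c-a053-bdcd6a8ed2f4 --disable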
+
+Known issues
+~~~~~~~~~~~~
+
+1. **Services and Performance:** In case a cell is down during the startup of nova
+   services, there is a chance that the services hang because they cannot
+   connect to all the cell databases that might be required for certain calculations
+ and initializations. An example scenario of this situation is if
+ :oslo.config:option:`upgrade_levels.compute` is set to ``auto`` then the
+ ``nova-api`` service hangs on startup if there is at least one unreachable
+ cell. This is because it needs to connect to all the cells to gather
+   information on each compute service's version to determine the compute
+   version cap to use. The current workaround is to pin
+   :oslo.config:option:`upgrade_levels.compute` to a particular version like
+   "rocky" to get the service up in such situations. See `bug 1815697
+ `__ for more details. Also note
+   that, in general, when cells are not reachable, operations that must hit
+   all the cells may experience a certain "slowness" because of the
+   aforementioned configurable timeout/retry values.
+
+.. _cells-counting-quotas:
+
+2. **Counting Quotas:** Another known issue is in the current approach to
+   counting quotas, where we query each cell database for the used resources
+   and aggregate them, which makes the count sensitive to temporary cell
+   outages. While a cell is unavailable, we cannot count resource usage
+   residing in that cell database, and things behave as though more quota is
+   available than should be. That is,
+ if a tenant has used all of their quota and part of it is in cell A and cell A
+ goes offline temporarily, that tenant will suddenly be able to allocate more
+ resources than their limit (assuming cell A returns, the tenant will have more
+ resources allocated than their allowed quota).
+
+ .. note:: Starting in the Train (20.0.0) release, it is possible to
+ configure counting of quota usage from the placement service and
+ API database to make quota usage calculations resilient to down or
+ poor-performing cells in a multi-cell environment. See the
+ :doc:`quotas documentation` for more details.
diff --git a/doc/source/admin/common/nova-show-usage-statistics-for-hosts-instances.rst b/doc/source/admin/common/nova-show-usage-statistics-for-hosts-instances.rst
index cab607bbdb3..ef4e1fcf00b 100644
--- a/doc/source/admin/common/nova-show-usage-statistics-for-hosts-instances.rst
+++ b/doc/source/admin/common/nova-show-usage-statistics-for-hosts-instances.rst
@@ -30,7 +30,6 @@ The following examples show the host usage statistics for a host called
| devstack | compute | nova |
| devstack | network | internal |
| devstack | scheduler | internal |
- | devstack | consoleauth | internal |
+-----------+-------------+----------+
* Get a summary of resource usage of all of the instances running on the host:
@@ -95,7 +94,7 @@ Show instance usage statistics
have a standard format as below. Before microversion 2.48, each hypervisor
had its own format. For more details on diagnostics response message see
`server diagnostics api
- `__
+ `__
documentation.
.. code-block:: console
diff --git a/doc/source/admin/config-drive.rst b/doc/source/admin/config-drive.rst
new file mode 100644
index 00000000000..05f553478b9
--- /dev/null
+++ b/doc/source/admin/config-drive.rst
@@ -0,0 +1,109 @@
+=============
+Config drives
+=============
+
+.. note::
+
+ This section provides deployment information about the config drive feature.
+ For end-user information about the config drive feature and instance metadata
+ in general, refer to the :doc:`user guide `.
+
+Config drives are special drives that are attached to an instance when it boots.
+The instance can mount this drive and read files from it to get information that
+is normally available through :doc:`the metadata service
+`.
+
+There are many use cases for the config drive. One such use case is to pass a
+networking configuration when you do not use DHCP to assign IP addresses to
+instances. For example, you might pass the IP address configuration for the
+instance through the config drive, which the instance can mount and access
+before you configure the network settings for the instance. Another common
+reason to use config drives is load. If running something like the OpenStack
+puppet providers in your instances, they can hit the :doc:`metadata servers
+` every fifteen minutes, simultaneously for every
+instance you have. They are just checking in, and building facts, but it's not
+insignificant load. With a config drive, that becomes a local (cached) disk
+read. Finally, using a config drive means you're not dependent on the metadata
+service being up, reachable, or performing well when you do things like
+rebooting an instance that runs `cloud-init`_ at boot.
+
+Any modern guest operating system that is capable of mounting an ISO 9660 or
+VFAT file system can use the config drive.
+
+
+Requirements and guidelines
+---------------------------
+
+To use the config drive, you must meet the following requirements for the
+compute host and image.
+
+.. rubric:: Compute host requirements
+
+The following virt drivers support the config drive: libvirt,
+Hyper-V, VMware, and (since 17.0.0 Queens) PowerVM. The Bare Metal service also
+supports the config drive.
+
+- To use config drives with libvirt or VMware, you must first
+ install the :command:`genisoimage` package on each compute host. Use the
+ :oslo.config:option:`mkisofs_cmd` config option to set the path where you
+  install the :command:`genisoimage` program, as shown in the example after
+  this list. If :command:`genisoimage` is in the same path as the
+  :program:`nova-compute` service, you do not need to set this flag.
+
+- To use config drives with Hyper-V, you must set the
+ :oslo.config:option:`mkisofs_cmd` config option to the full path to an
+ :command:`mkisofs.exe` installation. Additionally, you must set the
+  :oslo.config:option:`hyperv.qemu_img_cmd` config option to the full path to
+  a :command:`qemu-img` command installation.
+
+- To use config drives with PowerVM or the Bare Metal service, you do not need
+ to prepare anything.
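+
+For example, if :command:`genisoimage` is installed outside the search path of
+the :program:`nova-compute` service, you might point nova at it explicitly
+(the path shown is illustrative):
+
+.. code-block:: ini
+
+   [DEFAULT]
+   mkisofs_cmd = /usr/local/bin/genisoimage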
+
+.. rubric:: Image requirements
+
+An image built with a recent version of the `cloud-init`_ package can
+automatically access metadata passed through the config drive. The cloud-init
+package version 0.7.1 works with Ubuntu, Fedora-based images (such as Red Hat
+Enterprise Linux), and openSUSE-based images (such as SUSE Linux Enterprise
+Server). If an image does not have the cloud-init package installed, you must
+customize the image to run a script that mounts the config drive on boot, reads
+the data from the drive, and takes appropriate action such as adding the public
+key to an account. For more details about how data is organized on the config
+drive, refer to the :ref:`user guide `.
+
+
+Configuration
+-------------
+
+The :program:`nova-compute` service accepts the following config drive-related
+options:
+
+- :oslo.config:option:`api.config_drive_skip_versions`
+- :oslo.config:option:`force_config_drive`
+- :oslo.config:option:`config_drive_format`
+
+If using the HyperV compute driver, the following additional options are
+supported:
+
+- :oslo.config:option:`hyperv.config_drive_cdrom`
+
+For example, to ensure nova always provides a config drive to instances but
+versions ``2018-08-27`` (Rocky) and ``2017-02-22`` (Ocata) are skipped, add the
+following to :file:`nova.conf`:
+
+.. code-block:: ini
+
+ [DEFAULT]
+ force_config_drive = True
+
+ [api]
+ config_drive_skip_versions = 2018-08-27 2017-02-22
+
+.. note::
+
+ The ``img_config_drive`` image metadata property can be used to force enable
+ the config drive. In addition, users can explicitly request a config drive
+ when booting instances. For more information, refer to the :ref:`user guide
+ `.
+
+.. _cloud-init: https://cloudinit.readthedocs.io/en/latest/
diff --git a/doc/source/admin/configuration/api.rst b/doc/source/admin/configuration/api.rst
index 979169ee8f8..a8c2e6a0f4a 100644
--- a/doc/source/admin/configuration/api.rst
+++ b/doc/source/admin/configuration/api.rst
@@ -6,8 +6,9 @@ The Compute API, is the component of OpenStack Compute that receives and
responds to user requests, whether they be direct API calls, or via the CLI
tools or dashboard.
+
Configure Compute API password handling
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+---------------------------------------
The OpenStack Compute API enables users to specify an administrative password
when they create, rebuild, rescue or evacuate a server instance.
diff --git a/doc/source/admin/configuration/cells.rst b/doc/source/admin/configuration/cells.rst
deleted file mode 100644
index ddc2c1775aa..00000000000
--- a/doc/source/admin/configuration/cells.rst
+++ /dev/null
@@ -1,295 +0,0 @@
-==========
-Cells (v1)
-==========
-
-.. warning::
-
- Configuring and implementing Cells v1 is not recommended for new deployments
- of the Compute service (nova). Cells v2 replaces cells v1, and v2 is
- required to install or upgrade the Compute service to the 15.0.0 Ocata
- release. More information on cells v2 can be found in :doc:`/user/cells`.
-
-`Cells` functionality enables you to scale an OpenStack Compute cloud in a more
-distributed fashion without having to use complicated technologies like
-database and message queue clustering. It supports very large deployments.
-
-When this functionality is enabled, the hosts in an OpenStack Compute cloud are
-partitioned into groups called cells. Cells are configured as a tree. The
-top-level cell should have a host that runs a ``nova-api`` service, but no
-``nova-compute`` services. Each child cell should run all of the typical
-``nova-*`` services in a regular Compute cloud except for ``nova-api``. You can
-think of cells as a normal Compute deployment in that each cell has its own
-database server and message queue broker.
-
-The ``nova-cells`` service handles communication between cells and selects
-cells for new instances. This service is required for every cell. Communication
-between cells is pluggable, and currently the only option is communication
-through RPC.
-
-Cells scheduling is separate from host scheduling. ``nova-cells`` first picks
-a cell. Once a cell is selected and the new build request reaches its
-``nova-cells`` service, it is sent over to the host scheduler in that cell and
-the build proceeds as it would have without cells.
-
-Cell configuration options
-~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-.. todo:: This is duplication. We should be able to use the
- oslo.config.sphinxext module to generate this for us
-
-Cells are disabled by default. All cell-related configuration options appear in
-the ``[cells]`` section in ``nova.conf``. The following cell-related options
-are currently supported:
-
-``enable``
- Set to ``True`` to turn on cell functionality. Default is ``false``.
-
-``name``
- Name of the current cell. Must be unique for each cell.
-
-``capabilities``
- List of arbitrary ``key=value`` pairs defining capabilities of the current
- cell. Values include ``hypervisor=xenserver;kvm,os=linux;windows``.
-
-``call_timeout``
- How long in seconds to wait for replies from calls between cells.
-
-``scheduler_filter_classes``
- Filter classes that the cells scheduler should use. By default, uses
- ``nova.cells.filters.all_filters`` to map to all cells filters included with
- Compute.
-
-``scheduler_weight_classes``
- Weight classes that the scheduler for cells uses. By default, uses
- ``nova.cells.weights.all_weighers`` to map to all cells weight algorithms
- included with Compute.
-
-``ram_weight_multiplier``
- Multiplier used to weight RAM. Negative numbers indicate that Compute should
- stack VMs on one host instead of spreading out new VMs to more hosts in the
- cell. The default value is 10.0.
-
-Configure the API (top-level) cell
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-The cell type must be changed in the API cell so that requests can be proxied
-through ``nova-cells`` down to the correct cell properly. Edit the
-``nova.conf`` file in the API cell, and specify ``api`` in the ``cell_type``
-key:
-
-.. code-block:: ini
-
- [DEFAULT]
- compute_api_class=nova.compute.cells_api.ComputeCellsAPI
- # ...
-
- [cells]
- cell_type= api
-
-Configure the child cells
-~~~~~~~~~~~~~~~~~~~~~~~~~
-
-Edit the ``nova.conf`` file in the child cells, and specify ``compute`` in the
-``cell_type`` key:
-
-.. code-block:: ini
-
- [DEFAULT]
- # Disable quota checking in child cells. Let API cell do it exclusively.
- quota_driver=nova.quota.NoopQuotaDriver
-
- [cells]
- cell_type = compute
-
-Configure the database in each cell
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-Before bringing the services online, the database in each cell needs to be
-configured with information about related cells. In particular, the API cell
-needs to know about its immediate children, and the child cells must know about
-their immediate agents. The information needed is the ``RabbitMQ`` server
-credentials for the particular cell.
-
-Use the :command:`nova-manage cell create` command to add this information to
-the database in each cell:
-
-.. code-block:: console
-
- # nova-manage cell create -h
- usage: nova-manage cell create [-h] [--name ]
- [--cell_type ]
- [--username ] [--password ]
- [--broker_hosts ]
- [--hostname ] [--port ]
- [--virtual_host ]
- [--woffset ] [--wscale ]
-
- optional arguments:
- -h, --help show this help message and exit
- --name Name for the new cell
- --cell_type
- Whether the cell is parent/api or child/compute
- --username
- Username for the message broker in this cell
- --password
- Password for the message broker in this cell
- --broker_hosts
- Comma separated list of message brokers in this cell.
- Each Broker is specified as hostname:port with both
- mandatory. This option overrides the --hostname and
- --port options (if provided).
- --hostname
- Address of the message broker in this cell
- --port Port number of the message broker in this cell
- --virtual_host
- The virtual host of the message broker in this cell
- --woffset
- --wscale
-
-As an example, assume an API cell named ``api`` and a child cell named
-``cell1``.
-
-Within the ``api`` cell, specify the following ``RabbitMQ`` server information:
-
-.. code-block:: ini
-
- rabbit_host=10.0.0.10
- rabbit_port=5672
- rabbit_username=api_user
- rabbit_password=api_passwd
- rabbit_virtual_host=api_vhost
-
-Within the ``cell1`` child cell, specify the following ``RabbitMQ`` server
-information:
-
-.. code-block:: ini
-
- rabbit_host=10.0.1.10
- rabbit_port=5673
- rabbit_username=cell1_user
- rabbit_password=cell1_passwd
- rabbit_virtual_host=cell1_vhost
-
-You can run this in the API cell as root:
-
-.. code-block:: console
-
- # nova-manage cell create --name cell1 --cell_type child \
- --username cell1_user --password cell1_passwd --hostname 10.0.1.10 \
- --port 5673 --virtual_host cell1_vhost --woffset 1.0 --wscale 1.0
-
-Repeat the previous steps for all child cells.
-
-In the child cell, run the following, as root:
-
-.. code-block:: console
-
- # nova-manage cell create --name api --cell_type parent \
- --username api_user --password api_passwd --hostname 10.0.0.10 \
- --port 5672 --virtual_host api_vhost --woffset 1.0 --wscale 1.0
-
-To customize the Compute cells, use the configuration option settings
-documented above.
-
-Cell scheduling configuration
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-To determine the best cell to use to launch a new instance, Compute uses a set
-of filters and weights defined in the ``/etc/nova/nova.conf`` file. The
-following options are available to prioritize cells for scheduling:
-
-``scheduler_filter_classes``
- List of filter classes. By default ``nova.cells.filters.all_filters``
- is specified, which maps to all cells filters included with Compute
- (see the section called :ref:`Filters `).
-
-``scheduler_weight_classes``
- List of weight classes. By default ``nova.cells.weights.all_weighers`` is
- specified, which maps to all cell weight algorithms included with Compute.
- The following modules are available:
-
- ``mute_child``
- Downgrades the likelihood of child cells being chosen for scheduling
- requests, which haven't sent capacity or capability updates in a while.
- Options include ``mute_weight_multiplier`` (multiplier for mute children;
- value should be negative).
-
- ``ram_by_instance_type``
- Select cells with the most RAM capacity for the instance type being
- requested. Because higher weights win, Compute returns the number of
- available units for the instance type requested. The
- ``ram_weight_multiplier`` option defaults to 10.0 that adds to the weight
- by a factor of 10.
-
- Use a negative number to stack VMs on one host instead of spreading
- out new VMs to more hosts in the cell.
-
- ``weight_offset``
- Allows modifying the database to weight a particular cell. You can use this
- when you want to disable a cell (for example, '0'), or to set a default
- cell by making its ``weight_offset`` very high (for example,
- ``999999999999999``). The highest weight will be the first cell to be
- scheduled for launching an instance.
-
-Additionally, the following options are available for the cell scheduler:
-
-``scheduler_retries``
- Specifies how many times the scheduler tries to launch a new instance when no
- cells are available (default=10).
-
-``scheduler_retry_delay``
- Specifies the delay (in seconds) between retries (default=2).
-
-As an admin user, you can also add a filter that directs builds to a particular
-cell. The ``policy.json`` file must have a line with
-``"cells_scheduler_filter:TargetCellFilter" : "is_admin:True"`` to let an admin
-user specify a scheduler hint to direct a build to a particular cell.
-
-Optional cell configuration
-~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-Cells store all inter-cell communication data, including user names and
-passwords, in the database. Because the cells data is not updated very
-frequently, use the ``[cells]cells_config`` option to specify a JSON file to
-store cells data. With this configuration, the database is no longer consulted
-when reloading the cells data. The file must have columns present in the Cell
-model (excluding common database fields and the ``id`` column). You must
-specify the queue connection information through a ``transport_url`` field,
-instead of ``username``, ``password``, and so on.
-
-The ``transport_url`` has the following form::
-
- rabbit://USERNAME:PASSWORD@HOSTNAME:PORT/VIRTUAL_HOST
-
-The scheme can only be ``rabbit``.
-
-The following sample shows this optional configuration:
-
-.. code-block:: json
-
- {
- "parent": {
- "name": "parent",
- "api_url": "http://api.example.com:8774",
- "transport_url": "rabbit://rabbit.example.com",
- "weight_offset": 0.0,
- "weight_scale": 1.0,
- "is_parent": true
- },
- "cell1": {
- "name": "cell1",
- "api_url": "http://api.example.com:8774",
- "transport_url": "rabbit://rabbit1.example.com",
- "weight_offset": 0.0,
- "weight_scale": 1.0,
- "is_parent": false
- },
- "cell2": {
- "name": "cell2",
- "api_url": "http://api.example.com:8774",
- "transport_url": "rabbit://rabbit2.example.com",
- "weight_offset": 0.0,
- "weight_scale": 1.0,
- "is_parent": false
- }
- }
diff --git a/doc/source/admin/configuration/cross-cell-resize.rst b/doc/source/admin/configuration/cross-cell-resize.rst
new file mode 100644
index 00000000000..d17ee24109a
--- /dev/null
+++ b/doc/source/admin/configuration/cross-cell-resize.rst
@@ -0,0 +1,309 @@
+=================
+Cross-cell resize
+=================
+
+This document describes how to configure nova for cross-cell resize.
+For information on :term:`same-cell resize `, refer to
+:doc:`/admin/configuration/resize`.
+
+Historically resizing and cold migrating a server has been explicitly
+`restricted`_ to within the same cell in which the server already exists.
+The cross-cell resize feature allows configuring nova to allow resizing
+and cold migrating servers across cells.
+
+The full design details are in the `Ussuri spec`_ and there is a `video`_ from
+a summit talk with a high-level overview.
+
+.. _restricted: https://opendev.org/openstack/nova/src/tag/20.0.0/nova/conductor/tasks/migrate.py#L164
+.. _Ussuri spec: https://specs.openstack.org/openstack/nova-specs/specs/ussuri/approved/cross-cell-resize.html
+.. _video: https://www.openstack.org/videos/summits/denver-2019/whats-new-in-nova-cellsv2
+
+Use case
+--------
+
+There are many reasons to use multiple cells in a nova deployment beyond just
+scaling the database and message queue. Cells can also be used to shard a
+deployment by hardware generation and feature functionality. When sharding by
+hardware generation, it would be natural to setup a host aggregate for each
+cell and map flavors to the aggregate. Then when it comes time to decommission
+old hardware the deployer could provide new flavors and request that users
+resize to the new flavors, before some deadline, which under the covers will
+migrate their servers to the new cell with newer hardware. Administrators
+could also just cold migrate the servers during a maintenance window to the
+new cell.
+
+Requirements
+------------
+
+To enable cross-cell resize functionality, the following conditions must be
+met.
+
+Minimum compute versions
+~~~~~~~~~~~~~~~~~~~~~~~~
+
+All compute services must be upgraded to 21.0.0 (Ussuri) or later and not be
+pinned to older RPC API versions in
+:oslo.config:option:`upgrade_levels.compute`.
+
+Policy configuration
+~~~~~~~~~~~~~~~~~~~~
+
+The policy rule ``compute:servers:resize:cross_cell`` controls who can perform
+a cross-cell resize or cold migrate operation. By default the policy disables
+the functionality for *all* users. Opting in to the behavior does not require
+a microversion, only passing the policy check. As such, it is recommended to
+start by allowing only certain users to be able to perform a cross-cell resize
+or cold migration, for example by setting the rule to ``rule:admin_api`` or
+some other rule for test teams but not normal users until you are comfortable
+supporting the feature.
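+
+For example, to limit the operation to administrators, a ``policy.yaml`` entry
+along these lines could be used:
+
+.. code-block:: yaml
+
+   "compute:servers:resize:cross_cell": "rule:admin_api"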
+
+Compute driver
+~~~~~~~~~~~~~~
+
+There are no special compute driver implementations required to support the
+feature; it is built on existing driver interfaces used during resize and
+shelve/unshelve. However, only the libvirt compute driver has integration
+testing in the ``nova-multi-cell`` CI job.
+
+Networking
+~~~~~~~~~~
+
+The networking API must expose the ``Port Bindings Extended`` API extension,
+which was added in the 13.0.0 (Rocky) release of Neutron.
+
+Notifications
+-------------
+
+The types of events and their payloads remain unchanged. The major difference
+from same-cell resize is that the *publisher_id* may be different in some cases
+since some events are sent from the conductor service rather than a compute
+service. For example, with same-cell resize the
+``instance.resize_revert.start`` notification is sent from the source compute
+host in the `finish_revert_resize`_ method but with cross-cell resize that
+same notification is sent from the conductor service.
+
+Obviously the actual message queue sending the notifications would be different
+for the source and target cells assuming they use separate transports.
+
+.. _finish_revert_resize: https://opendev.org/openstack/nova/src/tag/20.0.0/nova/compute/manager.py#L4326
+
+Instance actions
+----------------
+
+The overall instance actions named ``resize``, ``confirmResize`` and
+``revertResize`` are the same as for same-cell resize. However, the *events* which
+make up those actions will be different for cross-cell resize since the event
+names are generated based on the compute service methods involved in the
+operation and there are different methods involved in a cross-cell resize.
+This is important for triage when a cross-cell resize operation fails.
+
+Scheduling
+----------
+
+The :ref:`CrossCellWeigher ` is enabled by default. When a
+scheduling request allows selecting compute nodes from another cell the weigher
+will by default *prefer* hosts within the source cell over hosts from another
+cell. However, this behavior is configurable using the
+:oslo.config:option:`filter_scheduler.cross_cell_move_weight_multiplier`
+configuration option if, for example, you want to drain old cells when resizing
+or cold migrating.
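+
+For example, to make the scheduler prefer moving servers *out* of their
+current cell when draining old cells, the multiplier can be set to a negative
+value (the magnitude shown is illustrative):
+
+.. code-block:: ini
+
+   [filter_scheduler]
+   cross_cell_move_weight_multiplier = -1000.0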
+
+Code flow
+---------
+
+The end-user experience, i.e. the status transitions, is meant to be
+unchanged. A successfully cross-cell resized server will go to
+``VERIFY_RESIZE`` status and from there the user can either confirm or revert
+the resized server using
+the normal `confirmResize`_ and `revertResize`_ server action APIs.
+
+Under the covers there are some differences from a traditional same-cell
+resize:
+
+* There is no inter-compute interaction. Everything is synchronously
+ `orchestrated`_ from the (super)conductor service. This uses the
+ :oslo.config:option:`long_rpc_timeout` configuration option.
+
+* The orchestration tasks in the (super)conductor service are in charge of
+ creating a copy of the instance and its related records in the target cell
+ database at the beginning of the operation, deleting them in case of rollback
+ or when the resize is confirmed/reverted, and updating the
+ ``instance_mappings`` table record in the API database.
+
+* Non-volume-backed servers will have their root disk uploaded to the image
+ service as a temporary snapshot image just like during the `shelveOffload`_
+ operation. When finishing the resize on the destination host in the target
+ cell that snapshot image will be used to spawn the guest and then the
+ snapshot image will be deleted.
+
+.. _confirmResize: https://docs.openstack.org/api-ref/compute/#confirm-resized-server-confirmresize-action
+.. _revertResize: https://docs.openstack.org/api-ref/compute/#revert-resized-server-revertresize-action
+.. _orchestrated: https://opendev.org/openstack/nova/src/branch/master/nova/conductor/tasks/cross_cell_migrate.py
+.. _shelveOffload: https://docs.openstack.org/api-ref/compute/#shelf-offload-remove-server-shelveoffload-action
+
+Sequence diagram
+----------------
+
+The following diagrams are current as of the 21.0.0 (Ussuri) release.
+
+.. NOTE(mriedem): These diagrams could be more detailed, for example breaking
+ down the individual parts of the conductor tasks and the calls made on
+ the source and dest compute to the virt driver, cinder and neutron, but
+ the diagrams could (1) get really complex and (2) become inaccurate with
+ changes over time. If there are particular sub-sequences that should have
+ diagrams I would suggest putting those into separate focused diagrams.
+
+Resize
+~~~~~~
+
+This is the sequence of calls to get the server to ``VERIFY_RESIZE`` status.
+
+.. seqdiag::
+
+ seqdiag {
+ API; Conductor; Scheduler; Source; Destination;
+ edge_length = 300;
+ span_height = 15;
+ activation = none;
+ default_note_color = white;
+
+ API ->> Conductor [label = "cast", note = "resize_instance/migrate_server"];
+ Conductor => Scheduler [label = "MigrationTask", note = "select_destinations"];
+ Conductor -> Conductor [label = "TargetDBSetupTask"];
+ Conductor => Destination [label = "PrepResizeAtDestTask", note = "prep_snapshot_based_resize_at_dest"];
+ Conductor => Source [label = "PrepResizeAtSourceTask", note = "prep_snapshot_based_resize_at_source"];
+ Conductor => Destination [label = "FinishResizeAtDestTask", note = "finish_snapshot_based_resize_at_dest"];
+ Conductor -> Conductor [label = "FinishResizeAtDestTask", note = "update instance mapping"];
+ }
+
+Confirm resize
+~~~~~~~~~~~~~~
+
+This is the sequence of calls when confirming `or deleting`_ a server in
+``VERIFY_RESIZE`` status.
+
+.. seqdiag::
+
+ seqdiag {
+ API; Conductor; Source;
+ edge_length = 300;
+ span_height = 15;
+ activation = none;
+ default_note_color = white;
+
+ API ->> Conductor [label = "cast (or call if deleting)", note = "confirm_snapshot_based_resize"];
+
+ // separator to indicate everything after this is driven by ConfirmResizeTask
+ === ConfirmResizeTask ===
+
+ Conductor => Source [label = "call", note = "confirm_snapshot_based_resize_at_source"];
+ Conductor -> Conductor [note = "hard delete source cell instance"];
+ Conductor -> Conductor [note = "update target cell instance status"];
+
+ }
+
+.. _or deleting: https://opendev.org/openstack/nova/src/tag/20.0.0/nova/compute/api.py#L2171
+
+Revert resize
+~~~~~~~~~~~~~
+
+This is the sequence of calls when reverting a server in ``VERIFY_RESIZE``
+status.
+
+.. seqdiag::
+
+ seqdiag {
+ API; Conductor; Source; Destination;
+ edge_length = 300;
+ span_height = 15;
+ activation = none;
+ default_note_color = white;
+
+ API ->> Conductor [label = "cast", note = "revert_snapshot_based_resize"];
+
+ // separator to indicate everything after this is driven by RevertResizeTask
+ === RevertResizeTask ===
+
+ Conductor -> Conductor [note = "update records from target to source cell"];
+ Conductor -> Conductor [note = "update instance mapping"];
+ Conductor => Destination [label = "call", note = "revert_snapshot_based_resize_at_dest"];
+ Conductor -> Conductor [note = "hard delete target cell instance"];
+ Conductor => Source [label = "call", note = "finish_revert_snapshot_based_resize_at_source"];
+
+ }
+
+Limitations
+-----------
+
+The following are known not to be supported in the code yet:
+
+* Instances with ports attached that have
+  :doc:`bandwidth-aware </admin/ports-with-resource-requests>` resource
+  provider allocations. Nova falls back to a same-cell resize if the server
+  has such ports.
+* Rescheduling to alternative hosts within the same target cell in case the
+ primary selected host fails the ``prep_snapshot_based_resize_at_dest`` call.
+
+These may not work since they have not been validated by integration testing:
+
+* Instances with PCI devices attached.
+* Instances with a NUMA topology.
+
+Other limitations:
+
+* The config drive associated with the server, if there is one, will be
+  re-generated on the destination host in the target cell. Therefore, if the
+  server was created with `personality files`_, they will be lost. However,
+  this is no worse than `evacuating`_ a server that had a config drive when
+  the source and destination compute hosts are not on shared storage, or when
+  shelve offloading and unshelving a server with a config drive. If necessary,
+  the resized server can be rebuilt to regain the personality files.
+* The ``_poll_unconfirmed_resizes`` periodic task, which can be
+  :oslo.config:option:`configured <resize_confirm_window>` to automatically
+  confirm pending resizes on the target host (see the sketch after this
+  list), *might* not support cross-cell resizes because doing so would
+  require an :ref:`up-call <upcall>` to the API to confirm the resize and
+  clean up the source cell database.
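+
+As referenced in the list above, a minimal sketch of the confirmation window
+configuration on the target compute host (the value is an illustrative
+assumption, in seconds):
+
+.. code-block:: ini
+
+   [DEFAULT]
+   # Automatically confirm resizes pending longer than this; 0, the
+   # default, disables automatic confirmation.
+   resize_confirm_window = 3600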
+
+.. _personality files: https://docs.openstack.org/api-guide/compute/server_concepts.html#server-personality
+.. _evacuating: https://docs.openstack.org/api-ref/compute/#evacuate-server-evacuate-action
+
+Troubleshooting
+---------------
+
+Timeouts
+~~~~~~~~
+
+Configure a :ref:`service user <user_token_timeout>` in case the user token
+times out, e.g. during the snapshot and download of a large server image.
+
+If RPC calls are timing out with a ``MessagingTimeout`` error in the logs,
+check the :oslo.config:option:`long_rpc_timeout` option to see if it is high
+enough, though the default value (30 minutes) should be sufficient.
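+
+Both settings live in ``nova.conf``. The following is only a rough sketch;
+the credential values are illustrative assumptions for this example:
+
+.. code-block:: ini
+
+   [DEFAULT]
+   # Timeout (seconds) used for the long-running cross-cell RPC calls;
+   # 1800 (30 minutes) is the default.
+   long_rpc_timeout = 1800
+
+   [service_user]
+   # Send a service token along with the user token so requests do not
+   # fail if the user token expires during a long-running operation.
+   send_service_user_token = true
+   auth_type = password
+   auth_url = http://controller/identity
+   username = nova
+   password = secret
+   user_domain_name = Default
+   project_name = service
+   project_domain_name = Default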
+
+Recovering from failure
+~~~~~~~~~~~~~~~~~~~~~~~
+
+The orchestration tasks in conductor that drive the operation are built with
+rollbacks so each part of the operation can be rolled back in order if a
+subsequent task fails.
+
+The thing to keep in mind is that the ``instance_mappings`` record in the API
+DB is the authority on where the instance "lives", and that is the cell the
+API will use to show the instance in a ``GET /servers/{server_id}`` call or
+for any action performed on the server, including deleting it.
+
+So if the resize fails and a copy of the instance and its related records is
+left in the target cell, the tasks should automatically delete them, but if
+not, you can hard-delete the records from whichever cell is *not* the one in
+the ``instance_mappings`` table.
+
+If the instance is in ``ERROR`` status, check the logs of both the source
+and destination compute services to see if there is anything that needs to
+be manually recovered, for example volume attachments or port bindings, and
+also check the (super)conductor service logs. Assuming volume attachments
+and port bindings are OK (current and pointing at the correct host), then
+try hard rebooting the server to get it back to ``ACTIVE`` status. If that
+fails, you may need to `rebuild`_ the server on the source host. Note that
+the guest's disks on the source host are not deleted until the resize is
+confirmed, so if there is an issue prior to confirmation, or confirmation
+itself fails, the guest disks should still be available for rebuilding the
+instance if necessary.
+
+.. _rebuild: https://docs.openstack.org/api-ref/compute/#rebuild-server-rebuild-action
diff --git a/doc/source/admin/configuration/hypervisor-basics.rst b/doc/source/admin/configuration/hypervisor-basics.rst
deleted file mode 100644
index 9ac1e785e1f..00000000000
--- a/doc/source/admin/configuration/hypervisor-basics.rst
+++ /dev/null
@@ -1,14 +0,0 @@
-===============================
-Hypervisor Configuration Basics
-===============================
-
-The node where the ``nova-compute`` service is installed and operates on the
-same node that runs all of the virtual machines. This is referred to as the
-compute node in this guide.
-
-By default, the selected hypervisor is KVM. To change to another hypervisor,
-change the ``virt_type`` option in the ``[libvirt]`` section of ``nova.conf``
-and restart the ``nova-compute`` service.
-
-Specific options for particular hypervisors can be found in
-the following sections.
diff --git a/doc/source/admin/configuration/hypervisor-hyper-v.rst b/doc/source/admin/configuration/hypervisor-hyper-v.rst
index 0959c7cad1b..79d72cad052 100644
--- a/doc/source/admin/configuration/hypervisor-hyper-v.rst
+++ b/doc/source/admin/configuration/hypervisor-hyper-v.rst
@@ -19,8 +19,9 @@ compute nodes:
- Windows Server 2012 R2 Server and Core (with the Hyper-V role enabled)
- Hyper-V Server
+
Hyper-V configuration
-~~~~~~~~~~~~~~~~~~~~~
+---------------------
The only OpenStack services required on a Hyper-V node are ``nova-compute`` and
``neutron-hyperv-agent``. Regarding the resources needed for this host you have
@@ -34,7 +35,7 @@ configuration information should work for the Windows 2012 and 2012 R2
platforms.
Local storage considerations
-----------------------------
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~
The Hyper-V compute node needs to have ample storage for storing the virtual
machine images running on the compute nodes. You may use a single volume for
@@ -43,7 +44,7 @@ all, or partition it into an OS volume and VM volume.
.. _configure-ntp-windows:
Configure NTP
--------------
+~~~~~~~~~~~~~
Network time services must be configured to ensure proper operation of the
OpenStack nodes. To set network time on your Windows host you must run the
@@ -52,7 +53,7 @@ following commands:
.. code-block:: bat
C:\>net stop w32time
- C:\>w32tm /config /manualpeerlist:pool.ntp.org,0x8 /syncfromflags:MANUAL
+ C:\>w32tm /config "/manualpeerlist:pool.ntp.org,0x8" /syncfromflags:MANUAL
C:\>net start w32time
Keep in mind that the node will have to be time synchronized with the other
@@ -61,7 +62,7 @@ server. Note that in case of an Active Directory environment, you may do this
only for the AD Domain Controller.
Configure Hyper-V virtual switching
------------------------------------
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Information regarding the Hyper-V virtual Switch can be found in the `Hyper-V
Virtual Switch Overview`__.
@@ -83,7 +84,7 @@ following PowerShell may be used:
__ https://technet.microsoft.com/en-us/library/hh831823.aspx
Enable iSCSI initiator service
-------------------------------
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
To prepare the Hyper-V node to be able to attach to volumes provided by cinder
you must first make sure the Windows iSCSI initiator service is running and
@@ -95,7 +96,7 @@ started automatically.
PS C:\> Start-Service MSiSCSI
Configure shared nothing live migration
----------------------------------------
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Detailed information on the configuration of live migration can be found in
`this guide`__
@@ -158,7 +159,7 @@ Additional Requirements:
__ https://docs.microsoft.com/en-us/windows-server/virtualization/hyper-v/manage/Use-live-migration-without-Failover-Clustering-to-move-a-virtual-machine
How to setup live migration on Hyper-V
---------------------------------------
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
To enable 'shared nothing live' migration, run the 3 instructions below on each
Hyper-V host:
@@ -175,15 +176,16 @@ Hyper-V host:
provide live migration.
Additional Reading
-------------------
+~~~~~~~~~~~~~~~~~~
This article clarifies the various live migration options in Hyper-V:
`Hyper-V Live Migration of Yesterday
`_
+
Install nova-compute using OpenStack Hyper-V installer
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+------------------------------------------------------
In case you want to avoid all the manual setup, you can use Cloudbase
Solutions' installer. You can find it here:
@@ -201,28 +203,26 @@ its features can be found here:
`Cloudbase `_
+
.. _windows-requirements:
Requirements
-~~~~~~~~~~~~
+------------
Python
-------
-
-Python 2.7 32bit must be installed as most of the libraries are not working
-properly on the 64bit version.
+~~~~~~
**Setting up Python prerequisites**
-#. Download and install Python 2.7 using the MSI installer from here:
+#. Download and install Python 3.8 using the installer from the `Python
+   website`__.
- `python-2.7.3.msi download
- `_
+ .. __: https://www.python.org/downloads/windows/
.. code-block:: none
- PS C:\> $src = "https://www.python.org/ftp/python/2.7.3/python-2.7.3.msi"
- PS C:\> $dest = "$env:temp\python-2.7.3.msi"
+ PS C:\> $src = "https://www.python.org/ftp/python/3.8.8/python-3.8.8.exe"
+ PS C:\> $dest = "$env:temp\python-3.8.8.exe"
PS C:\> Invoke-WebRequest -Uri $src -OutFile $dest
PS C:\> Unblock-File $dest
PS C:\> Start-Process $dest
@@ -233,34 +233,18 @@ properly on the 64bit version.
.. code-block:: none
PS C:\> $oldPath = [System.Environment]::GetEnvironmentVariable("Path")
- PS C:\> $newPath = $oldPath + ";C:\python27\;C:\python27\Scripts\"
+ PS C:\> $newPath = $oldPath + ";C:\python38\;C:\python38\Scripts\"
PS C:\> [System.Environment]::SetEnvironmentVariable("Path", $newPath, [System.EnvironmentVariableTarget]::User
Python dependencies
--------------------
-
-The following packages need to be downloaded and manually installed:
-
-``setuptools``
- https://pypi.python.org/packages/2.7/s/setuptools/setuptools-0.6c11.win32-py2.7.exe
-
-``pip``
- https://pip.pypa.io/en/latest/installing/
-
-``PyMySQL``
- http://codegood.com/download/10/
-
-``PyWin32``
- https://sourceforge.net/projects/pywin32/files/pywin32/Build%20217/pywin32-217.win32-py2.7.exe
-
-``Greenlet``
- http://www.lfd.uci.edu/~gohlke/pythonlibs/#greenlet
-
-``PyCryto``
- http://www.voidspace.org.uk/downloads/pycrypto26/pycrypto-2.6.win32-py2.7.exe
+~~~~~~~~~~~~~~~~~~~
The following packages must be installed with pip:
+* ``pywin32``
+* ``pymysql``
+* ``greenlet``
+* ``pycrypto``
* ``ecdsa``
* ``amqp``
* ``wmi``
@@ -271,8 +255,9 @@ The following packages must be installed with pip:
PS C:\> pip install amqp
PS C:\> pip install wmi
+
Other dependencies
-------------------
+~~~~~~~~~~~~~~~~~~
``qemu-img`` is required for some of the image related operations. You can get
it from here: http://qemu.weilnetz.de/. You must make sure that the
@@ -281,7 +266,7 @@ it from here: http://qemu.weilnetz.de/. You must make sure that the
Some Python packages need to be compiled, so you may use MinGW or Visual
Studio. You can get MinGW from here: http://sourceforge.net/projects/mingw/.
You must configure which compiler is to be used for this purpose by using the
-``distutils.cfg`` file in ``$Python27\Lib\distutils``, which can contain:
+``distutils.cfg`` file in ``$Python38\Lib\distutils``, which can contain:
.. code-block:: ini
@@ -291,37 +276,30 @@ You must configure which compiler is to be used for this purpose by using the
As a last step for setting up MinGW, make sure that the MinGW binaries'
directories are set up in PATH.
+
Install nova-compute
-~~~~~~~~~~~~~~~~~~~~
+--------------------
Download the nova code
-----------------------
+~~~~~~~~~~~~~~~~~~~~~~
#. Use Git to download the necessary source code. The installer to run Git on
Windows can be downloaded here:
- https://github.com/msysgit/msysgit/releases/download/Git-1.9.2-preview20140411/Git-1.9.2-preview20140411.exe
+ https://gitforwindows.org/
#. Download the installer. Once the download is complete, run the installer and
follow the prompts in the installation wizard. The default should be
acceptable for the purposes of this guide.
- .. code-block:: none
-
- PS C:\> $src = "https://github.com/msysgit/msysgit/releases/download/Git-1.9.2-preview20140411/Git-1.9.2-preview20140411.exe"
- PS C:\> $dest = "$env:temp\Git-1.9.2-preview20140411.exe"
- PS C:\> Invoke-WebRequest -Uri $src -OutFile $dest
- PS C:\> Unblock-File $dest
- PS C:\> Start-Process $dest
-
#. Run the following to clone the nova code.
.. code-block:: none
- PS C:\> git.exe clone https://git.openstack.org/openstack/nova
+ PS C:\> git.exe clone https://opendev.org/openstack/nova
Install nova-compute service
-----------------------------
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~
To install ``nova-compute``, run:
@@ -331,7 +309,7 @@ To install ``nova-compute``, run:
PS C:\> python setup.py install
Configure nova-compute
-----------------------
+~~~~~~~~~~~~~~~~~~~~~~
The ``nova.conf`` file must be placed in ``C:\etc\nova`` for running OpenStack
on Hyper-V. Below is a sample ``nova.conf`` for Windows:
@@ -348,7 +326,7 @@ on Hyper-V. Below is a sample ``nova.conf`` for Windows:
use_cow_images = true
force_config_drive = false
injected_network_template = C:\Program Files (x86)\OpenStack\Nova\etc\interfaces.template
- policy_file = C:\Program Files (x86)\OpenStack\Nova\etc\policy.json
+ policy_file = C:\Program Files (x86)\OpenStack\Nova\etc\policy.yaml
mkisofs_cmd = C:\Program Files (x86)\OpenStack\Nova\bin\mkisofs.exe
allow_resize_to_same_host = true
running_deleted_instance_action = reap
@@ -366,17 +344,19 @@ on Hyper-V. Below is a sample ``nova.conf`` for Windows:
logfile = nova-compute.log
instance_usage_audit = true
instance_usage_audit_period = hour
- use_neutron = True
+
[glance]
api_servers = http://IP_ADDRESS:9292
+
[neutron]
- url = http://IP_ADDRESS:9696
+ endpoint_override = http://IP_ADDRESS:9696
auth_strategy = keystone
project_name = service
username = neutron
password = Passw0rd
auth_url = http://IP_ADDRESS:5000/v3
auth_type = password
+
[hyperv]
vswitch_name = newVSwitch0
limit_cpu_features = false
@@ -385,12 +365,13 @@ on Hyper-V. Below is a sample ``nova.conf`` for Windows:
config_drive_cdrom = true
dynamic_memory_ratio = 1
enable_instance_metrics_collection = true
+
[rdp]
enabled = true
html5_proxy_base_url = https://IP_ADDRESS:4430
Prepare images for use with Hyper-V
------------------------------------
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Hyper-V currently supports only the VHD and VHDX file format for virtual
machine instances. Detailed instructions for installing virtual machines on
@@ -404,8 +385,12 @@ image to `glance` using the `openstack-client`:
.. code-block:: none
- PS C:\> openstack image create --name "VM_IMAGE_NAME" --property hypervisor_type=hyperv --public \
- --container-format bare --disk-format vhd
+ PS C:\> openstack image create \
+ --name "VM_IMAGE_NAME" \
+ --property hypervisor_type=hyperv \
+ --public \
+ --container-format bare \
+ --disk-format vhd
.. note::
@@ -419,12 +404,12 @@ image to `glance` using the `openstack-client`:
PS C:\> New-VHD DISK_NAME.vhd -SizeBytes VHD_SIZE
Inject interfaces and routes
-----------------------------
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
The ``interfaces.template`` file describes the network interfaces and routes
available on your system and how to activate them. You can specify the location
-of the file with the ``injected_network_template`` configuration option in
-``/etc/nova/nova.conf``.
+of the file with the :oslo.config:option:`injected_network_template`
+configuration option in ``nova.conf``.
.. code-block:: ini
@@ -433,7 +418,7 @@ of the file with the ``injected_network_template`` configuration option in
A default template exists in ``nova/virt/interfaces.template``.
Run Compute with Hyper-V
-------------------------
+~~~~~~~~~~~~~~~~~~~~~~~~
To start the ``nova-compute`` service, run this command from a console in the
Windows server:
@@ -442,8 +427,9 @@ Windows server:
-   PS C:\> C:\Python27\python.exe c:\Python27\Scripts\nova-compute --config-file c:\etc\nova\nova.conf
+   PS C:\> C:\Python38\python.exe c:\Python38\Scripts\nova-compute --config-file c:\etc\nova\nova.conf
-Troubleshoot Hyper-V configuration
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Troubleshooting
+---------------
* I ran the :command:`nova-manage service list` command from my controller;
however, I'm not seeing smiley faces for Hyper-V compute nodes, what do I do?
diff --git a/doc/source/admin/configuration/hypervisor-ironic.rst b/doc/source/admin/configuration/hypervisor-ironic.rst
new file mode 100644
index 00000000000..bba01deffa5
--- /dev/null
+++ b/doc/source/admin/configuration/hypervisor-ironic.rst
@@ -0,0 +1,58 @@
+======
+Ironic
+======
+
+Introduction
+------------
+
+The ironic hypervisor driver wraps the Bare Metal (ironic) API,
+enabling Nova to provision bare metal resources using the same
+user-facing API as for server management.
+
+This is the only driver in nova where one compute service can map to many
+hosts, meaning a ``nova-compute`` service can manage multiple ``ComputeNodes``.
+An ironic driver managed compute service uses the ironic ``node uuid`` for the
+compute node ``hypervisor_hostname`` (nodename) and ``uuid`` fields. The
+relationship of ``instance:compute node:ironic node`` is 1:1:1.
+
+Scheduling of bare metal nodes is based on custom resource classes, specified
+via the ``resource_class`` property on a node and a corresponding resource
+property on a flavor (see the :ironic-doc:`flavor documentation
+<install/configure-nova-flavors.html>`).
+The RAM and CPU settings on a flavor are ignored, and the disk is only used to
+determine the root partition size when a partition image is used (see the
+:ironic-doc:`image documentation
+<install/configure-glance-images.html>`).
+
+
+Configuration
+-------------
+
+- :ironic-doc:`Configure the Compute service to use the Bare Metal service
+  <install/configure-compute.html>`.
+
+- :ironic-doc:`Create flavors for use with the Bare Metal service
+  <install/configure-nova-flavors.html>`.
+
+- :ironic-doc:`Conductor Groups <admin/conductor-groups.html>`.
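+
+As a rough sketch only (the endpoint and credential values below are
+illustrative assumptions; see the guides above for the authoritative steps),
+an ironic-backed compute service is enabled in ``nova.conf`` along these
+lines:
+
+.. code-block:: ini
+
+   [DEFAULT]
+   compute_driver = ironic.IronicDriver
+
+   [ironic]
+   # Credentials used to reach the Bare Metal API.
+   auth_type = password
+   auth_url = http://controller/identity
+   username = ironic
+   password = secret
+   project_name = service
+   project_domain_name = Default
+   user_domain_name = Default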
+
+
+Scaling and performance issues
+------------------------------
+
+- The ``update_available_resource`` periodic task reports all the resources
+  managed by ironic. Depending on the number of nodes, it can take a lot of
+  time. The ``nova-compute`` service will not perform any other operations
+  while this task is running. You can use conductor groups to help scale, by
+  setting :oslo.config:option:`ironic.partition_key`, as sketched below.
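+
+  For example (the conductor group and compute service host names here are
+  assumptions):
+
+  .. code-block:: ini
+
+     [ironic]
+     # Only report nodes belonging to the "rack1" conductor group.
+     partition_key = rack1
+     # Other compute services managing the same group, for failover.
+     peer_list = compute1,compute2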
+
+
+Known limitations / Missing features
+------------------------------------
+
+* Migrate
+* Resize
+* Snapshot
+* Pause
+* Shelve
+* Evacuate
diff --git a/doc/source/admin/configuration/hypervisor-kvm.rst b/doc/source/admin/configuration/hypervisor-kvm.rst
index 159d065a855..fdadde32f9a 100644
--- a/doc/source/admin/configuration/hypervisor-kvm.rst
+++ b/doc/source/admin/configuration/hypervisor-kvm.rst
@@ -2,9 +2,6 @@
KVM
===
-.. todo:: This is really installation guide material and should probably be
- moved.
-
KVM is configured as the default hypervisor for Compute.
.. note::
@@ -15,16 +12,6 @@ KVM is configured as the default hypervisor for Compute.
on qemu-kvm, which installs ``/lib/udev/rules.d/45-qemu-kvm.rules``, which
sets the correct permissions on the ``/dev/kvm`` device node.
-To enable KVM explicitly, add the following configuration options to the
-``/etc/nova/nova.conf`` file:
-
-.. code-block:: ini
-
- compute_driver = libvirt.LibvirtDriver
-
- [libvirt]
- virt_type = kvm
-
The KVM hypervisor supports the following virtual machine image formats:
* Raw
@@ -35,38 +22,47 @@ The KVM hypervisor supports the following virtual machine image formats:
This section describes how to enable KVM on your system. For more information,
see the following distribution-specific documentation:
-* `Fedora: Virtualization Getting Started Guide `_
- from the Fedora 22 documentation.
-* `Ubuntu: KVM/Installation `_ from the Community Ubuntu documentation.
-* `Debian: Virtualization with KVM `_ from the Debian handbook.
-* `Red Hat Enterprise Linux: Installing virtualization packages on an existing
- Red Hat Enterprise Linux system `_ from the ``Red Hat Enterprise Linux
- Virtualization Host Configuration and Guest Installation Guide``.
-* `openSUSE: Installing KVM `_
- from the openSUSE Virtualization with KVM manual.
-* `SLES: Installing KVM `_ from the SUSE Linux Enterprise Server
- ``Virtualization Guide``.
+* `Fedora: Virtualization Getting Started Guide`__
+* `Ubuntu: KVM/Installation`__
+* `Debian: KVM Guide`__
+* `Red Hat Enterprise Linux (RHEL): Getting started with virtualization`__
+* `openSUSE: Setting Up a KVM VM Host Server`__
+* `SLES: Virtualization with KVM`__
+
+.. __: https://docs.fedoraproject.org/en-US/quick-docs/getting-started-with-virtualization/
+.. __: https://help.ubuntu.com/community/KVM/Installation
+.. __: https://wiki.debian.org/KVM
+.. __: https://access.redhat.com/documentation/en-us/red_hat_enterprise_linux/8/html/configuring_and_managing_virtualization/getting-started-with-virtualization-in-rhel-8_configuring-and-managing-virtualization
+.. __: https://doc.opensuse.org/documentation/leap/virtualization/html/book-virt/cha-qemu-host.html
+.. __: https://documentation.suse.com/sles/11-SP4/html/SLES-all/book-kvm.html
+
+
+Configuration
+-------------
+
+To enable KVM explicitly, add the following configuration options to the
+``/etc/nova/nova.conf`` file:
+
+.. code-block:: ini
+
+ [DEFAULT]
+ compute_driver = libvirt.LibvirtDriver
+
+ [libvirt]
+ virt_type = kvm
+
.. _enable-kvm:
Enable KVM
-~~~~~~~~~~
+----------
The following sections outline how to enable KVM based hardware virtualization
on different architectures and platforms. To perform these steps, you must be
logged in as the ``root`` user.
-For x86 based systems
----------------------
+For x86-based systems
+~~~~~~~~~~~~~~~~~~~~~
#. To determine whether the ``svm`` or ``vmx`` CPU extensions are present, run
this command:
@@ -135,8 +131,7 @@ system or find a system with this support.
and enable the VT option.
If KVM acceleration is not supported, configure Compute to use a different
-hypervisor, such as ``QEMU`` or ``Xen``. See :ref:`compute_qemu` or
-:ref:`compute_xen_api` for details.
+hypervisor, such as :ref:`QEMU `.
These procedures help you load the kernel modules for Intel-based and AMD-based
processors if they do not load automatically during KVM installation.
@@ -176,8 +171,8 @@ Add these lines to ``/etc/modules`` file so that these modules load on reboot:
kvm
kvm-amd
-For POWER based systems
------------------------
+For POWER-based systems
+~~~~~~~~~~~~~~~~~~~~~~~
KVM as a hypervisor is supported on POWER system's PowerNV platform.
@@ -225,15 +220,22 @@ KVM as a hypervisor is supported on POWER system's PowerNV platform.
Because a KVM installation can change user group membership, you might need
to log in again for changes to take effect.
+For AArch64-based systems
+~~~~~~~~~~~~~~~~~~~~~~~~~
+
+.. todo:: Populate this section.
+
+
Configure Compute backing storage
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+---------------------------------
Backing Storage is the storage used to provide the expanded operating system
image, and any ephemeral storage. Inside the virtual machine, this is normally
presented as two virtual hard disks (for example, ``/dev/vda`` and ``/dev/vdb``
respectively). However, inside OpenStack, this can be derived from one of these
methods: ``lvm``, ``qcow``, ``rbd`` or ``flat``, chosen using the
-``images_type`` option in ``nova.conf`` on the compute node.
+:oslo.config:option:`libvirt.images_type` option in ``nova.conf`` on the
+compute node.
.. note::
@@ -241,7 +243,8 @@ methods: ``lvm``, ``qcow``, ``rbd`` or ``flat``, chosen using the
Flat back end uses either raw or QCOW2 storage. It never uses a backing
store, so when using QCOW2 it copies an image rather than creating an
overlay. By default, it creates raw files but will use QCOW2 when creating a
- disk from a QCOW2 if ``force_raw_images`` is not set in configuration.
+ disk from a QCOW2 if :oslo.config:option:`force_raw_images` is not set in
+ configuration.
QCOW is the default backing store. It uses a copy-on-write philosophy to delay
allocation of storage until it is actually needed. This means that the space
@@ -255,88 +258,134 @@ reserved on the physical disk.
Local `LVM volumes
`__ can also be
-used. Set ``images_volume_group = nova_local`` where ``nova_local`` is the name
-of the LVM group you have created.
+used. Set the :oslo.config:option:`libvirt.images_volume_group` configuration
+option to the name of the LVM group you have created.
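+
+For example, a minimal sketch (``nova_local`` is an assumed group name):
+
+.. code-block:: ini
+
+   [libvirt]
+   # Store ephemeral disks as local LVM volumes in this volume group.
+   images_type = lvm
+   images_volume_group = nova_local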
-Specify the CPU model of KVM guests
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-The Compute service enables you to control the guest CPU model that is exposed
-to KVM virtual machines. Use cases include:
+Direct download of images from Ceph
+-----------------------------------
-* To maximize performance of virtual machines by exposing new host CPU features
- to the guest
+When the Glance image service is set up with the Ceph backend and Nova is using
+a local ephemeral store (``[libvirt]/images_type!=rbd``), it is possible to
+configure Nova to download images directly into the local compute image cache.
-* To ensure a consistent default CPU across all machines, removing reliance of
- variable QEMU defaults
+With the following configuration, images are downloaded using the RBD export
+command instead of using the Glance HTTP API. In some situations, especially
+for very large images, this could be substantially faster and can improve the
+boot times of instances.
-In libvirt, the CPU is specified by providing a base CPU model name (which is a
-shorthand for a set of feature flags), a set of additional feature flags, and
-the topology (sockets/cores/threads). The libvirt KVM driver provides a number
-of standard CPU model names. These models are defined in the
-``/usr/share/libvirt/cpu_map.xml`` file. Check this file to determine which
-models are supported by your local installation.
+On the Glance API node in ``glance-api.conf``:
-Two Compute configuration options in the ``[libvirt]`` group of ``nova.conf``
-define which type of CPU model is exposed to the hypervisor when using KVM:
-``cpu_mode`` and ``cpu_model``.
+.. code-block:: ini
-The ``cpu_mode`` option can take one of the following values: ``none``,
-``host-passthrough``, ``host-model``, and ``custom``.
+ [DEFAULT]
+ show_image_direct_url=true
-Host model (default for KVM & QEMU)
------------------------------------
+On the Nova compute node in ``nova.conf``:
-If your ``nova.conf`` file contains ``cpu_mode=host-model``, libvirt identifies
-the CPU model in ``/usr/share/libvirt/cpu_map.xml`` file that most closely
-matches the host, and requests additional CPU flags to complete the match. This
-configuration provides the maximum functionality and performance and maintains
-good reliability and compatibility if the guest is migrated to another host
-with slightly different host CPUs.
+.. code-block:: ini
-Host pass through
------------------
+ [glance]
+ enable_rbd_download=true
+ rbd_user=glance
+ rbd_pool=images
+ rbd_ceph_conf=/etc/ceph/ceph.conf
+ rbd_connect_timeout=5
-If your ``nova.conf`` file contains ``cpu_mode=host-passthrough``, libvirt
-tells KVM to pass through the host CPU with no modifications. The difference
-to host-model, instead of just matching feature flags, every last detail of the
-host CPU is matched. This gives the best performance, and can be important to
-some apps which check low level CPU details, but it comes at a cost with
-respect to migration. The guest can only be migrated to a matching host CPU.
-Custom
-------
+Nested guest support
+--------------------
-If your ``nova.conf`` file contains ``cpu_mode=custom``, you can explicitly
-specify one of the supported named models using the cpu_model configuration
-option. For example, to configure the KVM guests to expose Nehalem CPUs, your
-``nova.conf`` file should contain:
+You may choose to enable support for nested guests --- that is, allow
+your Nova instances to themselves run hardware-accelerated virtual
+machines with KVM. Doing so requires a module parameter on
+your KVM kernel module, and corresponding ``nova.conf`` settings.
-.. code-block:: ini
+Host configuration
+~~~~~~~~~~~~~~~~~~
- [libvirt]
- cpu_mode = custom
- cpu_model = Nehalem
+To enable nested KVM guests, your compute node must load the
+``kvm_intel`` or ``kvm_amd`` module with ``nested=1``. You can enable
+the ``nested`` parameter permanently, by creating a file named
+``/etc/modprobe.d/kvm.conf`` and populating it with the following
+content:
+
+.. code-block:: none
+
+ options kvm_intel nested=1
+ options kvm_amd nested=1
+
+A reboot may be required for the change to become effective.
+
+Nova configuration
+~~~~~~~~~~~~~~~~~~
+
+To support nested guests, you must set your
+:oslo.config:option:`libvirt.cpu_mode` configuration to one of the following
+options:
-None (default for all libvirt-driven hypervisors other than KVM & QEMU)
------------------------------------------------------------------------
+Host passthrough (``host-passthrough``)
+ In this mode, nested virtualization is automatically enabled once
+ the KVM kernel module is loaded with nesting support.
-If your ``nova.conf`` file contains ``cpu_mode=none``, libvirt does not specify
-a CPU model. Instead, the hypervisor chooses the default model.
+ .. code-block:: ini
-Guest agent support
--------------------
+ [libvirt]
+ cpu_mode = host-passthrough
-Use guest agents to enable optional access between compute nodes and guests
-through a socket, using the QMP protocol.
+ However, do consider the other implications that
+ :doc:`host passthrough ` mode has on compute
+ functionality.
+
+Host model (``host-model``)
+ In this mode, nested virtualization is automatically enabled once
+ the KVM kernel module is loaded with nesting support, **if** the
+ matching CPU model exposes the ``vmx`` feature flag to guests by
+ default (you can verify this with ``virsh capabilities`` on your
+ compute node). If your CPU model does not pass in the ``vmx`` flag,
+ you can force it with :oslo.config:option:`libvirt.cpu_model_extra_flags`:
+
+ .. code-block:: ini
+
+ [libvirt]
+ cpu_mode = host-model
+ cpu_model_extra_flags = vmx
+
+ Again, consider the other implications that apply to the
+ :doc:`host model ` mode.
+
+Custom (``custom``)
+ In custom mode, the same considerations apply as in host-model mode,
+ but you may *additionally* want to ensure that libvirt passes not only
+ the ``vmx``, but also the ``pcid`` flag to its guests:
+
+ .. code-block:: ini
+
+ [libvirt]
+ cpu_mode = custom
+ cpu_models = IvyBridge
+ cpu_model_extra_flags = vmx,pcid
+
+More information on CPU models can be found in :doc:`/admin/cpu-models`.
+
+Limitations
+~~~~~~~~~~~
+
+When enabling nested guests, you should be aware of (and inform your
+users about) certain limitations that are currently inherent to nested
+KVM virtualization. Most importantly, guests using nested
+virtualization will, *while nested guests are running*,
+
+* fail to complete live migration;
+* fail to resume from suspend.
+
+See `the KVM documentation
+`_ for more
+information on these limitations.
-To enable this feature, you must set ``hw_qemu_guest_agent=yes`` as a metadata
-parameter on the image you wish to use to create the guest-agent-capable
-instances from. You can explicitly disable the feature by setting
-``hw_qemu_guest_agent=no`` in the image metadata.
KVM performance tweaks
-~~~~~~~~~~~~~~~~~~~~~~
+----------------------
The `VHostNet `_ kernel module improves
network performance. To load the kernel module, run the following command as
@@ -346,8 +395,9 @@ root:
# modprobe vhost_net
-Troubleshoot KVM
-~~~~~~~~~~~~~~~~
+
+Troubleshooting
+---------------
Trying to launch a new virtual machine instance fails with the ``ERROR`` state,
and the following error appears in the ``/var/log/nova/nova-compute.log`` file:
diff --git a/doc/source/admin/configuration/hypervisor-lxc.rst b/doc/source/admin/configuration/hypervisor-lxc.rst
index eb8d51f83ef..bc0988ccf6e 100644
--- a/doc/source/admin/configuration/hypervisor-lxc.rst
+++ b/doc/source/admin/configuration/hypervisor-lxc.rst
@@ -24,11 +24,17 @@ LXC than other hypervisors.
the hypervisor. See the `hypervisor support matrix
`_ for details.
-To enable LXC, ensure the following options are set in ``/etc/nova/nova.conf``
-on all hosts running the ``nova-compute`` service.
+
+Configuration
+-------------
+
+To enable LXC, configure :oslo.config:option:`DEFAULT.compute_driver` =
+``libvirt.LibvirtDriver`` and :oslo.config:option:`libvirt.virt_type` =
+``lxc``. For example:
.. code-block:: ini
+ [DEFAULT]
compute_driver = libvirt.LibvirtDriver
[libvirt]
diff --git a/doc/source/admin/configuration/hypervisor-powervm.rst b/doc/source/admin/configuration/hypervisor-powervm.rst
index 9b16c3a21d0..a2947ff6082 100644
--- a/doc/source/admin/configuration/hypervisor-powervm.rst
+++ b/doc/source/admin/configuration/hypervisor-powervm.rst
@@ -1,8 +1,10 @@
+=======
PowerVM
=======
Introduction
------------
+
OpenStack Compute supports the PowerVM hypervisor through `NovaLink`_. In the
NovaLink architecture, a thin NovaLink virtual machine running on the Power
system manages virtualization for that system. The ``nova-compute`` service
@@ -12,22 +14,27 @@ Management Console) is needed.
.. _NovaLink: https://www.ibm.com/support/knowledgecenter/en/POWER8/p8eig/p8eig_kickoff.htm
+
Configuration
-------------
+
In order to function properly, the ``nova-compute`` service must be executed
by a member of the ``pvm_admin`` group. Use the ``usermod`` command to add the
-user. For example, to add the ``stacker`` user to the ``pvm_admin`` group, execute::
+user. For example, to add the ``stacker`` user to the ``pvm_admin`` group, execute:
+
+.. code-block:: console
- sudo usermod -a -G pvm_admin stacker
+ # usermod -a -G pvm_admin stacker
The user must re-login for the change to take effect.
-To enable the PowerVM compute driver, set the following configuration option
-in the ``/etc/nova/nova.conf`` file:
+To enable the PowerVM compute driver, configure
+:oslo.config:option:`DEFAULT.compute_driver` = ``powervm.PowerVMDriver``. For
+example:
.. code-block:: ini
- [Default]
+ [DEFAULT]
compute_driver = powervm.PowerVMDriver
The PowerVM driver supports two types of storage for ephemeral disks:
@@ -59,9 +66,10 @@ processor, whereas 0.05 means 1/20th of a physical processor. E.g.:
Volume Support
--------------
+
Volume support is provided for the PowerVM virt driver via Cinder. Currently,
-the only supported volume protocol is `vSCSI`_ Fibre Channel. Attach, detach,
+the only supported volume protocol is `vSCSI`__ Fibre Channel. Attach, detach,
and extend are the operations supported by the PowerVM vSCSI FC volume adapter.
-Boot from volume is not yet supported.
+:term:`Boot From Volume` is not yet supported.
-.. _vSCSI: https://www.ibm.com/support/knowledgecenter/en/POWER8/p8hat/p8hat_virtualscsi.htm
+.. __: https://www.ibm.com/support/knowledgecenter/en/POWER8/p8hat/p8hat_virtualscsi.htm
diff --git a/doc/source/admin/configuration/hypervisor-qemu.rst b/doc/source/admin/configuration/hypervisor-qemu.rst
index 6849b89c280..6cc72b04ae6 100644
--- a/doc/source/admin/configuration/hypervisor-qemu.rst
+++ b/doc/source/admin/configuration/hypervisor-qemu.rst
-The typical uses cases for QEMU are
+The typical use cases for QEMU are
development or testing purposes, where the hypervisor does not
support native virtualization for guests.
-To enable QEMU, add these settings to ``nova.conf``:
+
+Configuration
+-------------
+
+To enable QEMU, configure :oslo.config:option:`DEFAULT.compute_driver` =
+``libvirt.LibvirtDriver`` and :oslo.config:option:`libvirt.virt_type` =
+``qemu``. For example:
.. code-block:: ini
+ [DEFAULT]
compute_driver = libvirt.LibvirtDriver
[libvirt]
virt_type = qemu
-For some operations you may also have to install the
-:command:`guestmount` utility:
+For some operations you may also have to install the :command:`guestmount`
+utility:
On Ubuntu:
diff --git a/doc/source/admin/configuration/hypervisor-virtuozzo.rst b/doc/source/admin/configuration/hypervisor-virtuozzo.rst
index 13c63daba62..354818949e0 100644
--- a/doc/source/admin/configuration/hypervisor-virtuozzo.rst
+++ b/doc/source/admin/configuration/hypervisor-virtuozzo.rst
@@ -12,11 +12,17 @@ image.
Some OpenStack Compute features may be missing when running with Virtuozzo
as the hypervisor. See :doc:`/user/support-matrix` for details.
-To enable Virtuozzo Containers, set the following options in
-``/etc/nova/nova.conf`` on all hosts running the ``nova-compute`` service.
+
+Configuration
+-------------
+
+To enable Virtuozzo Containers, configure
+:oslo.config:option:`DEFAULT.compute_driver` =
+``libvirt.LibvirtDriver`` and :oslo.config:option:`libvirt.virt_type` =
+``parallels``. For example:
.. code-block:: ini
+ [DEFAULT]
compute_driver = libvirt.LibvirtDriver
force_raw_images = False
@@ -31,6 +37,7 @@ To enable Virtuozzo Virtual Machines, set the following options in
.. code-block:: ini
+ [DEFAULT]
compute_driver = libvirt.LibvirtDriver
[libvirt]
diff --git a/doc/source/admin/configuration/hypervisor-vmware.rst b/doc/source/admin/configuration/hypervisor-vmware.rst
index c7ffd11fe2f..9de1d0c2aef 100644
--- a/doc/source/admin/configuration/hypervisor-vmware.rst
+++ b/doc/source/admin/configuration/hypervisor-vmware.rst
@@ -3,7 +3,7 @@ VMware vSphere
==============
Introduction
-~~~~~~~~~~~~
+------------
OpenStack Compute supports the VMware vSphere product family and enables access
to advanced features such as vMotion, High Availability, and Dynamic Resource
@@ -23,15 +23,16 @@ vSphere features.
The following sections describe how to configure the VMware vCenter driver.
+
High-level architecture
-~~~~~~~~~~~~~~~~~~~~~~~
+-----------------------
The following diagram shows a high-level view of the VMware driver
architecture:
.. rubric:: VMware driver architecture
-.. figure:: /figures/vmware-nova-driver-architecture.jpg
+.. figure:: /_static/images/vmware-nova-driver-architecture.jpg
:width: 100%
As the figure shows, the OpenStack Compute Scheduler sees three hypervisors
@@ -56,12 +57,12 @@ visible in the OpenStack dashboard and you can manage it as you would any other
OpenStack VM. You can perform advanced vSphere operations in vCenter while you
configure OpenStack resources such as VMs through the OpenStack dashboard.
-The figure does not show how networking fits into the architecture. Both
-``nova-network`` and the OpenStack Networking Service are supported. For
+The figure does not show how networking fits into the architecture. For
details, see :ref:`vmware-networking`.
+
Configuration overview
-~~~~~~~~~~~~~~~~~~~~~~
+----------------------
To get started with the VMware vCenter driver, complete the following
high-level steps:
@@ -73,13 +74,12 @@ high-level steps:
#. Load desired VMDK images into the Image service. See :ref:`vmware-images`.
-#. Configure networking with either ``nova-network`` or
- the Networking service. See :ref:`vmware-networking`.
+#. Configure the Networking service (neutron). See :ref:`vmware-networking`.
.. _vmware-prereqs:
Prerequisites and limitations
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+-----------------------------
Use the following list to prepare a vSphere environment that runs with the
VMware vCenter driver:
@@ -110,8 +110,7 @@ Networking
Security groups
If you use the VMware driver with OpenStack Networking and the NSX plug-in,
- security groups are supported. If you use ``nova-network``, security groups
- are not supported.
+ security groups are supported.
.. note::
@@ -145,8 +144,9 @@ assigned to a separate availability zone. This is required as the OpenStack
Block Storage VMDK driver does not currently work across multiple vCenter
installations.
+
VMware vCenter service account
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+------------------------------
OpenStack integration requires a vCenter service account with the following
minimum permissions. Apply the permissions to the ``Datacenter`` root object,
@@ -417,10 +417,11 @@ and select the :guilabel:`Propagate to Child Objects` option.
- Import
-
+
.. _vmware-vcdriver:
VMware vCenter driver
-~~~~~~~~~~~~~~~~~~~~~
+---------------------
Use the VMware vCenter driver (VMwareVCDriver) to connect OpenStack Compute
with vCenter. This recommended configuration enables access through vCenter to
@@ -428,7 +429,7 @@ advanced vSphere features like vMotion, High Availability, and Dynamic Resource
Scheduling (DRS).
VMwareVCDriver configuration options
-------------------------------------
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Add the following VMware-specific configuration options to the ``nova.conf``
file:
@@ -481,10 +482,11 @@ against host failures.
Many ``nova.conf`` options are relevant to libvirt but do not apply to this
driver.
+
.. _vmware-images:
Images with VMware vSphere
-~~~~~~~~~~~~~~~~~~~~~~~~~~
+--------------------------
The vCenter driver supports images in the VMDK format. Disks in this format can
be obtained from VMware Fusion or from an ESX environment. It is also possible
@@ -495,7 +497,7 @@ sections provide additional details on the supported disks and the commands
used for conversion and upload.
Supported image types
----------------------
+~~~~~~~~~~~~~~~~~~~~~
Upload images to the OpenStack Image service in VMDK format. The following
VMDK disk types are supported:
@@ -748,7 +750,7 @@ of the supported guest OS:
- Windows XP Professional
Convert and load images
------------------------
+~~~~~~~~~~~~~~~~~~~~~~~
-Using the ``qemu-img`` utility, disk images in several formats (such as,
+Using the ``qemu-img`` utility, disk images in several formats (such as
qcow2) can be converted to the VMDK format.
@@ -809,12 +811,12 @@ is lsiLogic, which is SCSI, so you can omit the ``vmware_adaptertype`` property
if you are certain that the image adapter type is lsiLogic.
Tag VMware images
------------------
+~~~~~~~~~~~~~~~~~
In a mixed hypervisor environment, OpenStack Compute uses the
``hypervisor_type`` tag to match images to the correct hypervisor type. For
VMware images, set the hypervisor type to ``vmware``. Other valid hypervisor
-types include: ``hyperv``, ``ironic``, ``lxc``, ``qemu``, ``uml``, and ``xen``.
+types include: ``hyperv``, ``ironic``, ``lxc``, and ``qemu``.
Note that ``qemu`` is used for both QEMU and KVM hypervisor types.
.. code-block:: console
@@ -829,7 +831,7 @@ Note that ``qemu`` is used for both QEMU and KVM hypervisor types.
ubuntu-thick-scsi < ubuntuLTS-flat.vmdk
Optimize images
----------------
+~~~~~~~~~~~~~~~
Monolithic Sparse disks are considerably faster to download but have the
overhead of an additional conversion step. When imported into ESX, sparse disks
@@ -888,7 +890,7 @@ In the previous cases, the converted vmdk is actually a pair of files:
The file to be uploaded to the Image service is ``converted-flat.vmdk``.
Image handling
---------------
+~~~~~~~~~~~~~~
The ESX hypervisor requires a copy of the VMDK file in order to boot up a
virtual machine. As a result, the vCenter OpenStack Compute driver must
@@ -902,7 +904,7 @@ Image service.
Even with a cached VMDK, there is still a copy operation from the cache
location to the hypervisor file directory in the shared data store. To avoid
this copy, boot the image in linked_clone mode. To learn how to enable this
-mode, see :ref:`vmware-config`.
+mode, see :oslo.config:option:`vmware.use_linked_clone`.
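+
+For example, a minimal sketch (``true`` matches the documented default):
+
+.. code-block:: ini
+
+   [vmware]
+   # Boot images as linked clones to avoid copying the cached VMDK.
+   use_linked_clone = true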
.. note::
@@ -926,61 +928,30 @@ cached images are stored.
have a shared file system.
You can automatically purge unused images after a specified period of time. To
-configure this action, set these options in the ``DEFAULT`` section in the
-``nova.conf`` file:
+configure this action, set these options in the :oslo.config:group:`image_cache`
+section in the ``nova.conf`` file:
-``remove_unused_base_images``
- Set this option to ``True`` to specify that unused images should be removed
- after the duration specified in the
- ``remove_unused_original_minimum_age_seconds`` option. The default is
- ``True``.
+* :oslo.config:option:`image_cache.remove_unused_base_images`
+* :oslo.config:option:`image_cache.remove_unused_original_minimum_age_seconds`
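+
+For example (the values shown are the documented defaults, included here for
+illustration only):
+
+.. code-block:: ini
+
+   [image_cache]
+   # Remove unused base images from the datastore cache...
+   remove_unused_base_images = true
+   # ...once they have been unused for this many seconds (24 hours).
+   remove_unused_original_minimum_age_seconds = 86400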
-``remove_unused_original_minimum_age_seconds``
- Specifies the duration in seconds after which an unused image is purged from
- the cache. The default is ``86400`` (24 hours).
.. _vmware-networking:
Networking with VMware vSphere
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-The VMware driver supports networking with the ``nova-network`` service or the
-Networking Service. Depending on your installation, complete these
-configuration steps before you provision VMs:
-
-#. **The nova-network service with the FlatManager or FlatDHCPManager**.
- Create a port group with the same name as the ``flat_network_bridge`` value
- in the ``nova.conf`` file. The default value is ``br100``. If you specify
- another value, the new value must be a valid Linux bridge identifier that
- adheres to Linux bridge naming conventions.
-
- All VM NICs are attached to this port group.
-
- Ensure that the flat interface of the node that runs the ``nova-network``
- service has a path to this network.
+------------------------------
- .. note::
+The VMware driver supports networking with the Networking Service (neutron).
+Depending on your installation, complete these configuration steps before you
+provision VMs:
- When configuring the port binding for this port group in vCenter, specify
- ``ephemeral`` for the port binding type. For more information, see
- `Choosing a port binding type in ESX/ESXi `_ in the VMware Knowledge Base.
-
-#. **The nova-network service with the VlanManager**.
- Set the ``vlan_interface`` configuration option to match the ESX host
- interface that handles VLAN-tagged VM traffic.
-
- OpenStack Compute automatically creates the corresponding port groups.
-
-#. If you are using the OpenStack Networking Service:
- Before provisioning VMs, create a port group with the same name as the
+#. Before provisioning VMs, create a port group with the same name as the
``vmware.integration_bridge`` value in ``nova.conf`` (default is
``br-int``). All VM NICs are attached to this port group for management by
the OpenStack Networking plug-in.
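+
+   For example, a sketch of the corresponding ``nova.conf`` setting
+   (``br-int`` is the documented default):
+
+   .. code-block:: ini
+
+      [vmware]
+      integration_bridge = br-int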
+
Volumes with VMware vSphere
-~~~~~~~~~~~~~~~~~~~~~~~~~~~
+---------------------------
The VMware driver supports attaching volumes from the Block Storage service.
The VMware VMDK driver for OpenStack Block Storage is recommended and should be
@@ -990,153 +961,20 @@ this has not yet been imported and published). Also an
iSCSI volume driver provides limited support and can be used only for
attachments.
-.. _vmware-config:
-
-Configuration reference
-~~~~~~~~~~~~~~~~~~~~~~~
-
-To customize the VMware driver, use the configuration option settings below.
-
-.. TODO(sdague): for the import we just copied this in from the auto generated
- file. We probably need a strategy for doing equivalent autogeneration, but
- we don't as of yet.
-
- Warning: Do not edit this file. It is automatically generated from the
- software project's code and your changes will be overwritten.
-
- The tool to generate this file lives in openstack-doc-tools repository.
-
- Please make any changes needed in the code, then run the
- autogenerate-config-doc tool from the openstack-doc-tools repository, or
- ask for help on the documentation mailing list, IRC channel or meeting.
-
-.. _nova-vmware:
-
-.. list-table:: Description of VMware configuration options
- :header-rows: 1
- :class: config-ref-table
-
- * - Configuration option = Default value
- - Description
- * - **[vmware]**
- -
- * - ``api_retry_count`` = ``10``
- - (Integer) Number of times VMware vCenter server API must be retried on connection failures, e.g. socket error, etc.
- * - ``ca_file`` = ``None``
- - (String) Specifies the CA bundle file to be used in verifying the vCenter server certificate.
- * - ``cache_prefix`` = ``None``
- - (String) This option adds a prefix to the folder where cached images are stored
-
- This is not the full path - just a folder prefix. This should only be used when a datastore cache is shared between compute nodes.
- .. note::
-
- This should only be used when the compute nodes are running on same host or they have a shared file system.
-
- Possible values:
-
- * Any string representing the cache prefix to the folder
- * - ``cluster_name`` = ``None``
- - (String) Name of a VMware Cluster ComputeResource.
- * - ``console_delay_seconds`` = ``None``
- - (Integer) Set this value if affected by an increased network latency causing repeated characters when typing in a remote console.
- * - ``datastore_regex`` = ``None``
- - (String) Regular expression pattern to match the name of datastore.
-
- The datastore_regex setting specifies the datastores to use with Compute. For example, datastore_regex="nas.*" selects all the data stores that have a name starting with "nas".
-
- .. note::
-
- If no regex is given, it just picks the datastore with the most freespace.
-
- Possible values:
-
- * Any matching regular expression to a datastore must be given
- * - ``host_ip`` = ``None``
- - (String) Hostname or IP address for connection to VMware vCenter host.
- * - ``host_password`` = ``None``
- - (String) Password for connection to VMware vCenter host.
- * - ``host_port`` = ``443``
- - (Port number) Port for connection to VMware vCenter host.
- * - ``host_username`` = ``None``
- - (String) Username for connection to VMware vCenter host.
- * - ``insecure`` = ``False``
- - (Boolean) If true, the vCenter server certificate is not verified. If false, then the default CA truststore is used for verification.
-
- Related options:
-
- * ca_file: This option is ignored if "ca_file" is set.
- * - ``integration_bridge`` = ``None``
- - (String) This option should be configured only when using the NSX-MH Neutron plugin. This is the name of the integration bridge on the ESXi server or host. This should not be set for any other Neutron plugin. Hence the default value is not set.
-
- Possible values:
-
- * Any valid string representing the name of the integration bridge
- * - ``maximum_objects`` = ``100``
- - (Integer) This option specifies the limit on the maximum number of objects to return in a single result.
-
- A positive value will cause the operation to suspend the retrieval when the count of objects reaches the specified limit. The server may still limit the count to something less than the configured value. Any remaining objects may be retrieved with additional requests.
- * - ``pbm_default_policy`` = ``None``
- - (String) This option specifies the default policy to be used.
-
- If pbm_enabled is set and there is no defined storage policy for the specific request, then this policy will be used.
-
- Possible values:
-
- * Any valid storage policy such as VSAN default storage policy
-
- Related options:
-
- * pbm_enabled
- * - ``pbm_enabled`` = ``False``
- - (Boolean) This option enables or disables storage policy based placement of instances.
-
- Related options:
-
- * pbm_default_policy
- * - ``pbm_wsdl_location`` = ``None``
- - (String) This option specifies the PBM service WSDL file location URL.
-
- Setting this will disable storage policy based placement of instances.
-
- Possible values:
-
- * Any valid file path e.g file:///opt/SDK/spbm/wsdl/pbmService.wsdl
- * - ``serial_port_proxy_uri`` = ``None``
- - (String) Identifies a proxy service that provides network access to the serial_port_service_uri.
-
- Possible values:
-
- * Any valid URI
-
- Related options: This option is ignored if serial_port_service_uri is not specified.
-
- * serial_port_service_uri
- * - ``serial_port_service_uri`` = ``None``
- - (String) Identifies the remote system where the serial port traffic will be sent.
-
- This option adds a virtual serial port which sends console output to a configurable service URI. At the service URI address there will be virtual serial port concentrator that will collect console logs. If this is not set, no serial ports will be added to the created VMs.
-
- Possible values:
-
- * Any valid URI
- * - ``task_poll_interval`` = ``0.5``
- - (Floating point) Time interval in seconds to poll remote tasks invoked on VMware VC server.
- * - ``use_linked_clone`` = ``True``
- - (Boolean) This option enables/disables the use of linked clone.
-
- The ESX hypervisor requires a copy of the VMDK file in order to boot up a virtual machine. The compute driver must download the VMDK via HTTP from the OpenStack Image service to a datastore that is visible to the hypervisor and cache it. Subsequent virtual machines that need the VMDK use the cached version and don't have to copy the file again from the OpenStack Image service.
-
- If set to false, even with a cached VMDK, there is still a copy operation from the cache location to the hypervisor file directory in the shared datastore. If set to true, the above copy operation is avoided as it creates copy of the virtual machine that shares virtual disks with its parent VM.
- * - ``wsdl_location`` = ``None``
- - (String) This option specifies VIM Service WSDL Location
-
- If vSphere API versions 5.1 and later is being used, this section can be ignored. If version is less than 5.1, WSDL files must be hosted locally and their location must be specified in the above section.
+Troubleshooting
+---------------
- Optional over-ride to default location for bug work-arounds.
+Operators can troubleshoot VMware-specific failures by correlating OpenStack
+logs with vCenter logs. Every RPC call made by the OpenStack driver has an
+``opID`` which can be traced in the vCenter logs. For example, consider the
+following excerpt from a ``nova-compute`` log:
- Possible values:
+.. code-block:: console
- * http:///vimService.wsdl
+ Aug 15 07:31:09 localhost nova-compute[16683]: DEBUG oslo_vmware.service [-] Invoking Folder.CreateVM_Task with opID=oslo.vmware-debb6064-690e-45ac-b0ae-1b94a9638d1f {{(pid=16683) request_handler /opt/stack/oslo.vmware/oslo_vmware/service.py:355}}
- * file:///opt/stack/vmware/SDK/wsdl/vim25/vimService.wsdl
+In this case the ``opID`` is
+``oslo.vmware-debb6064-690e-45ac-b0ae-1b94a9638d1f`` and we can grep the
+vCenter log (usually ``/var/log/vmware/vpxd/vpxd.log``) for it to find out
+whether anything went wrong with the ``CreateVM`` operation.
diff --git a/doc/source/admin/configuration/hypervisor-xen-api.rst b/doc/source/admin/configuration/hypervisor-xen-api.rst
deleted file mode 100644
index 082a37bcbcb..00000000000
--- a/doc/source/admin/configuration/hypervisor-xen-api.rst
+++ /dev/null
@@ -1,468 +0,0 @@
-.. _compute_xen_api:
-
-=============================================
-XenServer (and other XAPI based Xen variants)
-=============================================
-
-.. todo::
-
- os-xenapi version is 0.3.1 currently.
- This document should be modified according to the new version.
- This todo has been reported as `bug 1718606`_.
-
-.. _bug 1718606: https://bugs.launchpad.net/nova/+bug/1718606
-
-
-This section describes XAPI managed hypervisors, and how to use them with
-OpenStack.
-
-Terminology
-~~~~~~~~~~~
-
-Xen
----
-
-A hypervisor that provides the fundamental isolation between virtual machines.
-Xen is open source (GPLv2) and is managed by `XenProject.org
-`_, a cross-industry organization and a Linux
-Foundation Collaborative project.
-
-Xen is a component of many different products and projects. The hypervisor
-itself is very similar across all these projects, but the way that it is
-managed can be different, which can cause confusion if you're not clear which
-toolstack you are using. Make sure you know what `toolstack
-`_ you want before you get
-started. If you want to use Xen with libvirt in OpenStack Compute refer to
-:doc:`hypervisor-xen-libvirt`.
-
-XAPI
-----
-
-XAPI is one of the toolstacks that can control a Xen based hypervisor.
-XAPI's role is similar to libvirt's in the KVM world. The API provided by XAPI
-is called XenAPI. To learn more about the provided interface, look at `XenAPI
-Object Model Overview `_ for definitions of XAPI
-specific terms such as SR, VDI, VIF and PIF.
-
-OpenStack has a compute driver which talks to XAPI; therefore, all XAPI
-managed servers can be used with OpenStack.
-
-XenAPI
-------
-
-XenAPI is the API provided by XAPI. This name is also used by the python
-library that is a client for XAPI. A set of packages to use XenAPI on existing
-distributions can be built using the `xenserver/buildroot
-`_ project.
-
-XenServer
----------
-
-An Open Source virtualization platform that delivers all features needed for
-any server and datacenter implementation including the Xen hypervisor and XAPI
-for the management. For more information and product downloads, visit
-`xenserver.org `_.
-
-XCP
----
-
-XCP is no longer supported. The XCP project recommends that all XCP users
-upgrade to the latest version of XenServer; see `xenserver.org
-`_.
-
-Privileged and unprivileged domains
------------------------------------
-
-A Xen host runs a number of virtual machines, VMs, or domains (the terms are
-synonymous on Xen). One of these is in charge of running the rest of the
-system, and is known as domain 0, or dom0. It is the first domain to boot after
-Xen, and owns the storage and networking hardware, the device drivers, and the
-primary control software. Any other VM is unprivileged, and is known as a domU
-or guest. All customer VMs are unprivileged, but you should note that on
-XenServer (and other XenAPI using hypervisors), the OpenStack Compute service
-(``nova-compute``) also runs in a domU. This gives a level of security
-isolation between the privileged system software and the OpenStack software
-(much of which is customer-facing). This architecture is described in more
-detail later.
-
-Paravirtualized versus hardware virtualized domains
----------------------------------------------------
-
-A Xen virtual machine can be paravirtualized (PV) or hardware virtualized
-(HVM). This refers to the interaction between Xen, domain 0, and the guest VM's
-kernel. PV guests are aware of the fact that they are virtualized and will
-co-operate with Xen and domain 0; this gives them better performance
-characteristics. HVM guests are not aware of their environment, and the
-hardware has to pretend that they are running on an unvirtualized machine. HVM
-guests do not need to modify the guest operating system, which is essential
-when running Windows.
-
-In OpenStack, customer VMs may run in either PV or HVM mode. However, the
-OpenStack domU (that's the one running ``nova-compute``) must be running in PV
-mode.
-
-xapi pool
----------
-
-A resource pool comprises multiple XenServer host installations, bound together
-into a single managed entity which can host virtual machines. When combined with
-shared storage, VMs can dynamically move between XenServer hosts, with minimal
-downtime since no block copying is needed.
-
-XenAPI deployment architecture
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-A basic OpenStack deployment on a XAPI-managed server, assuming that the
-network provider is neutron, looks like this:
-
-.. figure:: /figures/xenserver_architecture.png
- :width: 100%
-
-Key things to note:
-
-* The hypervisor: Xen
-
-* Domain 0: runs XAPI and some small pieces from OpenStack,
- the XAPI plug-ins.
-
-* OpenStack VM: The ``Compute`` service runs in a paravirtualized virtual
- machine, on the host under management. Each host runs a local instance of
- ``Compute``. It is also running neutron plugin-agent
- (``neutron-openvswitch-agent``) to perform local vSwitch configuration.
-
-* OpenStack Compute uses the XenAPI Python library to talk to XAPI, and it uses
- the Management Network to reach from the OpenStack VM to Domain 0.
-
-Some notes on the networking:
-
-* The above diagram assumes DHCP networking.
-
-* There are three main OpenStack networks:
-
- * Management network: RabbitMQ, MySQL, inter-host communication, and
- compute-XAPI communication. Please note that the VM images are downloaded
- by the XenAPI plug-ins, so make sure that the OpenStack Image service is
- accessible through this network. It usually means binding those services to
- the management interface.
-
- * Tenant network: controlled by neutron, this is used for tenant traffic.
-
- * Public network: floating IPs, public API endpoints.
-
-* The networks shown here must be connected to the corresponding physical
- networks within the data center. In the simplest case, three individual
- physical network cards could be used. It is also possible to use VLANs to
- separate these networks. Please note, that the selected configuration must be
- in line with the networking model selected for the cloud. (In case of VLAN
- networking, the physical channels have to be able to forward the tagged
- traffic.)
-
-* With the Networking service, you should enable the Linux bridge in ``Dom0``,
- which is used by the Compute service. ``nova-compute`` will create Linux
- bridges for security groups, and ``neutron-openvswitch-agent`` in the Compute
- node will apply security group rules on these Linux bridges. To implement
- this, you need to remove ``/etc/modprobe.d/blacklist-bridge*`` in ``Dom0``.
-
-Further reading
-~~~~~~~~~~~~~~~
-
-Here are some of the resources available to learn more about Xen:
-
-* `Citrix XenServer official documentation
- `_
-* `What is Xen? by XenProject.org
- `_
-* `Xen Hypervisor project
- `_
-* `Xapi project `_
-* `Further XenServer and OpenStack information
- `_
-
-Install XenServer
-~~~~~~~~~~~~~~~~~
-
-Before you can run OpenStack with XenServer, you must install the hypervisor on
-`an appropriate server `_.
-
-.. note::
-
- Xen is a type 1 hypervisor: When your server starts, Xen is the first
- software that runs. Consequently, you must install XenServer before you
- install the operating system where you want to run OpenStack code. You then
- install ``nova-compute`` into a dedicated virtual machine on the host.
-
-Use the following link to download XenServer's installation media:
-
-* http://xenserver.org/open-source-virtualization-download.html
-
-When you install many servers, you might find it easier to perform `PXE boot
-installations `_. You can also package any
-post-installation changes that you want to make to your XenServer by following
-the instructions of `creating your own XenServer supplemental pack
-`_.
-
-.. important::
-
- When using ``[xenserver]image_handler=direct_vhd`` (the default), make sure
- you use the EXT type of storage repository (SR). Features that require access
- to VHD files (such as copy on write, snapshot and migration) do not work when
- you use the LVM SR. Storage repository (SR) is a XAPI-specific term relating to
- the physical storage where virtual disks are stored.
-
- On the XenServer installation screen, choose the :guilabel:`XenDesktop
- Optimized` option. If you use an answer file, make sure you use
- ``srtype="ext"`` in the ``installation`` tag of the answer file.
-
-Post-installation steps
-~~~~~~~~~~~~~~~~~~~~~~~
-
-The following steps need to be completed after the hypervisor's installation:
-
-#. For resize and migrate functionality, enable password-less SSH
- authentication and set up the ``/images`` directory on dom0.
-
-#. Install the XAPI plug-ins.
-
-#. To support AMI type images, you must set up ``/boot/guest``
- symlink/directory in dom0.
-
-#. Create a paravirtualized virtual machine that can run ``nova-compute``.
-
-#. Install and configure ``nova-compute`` in the above virtual machine.
-
-#. To support live migration requiring no block device migration, you should
- add the current host to a xapi pool using shared storage. You need to know
- the pool master IP address, user name, and password:
-
-.. code-block:: console
-
- xe pool-join master-address=MASTER_IP master-username=root master-password=MASTER_PASSWORD
-
-Install XAPI plug-ins
----------------------
-
-When you use a XAPI managed hypervisor, you can install a Python script (or any
-executable) on the host side, and execute that through XenAPI. These scripts
-are called plug-ins. The OpenStack related XAPI plug-ins live in OpenStack
-os-xenapi code repository. These plug-ins have to be copied to dom0's
-filesystem, to the appropriate directory, where XAPI can find them. It is
-important to ensure that the version of the plug-ins is in line with the
-OpenStack Compute installation you are using.
-
-The plugins should typically be copied from the Nova installation running in
-the Compute's DomU (``pip show os-xenapi`` to find its location), but if you
-want to download the latest version the following procedure can be used.
-
-**Manually installing the plug-ins**
-
-#. Create temporary files/directories:
-
- .. code-block:: console
-
- $ OS_XENAPI_TARBALL=$(mktemp)
- $ OS_XENAPI_SOURCES=$(mktemp -d)
-
-#. Get the source from the openstack.org archives. The example assumes the
- latest release is used, and the XenServer host is accessible as xenserver.
- Match those parameters to your setup.
-
- .. code-block:: console
-
- $ OS_XENAPI_URL=https://tarballs.openstack.org/os-xenapi/os-xenapi-0.1.1.tar.gz
- $ wget -qO "$OS_XENAPI_TARBALL" "$OS_XENAPI_URL"
- $ tar xvf "$OS_XENAPI_TARBALL" -C "$OS_XENAPI_SOURCES"
-
-#. Copy the plug-ins to the hypervisor:
-
- .. code-block:: console
-
- $ PLUGINPATH=$(find $OS_XENAPI_SOURCES -path '*/xapi.d/plugins' -type d -print)
- $ tar -czf - -C "$PLUGINPATH" ./ |
- > ssh root@xenserver tar -xozf - -C /etc/xapi.d/plugins
-
-#. Remove temporary files/directories:
-
- .. code-block:: console
-
- $ rm "$OS_XENAPI_TARBALL"
- $ rm -rf "$OS_XENAPI_SOURCES"
-
-Prepare for AMI type images
----------------------------
-
-To support AMI type images in your OpenStack installation, you must create the
-``/boot/guest`` directory on dom0. One of the OpenStack XAPI plugins will
-extract the kernel and ramdisk from AKI and ARI images and put them in that
-directory.
-
-OpenStack maintains the contents of this directory and its size should not
-increase during normal operation. However, in case of power failures or
-accidental shutdowns, some files might be left over. To prevent these files
-from filling up dom0's filesystem, set up this directory as a symlink that
-points to a subdirectory of the local SR.
-
-Run these commands in dom0 to achieve this setup:
-
-.. code-block:: console
-
- # LOCAL_SR=$(xe sr-list name-label="Local storage" --minimal)
- # LOCALPATH="/var/run/sr-mount/$LOCAL_SR/os-guest-kernels"
- # mkdir -p "$LOCALPATH"
- # ln -s "$LOCALPATH" /boot/guest
-
-Modify dom0 for resize/migration support
-----------------------------------------
-
-To resize servers with XenServer you must:
-
-* Establish a root trust between all hypervisor nodes of your deployment:
-
- To do so, generate an ssh key-pair with the :command:`ssh-keygen` command.
- Ensure that each of your dom0's ``authorized_keys`` files (located in
- ``/root/.ssh/authorized_keys``) contains the public key (found in
- ``/root/.ssh/id_rsa.pub``) of every other dom0. A sketch of this setup
- follows this list.
-
-* Provide a ``/images`` mount point to the dom0 for your hypervisor:
-
- dom0 space is at a premium, so creating a directory in dom0 is potentially
- dangerous and likely to fail, especially when you resize large servers. The
- least you can do is to symlink ``/images`` to your local storage SR. The
- following instructions work for an English-based installation of XenServer
- and in the case of ext3-based SR (with which the resize functionality is
- known to work correctly).
-
- .. code-block:: console
-
- # LOCAL_SR=$(xe sr-list name-label="Local storage" --minimal)
- # IMG_DIR="/var/run/sr-mount/$LOCAL_SR/images"
- # mkdir -p "$IMG_DIR"
- # ln -s "$IMG_DIR" /images
-
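-A minimal sketch of establishing this trust between two dom0s, assuming the
-default key paths (``other-dom0`` stands in for a peer hypervisor), might
-look like:
-
-.. code-block:: console
-
- # ssh-keygen -t rsa -N '' -f /root/.ssh/id_rsa
- # ssh root@other-dom0 'cat >> /root/.ssh/authorized_keys' < /root/.ssh/id_rsa.pub
-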
-XenAPI configuration reference
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-The following section discusses some commonly changed options when using the
-XenAPI driver. The table below provides a complete reference of all
-configuration options available for configuring XAPI with OpenStack.
-
-The recommended way to use XAPI with OpenStack is through the XenAPI driver.
-To enable the XenAPI driver, add the following configuration options to
-``/etc/nova/nova.conf`` and restart ``OpenStack Compute``:
-
-.. code-block:: ini
-
- compute_driver = xenapi.XenAPIDriver
- [xenserver]
- connection_url = http://your_xenapi_management_ip_address
- connection_username = root
- connection_password = your_password
- ovs_integration_bridge = br-int
-
-These connection details are used by OpenStack Compute service to contact your
-hypervisor and are the same details you use to connect XenCenter, the XenServer
-management console, to your XenServer node.
-
-.. note::
-
- The ``connection_url`` is generally the management network IP
- address of the XenServer.
-
-Networking configuration
-------------------------
-
-The Networking service in the Compute node is running
-``neutron-openvswitch-agent``. This manages ``dom0``\'s OVS. You should refer
-to the :neutron-doc:`openvswitch_agent.ini sample
-` for details; however, there are
-several specific items to look out for.
-
-.. code-block:: ini
-
- [agent]
- minimize_polling = False
- root_helper_daemon = xenapi_root_helper
-
- [ovs]
- of_listen_address = management_ip_address
- ovsdb_connection = tcp:your_xenapi_management_ip_address:6640
- bridge_mappings = :, ...
- integration_bridge = br-int
-
- [xenapi]
- connection_url = http://your_xenapi_management_ip_address
- connection_username = root
- connection_password = your_pass_word
-
-.. note::
-
- The ``ovsdb_connection`` is the connection string for the native OVSDB
- backend; you need to enable port 6640 in dom0.
-
-Agent
------
-
-The agent is a piece of software that runs on the instances, and communicates
-with OpenStack. In case of the XenAPI driver, the agent communicates with
-OpenStack through XenStore (see `the Xen Project Wiki
-`_ for more information on XenStore).
-
-If you don't have the guest agent on your VMs, it takes a long time for
-OpenStack Compute to detect that the VM has successfully started. Generally a
-large timeout is required for Windows instances, but you may want to adjust
-``agent_version_timeout`` within the ``[xenserver]`` section.
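-
-For instance, a sketch that raises the timeout (the value shown is purely
-illustrative):
-
-.. code-block:: ini
-
- [xenserver]
- # seconds to wait for the guest agent to report its version
- agent_version_timeout = 300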
-
-VNC proxy address
------------------
-
-Assuming you are talking to XAPI through a management network and XenServer is
-at the address 10.10.1.34, specify the same address for the VNC proxy address:
-``server_proxyclient_address=10.10.1.34``
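-
-As a sketch in ``nova.conf``, assuming a release where this option lives in
-the ``[vnc]`` section:
-
-.. code-block:: ini
-
- [vnc]
- server_proxyclient_address = 10.10.1.34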
-
-Storage
--------
-
-You can specify which Storage Repository to use with nova by editing the
-following flag. To use the local-storage setup by the default installer:
-
-.. code-block:: ini
-
- sr_matching_filter = "other-config:i18n-key=local-storage"
-
-Another alternative is to use the "default" storage (for example if you have
-attached NFS or any other shared storage):
-
-.. code-block:: ini
-
- sr_matching_filter = "default-sr:true"
-
-Use different image handler
----------------------------
-
-We support three different implementations of the glance image handler. You
-can choose a specific image handler based on your needs:
-
-* ``direct_vhd``: This image handler will call XAPI plugins to directly
- process the VHD files in the XenServer SR (Storage Repository). This handler
- only works when the host's SR type is file system based, e.g. ext or nfs.
-
-* ``vdi_local_dev``: This image handler uploads ``tgz`` compressed raw
- disk images to the glance image service.
-
-* ``vdi_remote_stream``: With this image handler, the image data streams
- between XenServer and the glance image service. As it uses the remote
- APIs supported by XAPI, this plugin works for all SR types supported by
- XenServer.
-
-``direct_vhd`` is the default image handler. If you want to use a different
-image handler, you can change the ``image_handler`` config setting within the
-``[xenserver]`` section. For example, the following config setting uses
-``vdi_remote_stream`` as the image handler:
-
-.. code-block:: ini
-
- [xenserver]
- image_handler=vdi_remote_stream
diff --git a/doc/source/admin/configuration/hypervisor-xen-libvirt.rst b/doc/source/admin/configuration/hypervisor-xen-libvirt.rst
deleted file mode 100644
index 2c28cf03d40..00000000000
--- a/doc/source/admin/configuration/hypervisor-xen-libvirt.rst
+++ /dev/null
@@ -1,249 +0,0 @@
-===============
-Xen via libvirt
-===============
-
-OpenStack Compute supports the Xen Project Hypervisor (or Xen). Xen can be
-integrated with OpenStack Compute via the `libvirt `_
-`toolstack `_ or via the `XAPI
-`_ `toolstack
-`_. This section describes how
-to set up OpenStack Compute with Xen and libvirt. For information on how to
-set up Xen with XAPI refer to :doc:`hypervisor-xen-api`.
-
-Installing Xen with libvirt
-~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-At this stage we recommend using the baseline that we use for the `Xen Project
-OpenStack CI Loop
-`_, which
-contains the most recent stability fixes to both Xen and libvirt.
-
-`Xen 4.5.1
-`_
-(or newer) and `libvirt 1.2.15 `_ (or newer)
-contain the minimum required OpenStack improvements for Xen. Although libvirt
-1.2.15 works with Xen, libvirt 1.3.2 or newer is recommended. The necessary
-Xen changes have also been backported to the Xen 4.4.3 stable branch. Please
-check with the Linux and FreeBSD distros you are intending to use as `Dom 0
-`_, whether the relevant
-version of Xen and libvirt are available as installable packages.
-
-The latest releases of Xen and libvirt packages that fulfil the above minimum
-requirements for the various openSUSE distributions can always be found and
-installed from the `Open Build Service
-`_ Virtualization
-project. To install these latest packages, add the Virtualization repository
-to your software management stack and get the newest packages from there. More
-information about the latest Xen and libvirt packages are available `here
-`__ and `here
-`__.
-
-Alternatively, it is possible to use the Ubuntu LTS 14.04 Xen Package
-**4.4.1-0ubuntu0.14.04.4** (Xen 4.4.1) and apply the patches outlined `here
-`__.
-You can also use the Ubuntu LTS 14.04 libvirt package **1.2.2
-libvirt_1.2.2-0ubuntu13.1.7** as baseline and update it to libvirt version
-1.2.15, or 1.2.14 with the patches outlined `here
-`__
-applied. Note that this will require rebuilding these packages partly from
-source.
-
-For further information and latest developments, you may want to consult the
-Xen Project's `mailing lists for OpenStack related issues and questions
-`_.
-
-Configuring Xen with libvirt
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-To enable Xen via libvirt, ensure the following options are set in
-``/etc/nova/nova.conf`` on all hosts running the ``nova-compute`` service.
-
-.. code-block:: ini
-
- compute_driver = libvirt.LibvirtDriver
-
- [libvirt]
- virt_type = xen
-
-Additional configuration options
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-Use the following as a guideline for configuring Xen for use in OpenStack:
-
-#. **Dom0 memory**: Set it between 1GB and 4GB by adding the following
- parameter to the Xen Boot Options in the `grub.conf `_ file.
-
- .. code-block:: ini
-
- dom0_mem=1024M
-
- .. note::
-
- The above memory limits are suggestions and should be based on the
- available compute host resources. For large hosts that will run many
- hundreds of instances, the suggested values may need to be higher.
-
- .. note::
-
- The location of the grub.conf file depends on the host Linux distribution
- that you are using. Please refer to the distro documentation for more
- details (see `Dom 0 `_ for more resources).
-
-#. **Dom0 vcpus**: Set the virtual CPUs to 4 and employ CPU pinning by adding
- the following parameters to the Xen Boot Options in the `grub.conf
- `_ file.
-
- .. code-block:: ini
-
- dom0_max_vcpus=4 dom0_vcpus_pin
-
- .. note::
-
- Note that the above virtual CPU limits are suggestions and should be
- based on the available compute host resources. For large hosts that will
- run many hundreds of instances, the suggested values may need to be
- higher.
-
-#. **PV vs HVM guests**: A Xen virtual machine can be paravirtualized (PV) or
- hardware virtualized (HVM). The virtualization mode determines the
- interaction between Xen, Dom 0, and the guest VM's kernel. PV guests are
- aware of the fact that they are virtualized and will co-operate with Xen and
- Dom 0. The choice of virtualization mode determines performance
- characteristics. For an overview of Xen virtualization modes, see `Xen Guest
- Types `_.
-
- In OpenStack, customer VMs may run in either PV or HVM mode. The mode is a
- property of the operating system image used by the VM, and is changed by
- adjusting the image metadata stored in the Image service. The image
- metadata can be changed using the :command:`openstack` commands.
-
- To choose one of the HVM modes (HVM, HVM with PV Drivers or PVHVM), use
- :command:`openstack` to set the ``vm_mode`` property to ``hvm``:
-
- .. code-block:: console
-
- $ openstack image set --property vm_mode=hvm IMAGE
-
- To choose PV mode, which is supported by NetBSD, FreeBSD and Linux, use the
- following command:
-
- .. code-block:: console
-
- $ openstack image set --property vm_mode=xen IMAGE
-
- .. note::
-
- The default for virtualization mode in nova is PV mode.
-
-#. **Image formats**: Xen supports raw, qcow2 and vhd image formats. For more
- information on image formats, refer to the `OpenStack Virtual Image Guide
- `__ and the
- `Storage Options Guide on the Xen Project Wiki
- `_.
-
-#. **Image metadata**: In addition to the ``vm_mode`` property discussed above,
- the ``hypervisor_type`` property is another important component of the image
- metadata, especially if your cloud contains mixed hypervisor compute nodes.
- Setting the ``hypervisor_type`` property allows the nova scheduler to select
- a compute node running the specified hypervisor when launching instances of
- the image. Image metadata such as ``vm_mode``, ``hypervisor_type``,
- architecture, and others can be set when importing the image to the Image
- service. The metadata can also be changed using the :command:`openstack`
- commands:
-
- .. code-block:: console
-
- $ openstack image set --property hypervisor_type=xen --property vm_mode=hvm IMAGE
-
- For more information on image metadata, refer to the `OpenStack Virtual
- Image Guide `__.
-
-#. **Libguestfs file injection**: OpenStack compute nodes can use `libguestfs
- `_ to inject files into an instance's image prior to
- launching the instance. libguestfs uses libvirt's QEMU driver to start a
- qemu process, which is then used to inject files into the image. When using
- libguestfs for file injection, the compute node must have the libvirt qemu
- driver installed, in addition to the Xen driver. In RPM based distributions,
- the qemu driver is provided by the ``libvirt-daemon-qemu`` package. In
- Debian and Ubuntu, the qemu driver is provided by the ``libvirt-bin``
- package.
-
-Troubleshoot Xen with libvirt
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-**Important log files**: When an instance fails to start, or when you come
-across other issues, you should first consult the following log files:
-
-* ``/var/log/nova/nova-compute.log``
-
-* ``/var/log/libvirt/libxl/libxl-driver.log``
-
-* ``/var/log/xen/qemu-dm-${instancename}.log``
-
-* ``/var/log/xen/xen-hotplug.log``
-
-* ``/var/log/xen/console/guest-${instancename}`` (to enable, see `Enabling Guest
- Console Logs
- `_)
-
-* Host Console Logs (read `Enabling and Retrieving Host Console Logs
- `_).
-
-If you need further help you can ask questions on the mailing lists `xen-users@
-`_,
-`wg-openstack@ `_ or `raise a bug `_ against Xen.
-
-Known issues
-~~~~~~~~~~~~
-
-* **Networking**: Xen via libvirt is currently only supported with
- nova-network. Fixes for a number of bugs are currently being worked on to
- make sure that Xen via libvirt will also work with OpenStack Networking
- (neutron).
-
- .. todo:: Is this still true?
-
-* **Live migration**: Live migration is supported in the libvirt libxl driver
- since version 1.2.5. However, there were a number of issues when used with
- OpenStack, in particular with libvirt migration protocol compatibility. It is
- worth mentioning that libvirt 1.3.0 addresses most of these issues. We do
- however recommend using libvirt 1.3.2, which is fully supported and tested as
- part of the Xen Project CI loop. It addresses live migration monitoring
- related issues and adds support for peer-to-peer migration mode, which nova
- relies on.
-
-* **Live migration monitoring**: On compute nodes running Kilo or later, live
- migration monitoring relies on libvirt APIs that are only implemented from
- libvirt version 1.3.1 onwards. When attempting to live migrate, the migration
- monitoring thread would crash and leave the instance state as "MIGRATING". If
- you experience such an issue and you are running on a version released before
- libvirt 1.3.1, make sure you backport libvirt commits ad71665 and b7b4391
- from upstream.
-
-Additional information and resources
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-The following section contains links to other useful resources.
-
-* `wiki.xenproject.org/wiki/OpenStack `_ - OpenStack Documentation on the Xen Project wiki
-
-* `wiki.xenproject.org/wiki/OpenStack_CI_Loop_for_Xen-Libvirt
- `_ -
- Information about the Xen Project OpenStack CI Loop
-
-* `wiki.xenproject.org/wiki/OpenStack_via_DevStack
- `_ - How to set up
- OpenStack via DevStack
-
-* `Mailing lists for OpenStack related issues and questions
- `_ - This
- list is dedicated to coordinating bug fixes and issues across Xen, libvirt
- and OpenStack and the CI loop.
diff --git a/doc/source/admin/configuration/hypervisor-zvm.rst b/doc/source/admin/configuration/hypervisor-zvm.rst
new file mode 100644
index 00000000000..1915206b99b
--- /dev/null
+++ b/doc/source/admin/configuration/hypervisor-zvm.rst
@@ -0,0 +1,149 @@
+===
+zVM
+===
+
+z/VM System Requirements
+------------------------
+
+* The appropriate APARs installed; the current list can be found at z/VM
+ OpenStack Cloud Information (http://www.vm.ibm.com/sysman/osmntlvl.html).
+
+.. note::
+
+ IBM z Systems hardware requirements are based on both the applications and
+ the load on the system.
+
+
+Active Engine Guide
+-------------------
+
+Active engine is used as an initial configuration and management tool during
+deployed machine startup. Currently the z/VM driver uses ``zvmguestconfigure``
+and ``cloud-init`` as a two stage active engine.
+
+Installation and Configuration of zvmguestconfigure
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Cloudlib4zvm supports initiating changes to a Linux on z Systems virtual
+machine while Linux is shut down or the virtual machine is logged off.
+The changes to Linux are implemented using an activation engine (AE)
+that is run when Linux is booted the next time. The first active engine,
+``zvmguestconfigure``, must be installed in the Linux on z Systems virtual
+server so it can process change request files transmitted by the
+cloudlib4zvm service to the reader of the virtual machine as a class X file.
+
+.. note::
+
+ An additional activation engine, cloud-init, should be installed to handle
+ OpenStack related tailoring of the system.
+ The cloud-init AE relies on tailoring performed by ``zvmguestconfigure``.
+
+Installation and Configuration of cloud-init
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+OpenStack uses cloud-init as its activation engine. Some Linux distributions
+include cloud-init either already installed or available to be installed.
+If your distribution does not include cloud-init, you can download
+the code from https://launchpad.net/cloud-init/+download.
+After installation, if you issue the following
+shell command and no errors occur, cloud-init is installed correctly::
+
+ cloud-init init --local
+
+Installation and configuration of cloud-init differs among different Linux
+distributions, and cloud-init source code may change. This section provides
+general information, but you may have to tailor cloud-init
+to meet the needs of your Linux distribution. You can find a
+community-maintained list of dependencies at http://ibm.biz/cloudinitLoZ.
+
+As of the Rocky release, the z/VM OpenStack support has been tested with
+cloud-init 0.7.4 and 0.7.5 for RHEL6.x and SLES11.x, 0.7.6 for RHEL7.x and
+SLES12.x, and 0.7.8 for Ubuntu 16.04.
+
+During cloud-init installation, some dependency packages may be required.
+You can use zypper and python setuptools to easily resolve these dependencies.
+See https://pypi.python.org/pypi/setuptools for more information.
+
+
+Image guide
+-----------
+
+This guide describes the requirements and steps to create and
+configure images for use with z/VM.
+
+Image Requirements
+~~~~~~~~~~~~~~~~~~
+
+* The following Linux distributions are supported for deploy:
+
+ * RHEL 6.2, 6.3, 6.4, 6.5, 6.6, and 6.7
+ * RHEL 7.0, 7.1 and 7.2
+ * SLES 11.2, 11.3, and 11.4
+ * SLES 12 and SLES 12.1
+ * Ubuntu 16.04
+
+* A supported root disk type for snapshot/spawn. The following are supported:
+
+ * FBA
+ * ECKD
+
+* An image deployed on a compute node must match the disk type supported by
+ that compute node, as configured by the ``zvm_diskpool_type`` property in
+ the `zvmsdk.conf`_ configuration file in `zvm cloud connector`_.
+ A compute node supports deployment on either an ECKD or FBA image,
+ but not both at the same time. If you wish to switch image types,
+ you need to change the ``zvm_diskpool_type`` and
+ ``zvm_diskpool`` properties in the `zvmsdk.conf`_ file accordingly
+ (see the sketch after this list), and then restart the nova-compute
+ service to make the changes take effect.
+
+* If you deploy an instance with an ephemeral disk, both the root disk and the
+ ephemeral disk will be created with the disk type that was specified by
+ ``zvm_diskpool_type`` property in the `zvmsdk.conf`_ file. That property can
+ specify either ECKD or FBA.
+
+* The network interfaces must be IPv4 interfaces.
+
+* Image names should be restricted to the UTF-8 subset, which corresponds to
+ the ASCII character set. In addition, special characters such as ``/``, ``\``,
+ ``$``, ``%``, ``@`` should not be used. For the FBA disk type "vm",
+ capture and deploy is supported only for an FBA disk with a single partition.
+ Capture and deploy is not supported for the FBA disk type "vm" on a CMS
+ formatted FBA disk.
+
+* The virtual server/Linux instance used as the source of the new image should
+ meet the following criteria:
+
+ 1. The root filesystem must not be on a logical volume.
+
+ 2. The minidisk on which the root filesystem resides should be a minidisk of
+ the same type as desired for a subsequent deploy (for example, an ECKD disk
+ image should be captured for a subsequent deploy to an ECKD disk).
+
+ 3. The minidisk should not be a full-pack minidisk, since cylinder 0 on
+ full-pack minidisks is reserved, and should be defined with virtual
+ address 0100.
+
+ 4. The root disk should have a single partition.
+
+ 5. The image being captured should not have any network interface cards (NICs)
+ defined below virtual address 1100.
+
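+As a sketch, switching a compute node to an ECKD disk pool might look like the
+following in `zvmsdk.conf`_, assuming the properties live in a ``[zvm]``
+section and ``POOL1`` is a placeholder pool name:
+
+.. code-block:: ini
+
+ [zvm]
+ zvm_diskpool_type = ECKD
+ zvm_diskpool = POOL1
+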
+In addition to the specified criteria, the following recommendations allow for
+efficient use of the image:
+
+* The minidisk on which the root filesystem resides should be defined as a
+ multiple of full gigabytes in size (for example, 1GB or 2GB).
+ OpenStack specifies disk sizes in full gigabyte values, whereas z/VM
+ handles disk sizes in other ways (cylinders for ECKD disks, blocks for FBA
+ disks, and so on). See the appropriate online information if you need to
+ convert cylinders or blocks to gigabytes; for example:
+ http://www.mvsforums.com/helpboards/viewtopic.php?t=8316.
+
+* During subsequent deploys of the image, the OpenStack code will ensure that
+ a disk image is not copied to a disk smaller than the source disk,
+ as this would result in loss of data. The disk specified in
+ the flavor should therefore be equal to or slightly larger than the source
+ virtual machine's root disk.
+
+.. _zvmsdk.conf: https://cloudlib4zvm.readthedocs.io/en/latest/configuration.html#configuration-options
+.. _zvm cloud connector: https://cloudlib4zvm.readthedocs.io/en/latest/
diff --git a/doc/source/admin/configuration/hypervisors.rst b/doc/source/admin/configuration/hypervisors.rst
index 88ed368aa90..ed913b083f3 100644
--- a/doc/source/admin/configuration/hypervisors.rst
+++ b/doc/source/admin/configuration/hypervisors.rst
@@ -5,16 +5,15 @@ Hypervisors
.. toctree::
:maxdepth: 1
- hypervisor-basics.rst
- hypervisor-kvm.rst
- hypervisor-qemu.rst
- hypervisor-xen-api.rst
- hypervisor-xen-libvirt.rst
- hypervisor-lxc.rst
- hypervisor-vmware.rst
- hypervisor-hyper-v.rst
- hypervisor-virtuozzo.rst
- hypervisor-powervm.rst
+ hypervisor-kvm
+ hypervisor-qemu
+ hypervisor-lxc
+ hypervisor-vmware
+ hypervisor-hyper-v
+ hypervisor-virtuozzo
+ hypervisor-powervm
+ hypervisor-zvm
+ hypervisor-ironic
OpenStack Compute supports many hypervisors, which might make it difficult for
you to choose one. Most installations use only one hypervisor. However, you
@@ -38,31 +37,52 @@ The following hypervisors are supported:
* `VMware vSphere`_ 5.1.0 and newer - Runs VMware-based Linux and Windows
images through a connection with a vCenter server.
-* `Xen (using libvirt)`_ - Xen Project Hypervisor using libvirt as
- management interface into ``nova-compute`` to run Linux, Windows, FreeBSD and
- NetBSD virtual machines.
-
-* `XenServer`_ - XenServer, Xen Cloud Platform (XCP) and other XAPI based Xen
- variants runs Linux or Windows virtual machines. You must install the
- ``nova-compute`` service in a para-virtualized VM.
-
* `Hyper-V`_ - Server virtualization with Microsoft Hyper-V, used to run
Windows, Linux, and FreeBSD virtual machines. Runs ``nova-compute`` natively
on the Windows virtualization platform.
* `Virtuozzo`_ 7.0.0 and newer - OS Containers and Kernel-based Virtual
- Machines supported via libvirt virt_type=parallels. The supported formats
- include ploop and qcow2 images.
+ Machines supported. The supported formats include ploop and qcow2 images.
-* `PowerVM`_ Server virtualization with IBM PowerVM for AIX, IBM i, and Linux
+* `PowerVM`_ - Server virtualization with IBM PowerVM for AIX, IBM i, and Linux
workloads on the Power Systems platform.
-.. _KVM: http://www.linux-kvm.org/page/Main_Page
-.. _LXC: https://linuxcontainers.org/
-.. _QEMU: http://wiki.qemu.org/Manual
-.. _VMware vSphere: https://www.vmware.com/support/vsphere-hypervisor
-.. _Xen (using libvirt): http://www.xenproject.org
-.. _XenServer: http://xenserver.org
-.. _Hyper-V: https://azure.microsoft.com/en-us/
-.. _Virtuozzo: https://www.virtuozzo.com/products/vip.html#product-virtuozzo/
+* `zVM`_ - Server virtualization on z Systems and IBM LinuxONE; it can run
+ Linux, z/OS, and more.
+
+* `Ironic`_ - OpenStack project which provisions bare metal (as opposed to virtual)
+ machines.
+
+Nova supports hypervisors via virt drivers. Nova has the following in-tree
+virt drivers (a configuration sketch follows the list):
+
+* :oslo.config:option:`compute_driver` = ``libvirt.LibvirtDriver``
+
+ This driver runs on Linux and supports multiple hypervisor backends, which
+ can be configured via the :oslo.config:option:`libvirt.virt_type` config
+ option.
+
+* :oslo.config:option:`compute_driver` = ``ironic.IronicDriver``
+
+* :oslo.config:option:`compute_driver` = ``vmwareapi.VMwareVCDriver``
+
+* :oslo.config:option:`compute_driver` = ``hyperv.HyperVDriver``
+
+* :oslo.config:option:`compute_driver` = ``powervm.PowerVMDriver``
+
+* :oslo.config:option:`compute_driver` = ``zvm.ZVMDriver``
+
+* :oslo.config:option:`compute_driver` = ``fake.FakeDriver``
+
+ This driver does not spawn any virtual machines and therefore should only be
+ used during testing.
+
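+As a configuration sketch, selecting the libvirt driver with a KVM backend in
+``nova.conf`` might look like:
+
+.. code-block:: ini
+
+ [DEFAULT]
+ compute_driver = libvirt.LibvirtDriver
+
+ [libvirt]
+ virt_type = kvm
+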
+.. _KVM: https://www.linux-kvm.org/page/Main_Page
+.. _LXC: https://linuxcontainers.org
+.. _QEMU: https://wiki.qemu.org/Manual
+.. _VMware vSphere: https://www.vmware.com/support/vsphere-hypervisor.html
+.. _Hyper-V: https://docs.microsoft.com/en-us/windows-server/virtualization/hyper-v/hyper-v-technology-overview
+.. _Virtuozzo: https://www.virtuozzo.com/products/vz7.html
.. _PowerVM: https://www.ibm.com/us-en/marketplace/ibm-powervm
+.. _zVM: https://www.ibm.com/it-infrastructure/z/zvm
+.. _Ironic: https://docs.openstack.org/ironic/latest/
diff --git a/doc/source/admin/configuration/index.rst b/doc/source/admin/configuration/index.rst
index 51a3b810eff..233597b1fe4 100644
--- a/doc/source/admin/configuration/index.rst
+++ b/doc/source/admin/configuration/index.rst
@@ -1,6 +1,6 @@
-===============
- Configuration
-===============
+=============
+Configuration
+=============
To configure your Compute installation, you must define configuration options
in these files:
@@ -19,12 +19,11 @@ A list of config options based on different topics can be found below:
.. toctree::
:maxdepth: 1
- /admin/configuration/api.rst
- /admin/configuration/resize.rst
- /admin/configuration/fibre-channel.rst
- /admin/configuration/iscsi-offload.rst
- /admin/configuration/hypervisors.rst
- /admin/configuration/schedulers.rst
- /admin/configuration/cells.rst
- /admin/configuration/logs.rst
- /admin/configuration/samples/index.rst
+ /admin/configuration/api
+ /admin/configuration/resize
+ /admin/configuration/cross-cell-resize
+ /admin/configuration/fibre-channel
+ /admin/configuration/iscsi-offload
+ /admin/configuration/hypervisors
+ /admin/configuration/logs
+ /admin/configuration/samples/index
diff --git a/doc/source/admin/configuration/iscsi-offload.rst b/doc/source/admin/configuration/iscsi-offload.rst
index ac477082ebd..921869db3cf 100644
--- a/doc/source/admin/configuration/iscsi-offload.rst
+++ b/doc/source/admin/configuration/iscsi-offload.rst
@@ -9,8 +9,8 @@ desired. Once an open-iscsi interface is configured, the iface name
parameter for use. All iSCSI sessions will be bound to this iSCSI interface.
Currently supported transports (``iface.transport_name``) are ``be2iscsi``,
-``bnx2i``, ``cxgb3i``, ``cxgb4i``, ``qla4xxx``, ``ocs``. Configuration changes
-are required on the compute node only.
+``bnx2i``, ``cxgb3i``, ``cxgb4i``, ``qla4xxx``, ``ocs``, ``tcp``. Configuration
+changes are required on the compute node only.
iSER is supported using the separate iSER LibvirtISERVolumeDriver and will be
rejected if used via the ``iscsi_iface`` parameter.
@@ -69,5 +69,5 @@ iSCSI iface configuration
to work. Some transports may require ``iface.ipaddress`` and
``iface.net_ifacename`` as well to bind correctly.
- Detailed configuration instructions can be found at
- http://www.open-iscsi.org/docs/README.
+ Detailed configuration instructions can be found at:
+ https://github.com/open-iscsi/open-iscsi/blob/master/README
diff --git a/doc/source/admin/configuration/logs.rst b/doc/source/admin/configuration/logs.rst
index 74d3919fcc4..7ecdf1b358f 100644
--- a/doc/source/admin/configuration/logs.rst
+++ b/doc/source/admin/configuration/logs.rst
@@ -22,21 +22,9 @@ The corresponding log file of each Compute service is stored in the
* - ``nova-conductor.log``
- ``openstack-nova-conductor``
- ``nova-conductor``
- * - ``nova-consoleauth.log``
- - ``openstack-nova-consoleauth``
- - ``nova-consoleauth``
- * - ``nova-network.log`` [#a]_
- - ``openstack-nova-network``
- - ``nova-network``
* - ``nova-manage.log``
- ``nova-manage``
- ``nova-manage``
* - ``nova-scheduler.log``
- ``openstack-nova-scheduler``
- ``nova-scheduler``
-
-.. rubric:: Footnotes
-
-.. [#a] The ``nova`` network service (``openstack-nova-network``/
- ``nova-network``) only runs in deployments that are not configured
- to use the Networking service (``neutron``).
diff --git a/doc/source/admin/configuration/resize.rst b/doc/source/admin/configuration/resize.rst
index 7dd2041815f..abf0828f33b 100644
--- a/doc/source/admin/configuration/resize.rst
+++ b/doc/source/admin/configuration/resize.rst
@@ -1,12 +1,19 @@
-================
-Configure resize
-================
+======
+Resize
+======
Resize (or Server resize) is the ability to change the flavor of a server, thus
allowing it to upscale or downscale according to user needs. For this feature
to work properly, you might need to configure some underlying virt layers.
-.. todo:: This document needs to be updated for other virt drivers, shared
+This document describes how to configure hosts for standard resize.
+For information on :term:`cross-cell resize `, refer to
+:doc:`/admin/configuration/cross-cell-resize`.
+
+Virt drivers
+------------
+
+.. todo:: This section needs to be updated for other virt drivers, shared
storage considerations, etc.
KVM
@@ -20,9 +27,13 @@ compute host to another is needed to copy the VM file across.
Cloud end users can find out how to resize a server by reading
:doc:`/user/resize`.
-XenServer
-~~~~~~~~~
-To get resize to work with XenServer (and XCP), you need to establish a root
-trust between all hypervisor nodes and provide an ``/image`` mount point to
-your hypervisors dom0.
+Automatic confirm
+-----------------
+
+There is a periodic task configured by configuration option
+:oslo.config:option:`resize_confirm_window` (in seconds).
+If this value is not 0, the ``nova-compute`` service will check whether
+servers have been in a resized state for longer than the value of
+:oslo.config:option:`resize_confirm_window` and, if so, will automatically
+confirm the resize of those servers.
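+
+For example, to confirm resizes automatically after one minute (an
+illustrative value):
+
+.. code-block:: ini
+
+ [DEFAULT]
+ resize_confirm_window = 60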
diff --git a/doc/source/admin/configuration/samples/index.rst b/doc/source/admin/configuration/samples/index.rst
index 6db5a16a488..30035c1fb0f 100644
--- a/doc/source/admin/configuration/samples/index.rst
+++ b/doc/source/admin/configuration/samples/index.rst
@@ -7,6 +7,15 @@ Files in this section can be found in ``/etc/nova``.
.. toctree::
:maxdepth: 2
- api-paste.ini.rst
- policy.yaml.rst
- rootwrap.conf.rst
+ api-paste.ini
+ rootwrap.conf
+
+.. # NOTE(gmann): Keep policy sample file for HTML only.
+ # Sample files are too large and cause a TeX memory issue.
+ # ref bug# https://bugs.launchpad.net/nova/+bug/1883200
+.. only:: html
+
+ .. toctree::
+ :maxdepth: 2
+
+ policy.yaml
diff --git a/doc/source/admin/configuration/schedulers.rst b/doc/source/admin/configuration/schedulers.rst
deleted file mode 100644
index 4ef7efcbd9b..00000000000
--- a/doc/source/admin/configuration/schedulers.rst
+++ /dev/null
@@ -1,1340 +0,0 @@
-==================
-Compute schedulers
-==================
-
-Compute uses the ``nova-scheduler`` service to determine how to dispatch
-compute requests. For example, the ``nova-scheduler`` service determines on
-which host a VM should launch. In the context of filters, the term ``host``
-means a physical node that has a ``nova-compute`` service running on it. You
-can configure the scheduler through a variety of options.
-
-Compute is configured with the following default scheduler options in the
-``/etc/nova/nova.conf`` file:
-
-.. code-block:: ini
-
- [scheduler]
- driver = filter_scheduler
-
- [filter_scheduler]
- available_filters = nova.scheduler.filters.all_filters
- enabled_filters = RetryFilter, AvailabilityZoneFilter, ComputeFilter, ComputeCapabilitiesFilter, ImagePropertiesFilter, ServerGroupAntiAffinityFilter, ServerGroupAffinityFilter
-
-By default, the scheduler ``driver`` is configured as a filter scheduler, as
-described in the next section. In the default configuration, this scheduler
-considers hosts that meet all the following criteria:
-
-* Have not been attempted for scheduling purposes (``RetryFilter``).
-
-* Are in the requested availability zone (``AvailabilityZoneFilter``).
-
-* Can service the request (``ComputeFilter``).
-
-* Satisfy the extra specs associated with the instance type
- (``ComputeCapabilitiesFilter``).
-
-* Satisfy any architecture, hypervisor type, or virtual machine mode properties
- specified on the instance's image properties (``ImagePropertiesFilter``).
-
-* Are on a different host than other instances of a group (if requested)
- (``ServerGroupAntiAffinityFilter``).
-
-* Are in a set of group hosts (if requested) (``ServerGroupAffinityFilter``).
-
-The scheduler chooses a new host when an instance is migrated.
-
-When evacuating instances from a host, the scheduler service honors the target
-host defined by the administrator on the :command:`nova evacuate` command. If
-a target is not defined by the administrator, the scheduler determines the
-target host. For information about instance evacuation, see
-:ref:`Evacuate instances `.
-
-.. _compute-scheduler-filters:
-
-Filter scheduler
-~~~~~~~~~~~~~~~~
-
-The filter scheduler (``nova.scheduler.filter_scheduler.FilterScheduler``) is
-the default scheduler for scheduling virtual machine instances. It supports
-filtering and weighting to make informed decisions on where a new instance
-should be created.
-
-When the filter scheduler receives a request for a resource, it first applies
-filters to determine which hosts are eligible for consideration when
-dispatching a resource. Filters are binary: either a host is accepted by the
-filter, or it is rejected. Hosts that are accepted by the filter are then
-processed by a different algorithm to decide which hosts to use for that
-request, described in the :ref:`weights` section.
-
-**Filtering**
-
-.. figure:: /figures/filteringWorkflow1.png
-
-The ``available_filters`` configuration option in ``nova.conf``
-provides the Compute service with the list of the filters that are available
-for use by the scheduler. The default setting specifies all of the filters that
-are included with the Compute service:
-
-.. code-block:: ini
-
- [filter_scheduler]
- available_filters = nova.scheduler.filters.all_filters
-
-This configuration option can be specified multiple times. For example, if you
-implemented your own custom filter in Python called ``myfilter.MyFilter`` and
-you wanted to use both the built-in filters and your custom filter, your
-``nova.conf`` file would contain:
-
-.. code-block:: ini
-
- [filter_scheduler]
- available_filters = nova.scheduler.filters.all_filters
- available_filters = myfilter.MyFilter
-
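-A hypothetical ``myfilter.MyFilter`` is, in outline, a class with a
-``host_passes`` method; a minimal sketch, assuming the standard
-``BaseHostFilter`` base class:
-
-.. code-block:: python
-
- from nova.scheduler import filters
-
- class MyFilter(filters.BaseHostFilter):
-     """Only pass hosts whose names do not start with 'test'."""
-
-     def host_passes(self, host_state, spec_obj):
-         # host_state describes a candidate host; spec_obj is the
-         # RequestSpec being scheduled.
-         return not host_state.host.startswith('test')
-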
-The ``enabled_filters`` configuration option in ``nova.conf`` defines
-the list of filters that are applied by the ``nova-scheduler`` service. The
-default filters are:
-
-.. code-block:: ini
-
- [filter_scheduler]
- enabled_filters = RetryFilter, AvailabilityZoneFilter, ComputeFilter, ComputeCapabilitiesFilter, ImagePropertiesFilter, ServerGroupAntiAffinityFilter, ServerGroupAffinityFilter
-
-Compute filters
-~~~~~~~~~~~~~~~
-
-The following sections describe the available compute filters.
-
-AggregateCoreFilter
--------------------
-
-Filters host by CPU core numbers with a per-aggregate ``cpu_allocation_ratio``
-value. If the per-aggregate value is not found, the value falls back to the
-global setting. If the host is in more than one aggregate and more than one
-value is found, the minimum value will be used. For information about how to
-use this filter, see :ref:`host-aggregates`. See also :ref:`CoreFilter`.
-
-AggregateDiskFilter
--------------------
-
-Filters host by disk allocation with a per-aggregate ``disk_allocation_ratio``
-value. If the per-aggregate value is not found, the value falls back to the
-global setting. If the host is in more than one aggregate and more than one
-value is found, the minimum value will be used. For information about how to
-use this filter, see :ref:`host-aggregates`. See also :ref:`DiskFilter`.
-
-AggregateImagePropertiesIsolation
----------------------------------
-
-Matches properties defined in an image's metadata against those of aggregates
-to determine host matches:
-
-* If a host belongs to an aggregate and the aggregate defines one or more
- metadata that matches an image's properties, that host is a candidate to boot
- the image's instance.
-
-* If a host does not belong to any aggregate, it can boot instances from all
- images.
-
-For example, the following aggregate ``myWinAgg`` has the Windows operating
-system as metadata (named 'windows'):
-
-.. code-block:: console
-
- $ openstack aggregate show myWinAgg
- +-------------------+----------------------------+
- | Field | Value |
- +-------------------+----------------------------+
- | availability_zone | zone1 |
- | created_at | 2017-01-01T15:36:44.000000 |
- | deleted | False |
- | deleted_at | None |
- | hosts | [u'sf-devel'] |
- | id | 1 |
- | name | myWinAgg |
- | properties | os_distro='windows' |
- | updated_at | None |
- +-------------------+----------------------------+
-
-In this example, because the following Win-2012 image has the ``windows``
-property, it boots on the ``sf-devel`` host (all other filters being equal):
-
-.. code-block:: console
-
- $ openstack image show Win-2012
- +------------------+------------------------------------------------------+
- | Field | Value |
- +------------------+------------------------------------------------------+
- | checksum | ee1eca47dc88f4879d8a229cc70a07c6 |
- | container_format | bare |
- | created_at | 2016-12-13T09:30:30Z |
- | disk_format | qcow2 |
- | ... |
- | name | Win-2012 |
- | ... |
- | properties | os_distro='windows' |
- | ... |
-
-You can configure the ``AggregateImagePropertiesIsolation`` filter by using the
-following options in the ``nova.conf`` file:
-
-.. code-block:: ini
-
- # Considers only keys matching the given namespace (string).
- # Multiple values can be given, as a comma-separated list.
- aggregate_image_properties_isolation_namespace =
-
- # Separator used between the namespace and keys (string).
- aggregate_image_properties_isolation_separator = .
-
-.. _AggregateInstanceExtraSpecsFilter:
-
-AggregateInstanceExtraSpecsFilter
----------------------------------
-
-Matches properties defined in extra specs for an instance type against
-admin-defined properties on a host aggregate. Works with specifications that
-are scoped with ``aggregate_instance_extra_specs``. Multiple values can be
-given, as a comma-separated list. For backward compatibility, also works with
-non-scoped specifications; this action is highly discouraged because it
-conflicts with :ref:`ComputeCapabilitiesFilter` filter when you enable both
-filters. For information about how to use this filter, see the
-:ref:`host-aggregates` section.
-
-AggregateIoOpsFilter
---------------------
-
-Filters host by disk allocation with a per-aggregate ``max_io_ops_per_host``
-value. If the per-aggregate value is not found, the value falls back to the
-global setting. If the host is in more than one aggregate and more than one
-value is found, the minimum value will be used. For information about how to
-use this filter, see :ref:`host-aggregates`. See also :ref:`IoOpsFilter`.
-
-AggregateMultiTenancyIsolation
-------------------------------
-
-Ensures that the tenant (or list of tenants) creates all instances only on
-specific :ref:`host-aggregates`. If a host is in an aggregate that has the
-``filter_tenant_id`` metadata key, the host creates instances from only that
-tenant or list of tenants. A host can be in different aggregates. If a host
-does not belong to an aggregate with the metadata key, the host can create
-instances from all tenants. This setting does not isolate the aggregate from
-other tenants. Any other tenant can continue to build instances on the
-specified aggregate.
-
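-For example, dedicating an aggregate to a single tenant might look like the
-following (``myagg`` and the tenant ID are placeholders):
-
-.. code-block:: console
-
- $ openstack aggregate set --property filter_tenant_id=9691591f913949818a514f95286a6b90 myagg
-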
-AggregateNumInstancesFilter
----------------------------
-
-Filters host by number of instances with a per-aggregate
-``max_instances_per_host`` value. If the per-aggregate value is not found, the
-value falls back to the global setting. If the host is in more than one
-aggregate and thus more than one value is found, the minimum value will be
-used. For information about how to use this filter, see
-:ref:`host-aggregates`. See also :ref:`NumInstancesFilter`.
-
-AggregateRamFilter
-------------------
-
-Filters host by RAM allocation of instances with a per-aggregate
-``ram_allocation_ratio`` value. If the per-aggregate value is not found, the
-value falls back to the global setting. If the host is in more than one
-aggregate and thus more than one value is found, the minimum value will be
-used. For information about how to use this filter, see
-:ref:`host-aggregates`. See also :ref:`ramfilter`.
-
-AggregateTypeAffinityFilter
----------------------------
-
-This filter passes hosts if no ``instance_type`` key is set or the
-``instance_type`` aggregate metadata value contains the name of the
-``instance_type`` requested. The value of the ``instance_type`` metadata entry
-is a string that may contain either a single ``instance_type`` name or a
-comma-separated list of ``instance_type`` names, such as ``m1.nano`` or
-``m1.nano,m1.small``. For information about how to use this filter, see
-:ref:`host-aggregates`.
-
-AllHostsFilter
---------------
-
-This is a no-op filter. It does not eliminate any of the available hosts.
-
-AvailabilityZoneFilter
-----------------------
-
-Filters hosts by availability zone. You must enable this filter for the
-scheduler to respect availability zones in requests.
-
-.. _ComputeCapabilitiesFilter:
-
-ComputeCapabilitiesFilter
--------------------------
-
-Matches properties defined in extra specs for an instance type against compute
-capabilities. If an extra specs key contains a colon (``:``), anything before
-the colon is treated as a namespace and anything after the colon is treated as
-the key to be matched. If a namespace is present and is not ``capabilities``,
-the filter ignores the namespace. For backward compatibility, also treats the
-extra specs key as the key to be matched if no namespace is present; this
-action is highly discouraged because it conflicts with
-:ref:`AggregateInstanceExtraSpecsFilter` filter when you enable both filters.
-
-Some virt drivers support reporting CPU traits to the Placement service. With that
-feature available, you should consider using traits in flavors instead of
-ComputeCapabilitiesFilter, because traits provide consistent naming for CPU
-features in some virt drivers and querying traits is efficient. For more detail, please see
-`Support Matrix `_,
-:ref:`Required traits `,
-:ref:`Forbidden traits ` and
-`Report CPU features to the Placement service `_.
-
-.. _ComputeFilter:
-
-ComputeFilter
--------------
-
-Passes all hosts that are operational and enabled.
-
-In general, you should always enable this filter.
-
-.. _CoreFilter:
-
-CoreFilter
-----------
-
-Only schedules instances on hosts if sufficient CPU cores are available. If
-this filter is not set, the scheduler might over-provision a host based on
-cores. For example, the virtual cores running on an instance may exceed the
-physical cores.
-
-You can configure this filter to enable a fixed amount of vCPU overcommitment
-by using the ``cpu_allocation_ratio`` configuration option in ``nova.conf``.
-The default setting is:
-
-.. code-block:: ini
-
- cpu_allocation_ratio = 16.0
-
-With this setting, if 8 vCPUs are on a node, the scheduler allows instances
-with up to 128 vCPUs in total to run on that node.
-
-To disallow vCPU overcommitment set:
-
-.. code-block:: ini
-
- cpu_allocation_ratio = 1.0
-
-.. note::
-
- The Compute API always returns the actual number of CPU cores available on a
- compute node regardless of the value of the ``cpu_allocation_ratio``
- configuration key. As a result changes to the ``cpu_allocation_ratio`` are
- not reflected via the command line clients or the dashboard. Changes to
- this configuration key are only taken into account internally in the
- scheduler.
-
-DifferentHostFilter
--------------------
-
-Schedules the instance on a different host from a set of instances. To take
-advantage of this filter, the requester must pass a scheduler hint, using
-``different_host`` as the key and a list of instance UUIDs as the value. This
-filter is the opposite of the ``SameHostFilter``. Using the
-:command:`openstack server create` command, use the ``--hint`` flag. For
-example:
-
-.. code-block:: console
-
- $ openstack server create --image cedef40a-ed67-4d10-800e-17455edce175 \
- --flavor 1 --hint different_host=a0cf03a5-d921-4877-bb5c-86d26cf818e1 \
- --hint different_host=8c19174f-4220-44f0-824a-cd1eeef10287 server-1
-
-With the API, use the ``os:scheduler_hints`` key. For example:
-
-.. code-block:: json
-
- {
-     "server": {
-         "name": "server-1",
-         "imageRef": "cedef40a-ed67-4d10-800e-17455edce175",
-         "flavorRef": "1"
-     },
-     "os:scheduler_hints": {
-         "different_host": [
-             "a0cf03a5-d921-4877-bb5c-86d26cf818e1",
-             "8c19174f-4220-44f0-824a-cd1eeef10287"
-         ]
-     }
- }
-
-.. _DiskFilter:
-
-DiskFilter
-----------
-
-Only schedules instances on hosts if there is sufficient disk space available
-for root and ephemeral storage.
-
-You can configure this filter to enable a fixed amount of disk overcommitment
-by using the ``disk_allocation_ratio`` configuration option in the
-``nova.conf`` configuration file. The default setting disables overcommitment
-entirely, so a VM can be launched only if there is a sufficient amount of disk
-space available on a host:
-
-.. code-block:: ini
-
- disk_allocation_ratio = 1.0
-
-DiskFilter always considers the value of the ``disk_available_least`` property
-rather than the ``free_disk_gb`` property of a hypervisor's statistics:
-
-.. code-block:: console
-
- $ openstack hypervisor stats show
- +----------------------+-------+
- | Field                | Value |
- +----------------------+-------+
- | count                | 1     |
- | current_workload     | 0     |
- | disk_available_least | 14    |
- | free_disk_gb         | 27    |
- | free_ram_mb          | 15374 |
- | local_gb             | 27    |
- | local_gb_used        | 0     |
- | memory_mb            | 15886 |
- | memory_mb_used       | 512   |
- | running_vms          | 0     |
- | vcpus                | 8     |
- | vcpus_used           | 0     |
- +----------------------+-------+
-
-As shown in the command output above, the available disk space can be less
-than the free disk space. This is because the ``disk_available_least``
-property accounts for the virtual size rather than the actual size of images.
-If you use an image format that is sparse or copy-on-write, so that each
-virtual instance does not require a 1:1 allocation of virtual disk to physical
-storage, it may be useful to allow the overcommitment of disk space.
-
-To enable scheduling instances while overcommitting disk resources on the node,
-set the ``disk_allocation_ratio`` configuration option to a value greater than
-``1.0``. For example, to allow up to 2x overcommitment:
-
-.. code-block:: ini
-
- disk_allocation_ratio = 2.0
-
-.. note::
-
- If the ratio is set to a value greater than ``1``, we recommend monitoring
- the free disk space, because instances may malfunction if the host runs out
- of physical disk space.
-
-.. _ImagePropertiesFilter:
-
-ImagePropertiesFilter
----------------------
-
-Filters hosts based on properties defined on the instance's image. It passes
-hosts that can support the specified image properties contained in the
-instance. Properties include the architecture, hypervisor type, hypervisor
-version (for Xen hypervisor type only), and virtual machine mode.
-
-For example, an instance might require a host that runs an ARM-based processor,
-and QEMU as the hypervisor. You can decorate an image with these properties by
-using:
-
-.. code-block:: console
-
- $ openstack image set --architecture arm --property hypervisor_type=qemu \
- img-uuid
-
-The image properties that the filter checks for are:
-
-``architecture``
- describes the machine architecture required by the image. Examples are
- ``i686``, ``x86_64``, ``arm``, and ``ppc64``.
-
-``hypervisor_type``
- describes the hypervisor required by the image. Examples are ``xen``,
- ``qemu``, and ``xenapi``.
-
- .. note::
-
- ``qemu`` is used for both QEMU and KVM hypervisor types.
-
-``hypervisor_version_requires``
- describes the hypervisor version required by the image. The property is
- supported for Xen hypervisor type only. It can be used to enable support for
- multiple hypervisor versions, and to prevent instances with newer Xen tools
- from being provisioned on an older version of a hypervisor. If available, the
- property value is compared to the hypervisor version of the compute host.
-
- To filter the hosts by the hypervisor version, add the
- ``hypervisor_version_requires`` property on the image as metadata and pass an
- operator and a required hypervisor version as its value:
-
- .. code-block:: console
-
- $ openstack image set --property hypervisor_type=xen --property \
- hypervisor_version_requires=">=4.3" img-uuid
-
-``vm_mode``
- describes the hypervisor application binary interface (ABI) required by the
- image. Examples are ``xen`` for Xen 3.0 paravirtual ABI, ``hvm`` for native
- ABI, ``uml`` for User Mode Linux paravirtual ABI, ``exe`` for container virt
- executable ABI.
-
-IsolatedHostsFilter
--------------------
-
-Allows the admin to define a special (isolated) set of images and a special
-(isolated) set of hosts, such that the isolated images can only run on the
-isolated hosts, and the isolated hosts can only run isolated images. The flag
-``restrict_isolated_hosts_to_isolated_images`` can be used to force isolated
-hosts to only run isolated images.
-
-The logic within the filter depends on the
-``restrict_isolated_hosts_to_isolated_images`` config option, which defaults
-to True. When True, a volume-backed instance will not be put on an isolated
-host. When False, a volume-backed instance can go on any host, isolated or
-not.
-
-The admin must specify the isolated set of images and hosts in the
-``nova.conf`` file using the ``isolated_hosts`` and ``isolated_images``
-configuration options. For example:
-
-.. code-block:: ini
-
- [filter_scheduler]
- isolated_hosts = server1, server2
- isolated_images = 342b492c-128f-4a42-8d3a-c5088cf27d13, ebd267a6-ca86-4d6c-9a0e-bd132d6b7d09
-
-.. _IoOpsFilter:
-
-IoOpsFilter
------------
-
-The IoOpsFilter filters hosts by the number of concurrent I/O operations on
-them. Hosts with too many concurrent I/O operations will be filtered out. The
-``max_io_ops_per_host`` option specifies the maximum number of I/O intensive
-instances allowed to run on a host. A host will be ignored by the scheduler if
-more than ``max_io_ops_per_host`` instances in build, resize, snapshot,
-migrate, rescue or unshelve task states are running on it.
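-
-For example, to ignore hosts that are already running eight or more I/O
-intensive instances (eight is the upstream default; shown here only as an
-illustration), set:
-
-.. code-block:: ini
-
- [filter_scheduler]
- # Assumption: the option lives in [filter_scheduler] in your release.
- max_io_ops_per_host = 8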
-
-JsonFilter
-----------
-
-The JsonFilter allows a user to construct a custom filter by passing a
-scheduler hint in JSON format. The following operators are supported:
-
-* =
-* <
-* >
-* in
-* <=
-* >=
-* not
-* or
-* and
-
-The filter supports the following variables:
-
-* ``$free_ram_mb``
-* ``$free_disk_mb``
-* ``$total_usable_ram_mb``
-* ``$vcpus_total``
-* ``$vcpus_used``
-
-Using the :command:`openstack server create` command, use the ``--hint`` flag:
-
-.. code-block:: console
-
- $ openstack server create --image 827d564a-e636-4fc4-a376-d36f7ebe1747 \
- --flavor 1 --hint query='[">=","$free_ram_mb",1024]' server1
-
-With the API, use the ``os:scheduler_hints`` key:
-
-.. code-block:: json
-
- {
-     "server": {
-         "name": "server-1",
-         "imageRef": "cedef40a-ed67-4d10-800e-17455edce175",
-         "flavorRef": "1"
-     },
-     "os:scheduler_hints": {
-         "query": "[\">=\", \"$free_ram_mb\", 1024]"
-     }
- }
-
-MetricsFilter
--------------
-
-Filters hosts based on the meters configured in ``weight_setting``. Only hosts
-that report all of the configured meters are passed, so that the metrics
-weigher does not fail due to hosts with missing meters.
-
-NUMATopologyFilter
-------------------
-
-Filters hosts based on the NUMA topology that was specified for the instance
-through the use of flavor ``extra_specs`` in combination with the image
-properties, as described in detail in the `related nova-spec document
-`_. The filter
-will try to match the exact NUMA cells of the instance to those of the host. It
-will consider the standard over-subscription limits for each host NUMA cell,
-and provide limits to the compute host accordingly.
-
-.. note::
-
- If an instance has no topology defined, it will be considered for any host.
- If an instance has a topology defined, it will be considered only for
- NUMA-capable hosts.
-
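-For example, to request a guest NUMA topology of two nodes through the flavor
-(the flavor name is illustrative):
-
-.. code-block:: console
-
- $ openstack flavor set m1.large --property hw:numa_nodes=2
-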
-.. _NumInstancesFilter:
-
-NumInstancesFilter
-------------------
-
-Hosts that have more instances running than specified by the
-``max_instances_per_host`` option are filtered out when this filter is in
-place.
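-
-For example, to ignore hosts that are already running 50 or more instances
-(50 is the upstream default; adjust to your environment), set:
-
-.. code-block:: ini
-
- [filter_scheduler]
- # Assumption: the option lives in [filter_scheduler] in your release.
- max_instances_per_host = 50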
-
-PciPassthroughFilter
---------------------
-
-The filter schedules instances on a host if the host has devices that meet the
-device requests in the ``extra_specs`` attribute for the flavor.
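-
-For example, assuming a PCI alias named ``a1`` has been configured on the
-compute hosts, a flavor can request two such devices with:
-
-.. code-block:: console
-
- $ openstack flavor set m1.large --property "pci_passthrough:alias"="a1:2"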
-
-.. _RamFilter:
-
-RamFilter
----------
-
-Only schedules instances on hosts that have sufficient RAM available. If this
-filter is not enabled, the scheduler may over-provision a host based on RAM (for
-example, the RAM allocated by virtual machine instances may exceed the physical
-RAM).
-
-You can configure this filter to enable a fixed amount of RAM overcommitment by
-using the ``ram_allocation_ratio`` configuration option in ``nova.conf``. The
-default setting is:
-
-.. code-block:: ini
-
- ram_allocation_ratio = 1.5
-
-This setting enables 1.5 GB instances to run on any compute node with 1 GB of
-free RAM.
-
-RetryFilter
------------
-
-Filters out hosts that have already been attempted for scheduling purposes. If
-the scheduler selects a host to respond to a service request, and the host
-fails to respond to the request, this filter prevents the scheduler from
-retrying that host for the service request.
-
-This filter is only useful if the ``scheduler_max_attempts`` configuration
-option is set to a value greater than zero.
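-
-For example, to allow up to three scheduling attempts (the upstream default),
-set the following in ``nova.conf``:
-
-.. code-block:: ini
-
- [DEFAULT]
- # Assumption: your release still reads this option from [DEFAULT].
- scheduler_max_attempts = 3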
-
-SameHostFilter
---------------
-
-Schedules the instance on the same host as another instance in a set of
-instances. To take advantage of this filter, the requester must pass a
-scheduler hint, using ``same_host`` as the key and a list of instance UUIDs as
-the value. This filter is the opposite of the ``DifferentHostFilter``. Using
-the :command:`openstack server create` command, use the ``--hint`` flag:
-
-.. code-block:: console
-
- $ openstack server create --image cedef40a-ed67-4d10-800e-17455edce175 \
- --flavor 1 --hint same_host=a0cf03a5-d921-4877-bb5c-86d26cf818e1 \
- --hint same_host=8c19174f-4220-44f0-824a-cd1eeef10287 server-1
-
-With the API, use the ``os:scheduler_hints`` key:
-
-.. code-block:: json
-
- {
-     "server": {
-         "name": "server-1",
-         "imageRef": "cedef40a-ed67-4d10-800e-17455edce175",
-         "flavorRef": "1"
-     },
-     "os:scheduler_hints": {
-         "same_host": [
-             "a0cf03a5-d921-4877-bb5c-86d26cf818e1",
-             "8c19174f-4220-44f0-824a-cd1eeef10287"
-         ]
-     }
- }
-
-.. _ServerGroupAffinityFilter:
-
-ServerGroupAffinityFilter
--------------------------
-
-The ServerGroupAffinityFilter ensures that an instance is scheduled on to a
-host from a set of group hosts. To take advantage of this filter, the requester
-must create a server group with an ``affinity`` policy, and pass a scheduler
-hint, using ``group`` as the key and the server group UUID as the value. Using
-the :command:`openstack server create` command, use the ``--hint`` flag. For
-example:
-
-.. code-block:: console
-
- $ openstack server group create --policy affinity group-1
- $ openstack server create --image IMAGE_ID --flavor 1 \
- --hint group=SERVER_GROUP_UUID server-1
-
-.. _ServerGroupAntiAffinityFilter:
-
-ServerGroupAntiAffinityFilter
------------------------------
-
-The ServerGroupAntiAffinityFilter ensures that each instance in a group is on a
-different host. To take advantage of this filter, the requester must create a
-server group with an ``anti-affinity`` policy, and pass a scheduler hint, using
-``group`` as the key and the server group UUID as the value. Using the
-:command:`openstack server create` command, use the ``--hint`` flag. For
-example:
-
-.. code-block:: console
-
- $ openstack server group create --policy anti-affinity group-1
- $ openstack server create --image IMAGE_ID --flavor 1 \
- --hint group=SERVER_GROUP_UUID server-1
-
-SimpleCIDRAffinityFilter
-------------------------
-
-Schedules the instance based on host IP subnet range. To take advantage of
-this filter, the requester must specify a range of valid IP addresses in CIDR
-format, by passing two scheduler hints:
-
-``build_near_host_ip``
- The first IP address in the subnet (for example, ``192.168.1.1``)
-
-``cidr``
- The CIDR that corresponds to the subnet (for example, ``/24``)
-
-Using the :command:`openstack server create` command, use the ``--hint`` flag.
-For example, to specify the IP subnet ``192.168.1.1/24``:
-
-.. code-block:: console
-
- $ openstack server create --image cedef40a-ed67-4d10-800e-17455edce175 \
- --flavor 1 --hint build_near_host_ip=192.168.1.1 --hint cidr=/24 server-1
-
-With the API, use the ``os:scheduler_hints`` key:
-
-.. code-block:: json
-
- {
-     "server": {
-         "name": "server-1",
-         "imageRef": "cedef40a-ed67-4d10-800e-17455edce175",
-         "flavorRef": "1"
-     },
-     "os:scheduler_hints": {
-         "build_near_host_ip": "192.168.1.1",
-         "cidr": "24"
-     }
- }
-
-Cell filters
-~~~~~~~~~~~~
-
-The following sections describe the available cell filters.
-
-.. note::
-
- These filters are only available for cells v1, which is deprecated.
-
-DifferentCellFilter
--------------------
-
-Schedules the instance on a different cell from a set of instances. To take
-advantage of this filter, the requester must pass a scheduler hint, using
-``different_cell`` as the key and a list of instance UUIDs as the value.
-
-ImagePropertiesFilter
----------------------
-
-Filters cells based on properties defined on the instance's image. This
-filter works by specifying the hypervisor required in the image metadata and the
-supported hypervisor version in cell capabilities.
-
-TargetCellFilter
-----------------
-
-Filters target cells. This filter works by specifying a scheduler hint of
-``target_cell``. The value should be the full cell path.
-
-.. _weights:
-
-Weights
-~~~~~~~
-
-When resourcing instances, the filter scheduler filters and weights each host
-in the list of acceptable hosts. Each time the scheduler selects a host, it
-virtually consumes resources on it, and subsequent selections are adjusted
-accordingly. This process is useful when a customer asks for a large number of
-instances, because a weight is computed for each requested instance.
-
-All weights are normalized before being summed up; the host with the largest
-weight is given the highest priority.
-
-**Weighting hosts**
-
-.. figure:: /figures/nova-weighting-hosts.png
-
-If cells are used, cells are weighted by the scheduler in the same manner as
-hosts.
-
-Hosts and cells are weighted based on the following options in the
-``/etc/nova/nova.conf`` file:
-
-.. list-table:: Host weighting options
- :header-rows: 1
- :widths: 10, 25, 60
-
- * - Section
-   - Option
-   - Description
- * - [DEFAULT]
-   - ``ram_weight_multiplier``
-   - By default, the scheduler spreads instances across all hosts evenly.
-     Set the ``ram_weight_multiplier`` option to a negative number if you
-     prefer stacking instead of spreading. Use a floating-point value.
- * - [DEFAULT]
-   - ``scheduler_host_subset_size``
-   - New instances are scheduled on a host that is chosen randomly from a
-     subset of the N best hosts. This property defines the subset size from
-     which a host is chosen. A value of 1 chooses the first host returned by
-     the weighting functions. This value must be at least 1. A value less
-     than 1 is ignored, and 1 is used instead. Use an integer value.
- * - [DEFAULT]
-   - ``scheduler_weight_classes``
-   - Defaults to ``nova.scheduler.weights.all_weighers``. Hosts are then
-     weighted and sorted with the largest weight winning.
- * - [DEFAULT]
-   - ``io_ops_weight_multiplier``
-   - Multiplier used for weighing host I/O operations. A negative value means
-     a preference to choose light workload compute hosts.
- * - [DEFAULT]
-   - ``soft_affinity_weight_multiplier``
-   - Multiplier used for weighing hosts for group soft-affinity. Only a
-     positive value is meaningful. A negative value changes the behavior
-     to the opposite, which is soft-anti-affinity.
- * - [DEFAULT]
-   - ``soft_anti_affinity_weight_multiplier``
-   - Multiplier used for weighing hosts for group soft-anti-affinity. Only a
-     positive value is meaningful. A negative value changes the behavior
-     to the opposite, which is soft-affinity.
- * - [filter_scheduler]
-   - ``build_failure_weight_multiplier``
-   - Multiplier used for weighing hosts which have recent build failures. A
-     positive value increases the significance of build failures recently
-     reported by the host, making such hosts less likely to be chosen.
- * - [metrics]
-   - ``weight_multiplier``
-   - Multiplier for weighting meters. Use a floating-point value.
- * - [metrics]
-   - ``weight_setting``
-   - Determines how meters are weighted. Use a comma-separated list of
-     metricName=ratio. For example: ``name1=1.0, name2=-1.0`` results in:
-     ``name1.value * 1.0 + name2.value * -1.0``
- * - [metrics]
-   - ``required``
-   - Specifies how to treat unavailable meters:
-
-     * True - Raises an exception. To avoid the raised exception, you should
-       use the scheduler filter ``MetricsFilter`` to filter out hosts with
-       unavailable meters.
-     * False - Treated as a negative factor in the weighting process (uses
-       the ``weight_of_unavailable`` option).
- * - [metrics]
-   - ``weight_of_unavailable``
-   - If ``required`` is set to False, and any one of the meters set by
-     ``weight_setting`` is unavailable, the ``weight_of_unavailable`` value
-     is returned to the scheduler.
-
-For example:
-
-.. code-block:: ini
-
- [DEFAULT]
- scheduler_host_subset_size = 1
- scheduler_weight_classes = nova.scheduler.weights.all_weighers
- ram_weight_multiplier = 1.0
- io_ops_weight_multiplier = 2.0
- soft_affinity_weight_multiplier = 1.0
- soft_anti_affinity_weight_multiplier = 1.0
- [metrics]
- weight_multiplier = 1.0
- weight_setting = name1=1.0, name2=-1.0
- required = false
- weight_of_unavailable = -10000.0
-
-.. list-table:: Cell weighting options
- :header-rows: 1
- :widths: 10, 25, 60
-
- * - Section
-   - Option
-   - Description
- * - [cells]
-   - ``mute_weight_multiplier``
-   - Multiplier to weight mute children (hosts which have not sent
-     capacity or capability updates for some time).
-     Use a negative, floating-point value.
- * - [cells]
-   - ``offset_weight_multiplier``
-   - Multiplier to weight cells, so you can specify a preferred cell.
-     Use a floating-point value.
- * - [cells]
-   - ``ram_weight_multiplier``
-   - By default, the scheduler spreads instances across all cells evenly.
-     Set the ``ram_weight_multiplier`` option to a negative number if you
-     prefer stacking instead of spreading. Use a floating-point value.
- * - [cells]
-   - ``scheduler_weight_classes``
-   - Defaults to ``nova.cells.weights.all_weighers``, which maps to all
-     cell weighers included with Compute. Cells are then weighted and
-     sorted with the largest weight winning.
-
-For example:
-
-.. code-block:: ini
-
- [cells]
- scheduler_weight_classes = nova.cells.weights.all_weighers
- mute_weight_multiplier = -10.0
- ram_weight_multiplier = 1.0
- offset_weight_multiplier = 1.0
-
-Chance scheduler
-~~~~~~~~~~~~~~~~
-
-As an administrator, you work mostly with the filter scheduler. However, the
-Compute service also provides the Chance Scheduler,
-``nova.scheduler.chance.ChanceScheduler``, which randomly selects a host from
-the list of filtered hosts.
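-
-If you want to try it, the scheduler driver is selected in ``nova.conf``. The
-following sketch assumes a release in which the ``[scheduler]/driver`` option
-still accepts the ``chance_scheduler`` entry point; check the configuration
-reference for your release before relying on it:
-
-.. code-block:: ini
-
- [scheduler]
- # Assumption: chance_scheduler is still an available driver in this release.
- driver = chance_scheduler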
-
-Utilization aware scheduling
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-It is possible to make advanced scheduling decisions for VMs. These decisions
-are made based on enhanced usage statistics encompassing data like memory
-cache utilization, memory bandwidth utilization, or network bandwidth
-utilization. This is disabled by default. The administrator can configure how
-the metrics are weighted by using the ``weight_setting`` configuration option
-in the ``nova.conf`` configuration file. For example, to configure metric1
-with ratio1 and metric2 with ratio2:
-
-.. code-block:: ini
-
- weight_setting = "metric1=ratio1, metric2=ratio2"
-
-.. _host-aggregates:
-
-Host aggregates and availability zones
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-Host aggregates are a mechanism for partitioning hosts in an OpenStack cloud,
-or a region of an OpenStack cloud, based on arbitrary characteristics.
-A typical example is grouping hosts that share additional hardware or
-performance characteristics.
-
-Host aggregates are not explicitly exposed to users. Instead administrators
-map flavors to host aggregates. Administrators do this by setting metadata on
-a host aggregate, and matching flavor extra specifications. The scheduler then
-endeavors to match user requests for instances of the given flavor to a host
-aggregate with the same key-value pair in its metadata. Compute nodes can be
-in more than one host aggregate.
-
-Administrators can optionally expose a host aggregate as an
-availability zone. Availability zones are different from host aggregates in
-that they are explicitly exposed to the user, and hosts can only be in a single
-availability zone. Administrators can configure a default availability zone
-where instances will be scheduled when the user fails to specify one.
-
-Command-line interface
-----------------------
-
-The :command:`nova` command-line client supports the following
-aggregate-related commands.
-
-nova aggregate-list
- Print a list of all aggregates.
-
-nova aggregate-create <name> [<availability-zone>]
- Create a new aggregate named ``<name>``, and optionally in availability zone
- ``<availability-zone>`` if specified. The command returns the ID of the
- newly created aggregate. Hosts can be made available to multiple host
- aggregates. Be careful when adding a host to an additional host aggregate
- when the host is also in an availability zone. Pay attention when using the
- :command:`nova aggregate-set-metadata` and :command:`nova aggregate-update`
- commands to avoid user confusion when they boot instances in different
- availability zones. An error occurs if you attempt to add a particular host
- to an aggregate zone for which it is not intended.
-
-nova aggregate-delete <aggregate>
- Delete an aggregate with its ``<id>`` or ``<name>``.
-
-nova aggregate-show <aggregate>
- Show details of the aggregate with its ``<id>`` or ``<name>``.
-
-nova aggregate-add-host <aggregate> <host>
- Add host with name ``<host>`` to aggregate with its ``<id>`` or ``<name>``.
-
-nova aggregate-remove-host <aggregate> <host>
- Remove the host with name ``<host>`` from the aggregate with its ``<id>``
- or ``<name>``.
-
-nova aggregate-set-metadata <aggregate> <key=value> [<key=value> ...]
- Add or update metadata (key-value pairs) associated with the aggregate with
- its ``<id>`` or ``<name>``.
-
-nova aggregate-update [--name <name>] [--availability-zone <availability-zone>] <aggregate>
- Update the name and/or availability zone for the aggregate.
-
-nova host-list
- List all hosts by service. It has been deprecated since microversion 2.43.
- Use :command:`nova hypervisor-list` instead.
-
-nova hypervisor-list [--matching <hostname>] [--marker <marker>] [--limit <limit>]
- List hypervisors.
-
-nova host-update [--status <enable|disable>] [--maintenance <enable|disable>] <hostname>
- Put the host into or out of maintenance mode. It has been deprecated since
- microversion 2.43. To enable or disable a service,
- use :command:`nova service-enable` or :command:`nova service-disable` instead.
-
-nova service-enable <id>
- Enable the service.
-
-nova service-disable [--reason <reason>] <id>
- Disable the service.
-
-.. note::
-
- Only administrators can access these commands. If you try to use these
- commands and the user name and tenant that you use to access the Compute
- service do not have the ``admin`` role or the appropriate privileges, these
- errors occur:
-
- .. code-block:: console
-
- ERROR: Policy doesn't allow compute_extension:aggregates to be performed. (HTTP 403) (Request-ID: req-299fbff6-6729-4cef-93b2-e7e1f96b4864)
-
- .. code-block:: console
-
- ERROR: Policy doesn't allow compute_extension:hosts to be performed. (HTTP 403) (Request-ID: req-ef2400f6-6776-4ea3-b6f1-7704085c27d1)
-
-Configure scheduler to support host aggregates
-----------------------------------------------
-
-One common use case for host aggregates is when you want to support scheduling
-instances to a subset of compute hosts because they have a specific capability.
-For example, you may want to allow users to request compute hosts that have SSD
-drives if they need access to faster disk I/O, or access to compute hosts that
-have GPU cards to take advantage of GPU-accelerated code.
-
-To configure the scheduler to support host aggregates, the
-``scheduler_default_filters`` configuration option must contain the
-``AggregateInstanceExtraSpecsFilter`` in addition to the other filters used by
-the scheduler. Add the following line to ``/etc/nova/nova.conf`` on the host
-that runs the ``nova-scheduler`` service to enable host aggregates filtering,
-as well as the other filters that are typically enabled:
-
-.. code-block:: ini
-
- scheduler_default_filters=AggregateInstanceExtraSpecsFilter,RetryFilter,AvailabilityZoneFilter,ComputeCapabilitiesFilter,ImagePropertiesFilter,ServerGroupAntiAffinityFilter,ServerGroupAffinityFilter
-
-Example: Specify compute hosts with SSDs
-----------------------------------------
-
-This example configures the Compute service to enable users to request nodes
-that have solid-state drives (SSDs). You create a ``fast-io`` host aggregate in
-the ``nova`` availability zone and you add the ``ssd=true`` key-value pair to
-the aggregate. Then, you add the ``node1`` and ``node2`` compute nodes to it.
-
-.. code-block:: console
-
- $ openstack aggregate create --zone nova fast-io
- +-------------------+----------------------------+
- | Field             | Value                      |
- +-------------------+----------------------------+
- | availability_zone | nova                       |
- | created_at        | 2016-12-22T07:31:13.013466 |
- | deleted           | False                      |
- | deleted_at        | None                       |
- | id                | 1                          |
- | name              | fast-io                    |
- | updated_at        | None                       |
- +-------------------+----------------------------+
-
- $ openstack aggregate set --property ssd=true 1
- +-------------------+----------------------------+
- | Field             | Value                      |
- +-------------------+----------------------------+
- | availability_zone | nova                       |
- | created_at        | 2016-12-22T07:31:13.000000 |
- | deleted           | False                      |
- | deleted_at        | None                       |
- | hosts             | []                         |
- | id                | 1                          |
- | name              | fast-io                    |
- | properties        | ssd='true'                 |
- | updated_at        | None                       |
- +-------------------+----------------------------+
-
- $ openstack aggregate add host 1 node1
- +-------------------+--------------------------------------------------+
- | Field             | Value                                            |
- +-------------------+--------------------------------------------------+
- | availability_zone | nova                                             |
- | created_at        | 2016-12-22T07:31:13.000000                       |
- | deleted           | False                                            |
- | deleted_at        | None                                             |
- | hosts             | [u'node1']                                       |
- | id                | 1                                                |
- | metadata          | {u'ssd': u'true', u'availability_zone': u'nova'} |
- | name              | fast-io                                          |
- | updated_at        | None                                             |
- +-------------------+--------------------------------------------------+
-
- $ openstack aggregate add host 1 node2
- +-------------------+--------------------------------------------------+
- | Field             | Value                                            |
- +-------------------+--------------------------------------------------+
- | availability_zone | nova                                             |
- | created_at        | 2016-12-22T07:31:13.000000                       |
- | deleted           | False                                            |
- | deleted_at        | None                                             |
- | hosts             | [u'node1', u'node2']                             |
- | id                | 1                                                |
- | metadata          | {u'ssd': u'true', u'availability_zone': u'nova'} |
- | name              | fast-io                                          |
- | updated_at        | None                                             |
- +-------------------+--------------------------------------------------+
-
-Use the :command:`openstack flavor create` command to create the ``ssd.large``
-flavor with an ID of 6, 8 GB of RAM, an 80 GB root disk, and 4 vCPUs.
-
-.. code-block:: console
-
- $ openstack flavor create --id 6 --ram 8192 --disk 80 --vcpus 4 ssd.large
- +----------------------------+-----------+
- | Field                      | Value     |
- +----------------------------+-----------+
- | OS-FLV-DISABLED:disabled   | False     |
- | OS-FLV-EXT-DATA:ephemeral  | 0         |
- | disk                       | 80        |
- | id                         | 6         |
- | name                       | ssd.large |
- | os-flavor-access:is_public | True      |
- | ram                        | 8192      |
- | rxtx_factor                | 1.0       |
- | swap                       |           |
- | vcpus                      | 4         |
- +----------------------------+-----------+
-
-Once the flavor is created, specify one or more key-value pairs that match the
-key-value pairs on the host aggregates with scope
-``aggregate_instance_extra_specs``. In this case, that is the
-``aggregate_instance_extra_specs:ssd=true`` key-value pair. Setting a
-key-value pair on a flavor is done using the :command:`openstack flavor set`
-command.
-
-.. code-block:: console
-
- $ openstack flavor set --property aggregate_instance_extra_specs:ssd=true ssd.large
-
-Once it is set, you should see the ``extra_specs`` property of the
-``ssd.large`` flavor populated with a key of ``ssd`` and a corresponding value
-of ``true``.
-
-.. code-block:: console
-
- $ openstack flavor show ssd.large
- +----------------------------+-------------------------------------------+
- | Field                      | Value                                     |
- +----------------------------+-------------------------------------------+
- | OS-FLV-DISABLED:disabled   | False                                     |
- | OS-FLV-EXT-DATA:ephemeral  | 0                                         |
- | disk                       | 80                                        |
- | id                         | 6                                         |
- | name                       | ssd.large                                 |
- | os-flavor-access:is_public | True                                      |
- | properties                 | aggregate_instance_extra_specs:ssd='true' |
- | ram                        | 8192                                      |
- | rxtx_factor                | 1.0                                       |
- | swap                       |                                           |
- | vcpus                      | 4                                         |
- +----------------------------+-------------------------------------------+
-
-Now, when a user requests an instance with the ``ssd.large`` flavor,
-the scheduler only considers hosts with the ``ssd=true`` key-value pair.
-In this example, these are ``node1`` and ``node2``.
-
-Aggregates in Placement
------------------------
-
-Aggregates also exist in placement and are not the same thing as host
-aggregates in nova. These aggregates are defined (purely) as groupings
-of related resource providers. Since compute nodes in nova are
-represented in placement as resource providers, they can be added to a
-placement aggregate as well. For example, get the uuid of the compute
-node using :command:`openstack hypervisor list` and add it to an
-aggregate in placement using :command:`openstack placement aggregate
-set`.
-
-.. code-block:: console
-
- $ openstack --os-compute-api-version=2.53 hypervisor list
- +--------------------------------------+---------------------+-----------------+-----------------+-------+
- | ID                                   | Hypervisor Hostname | Hypervisor Type | Host IP         | State |
- +--------------------------------------+---------------------+-----------------+-----------------+-------+
- | 815a5634-86fb-4e1e-8824-8a631fee3e06 | node1               | QEMU            | 192.168.1.123   | up    |
- +--------------------------------------+---------------------+-----------------+-----------------+-------+
-
- $ openstack --os-placement-api-version=1.2 resource provider aggregate set --aggregate df4c74f3-d2c4-4991-b461-f1a678e1d161 815a5634-86fb-4e1e-8824-8a631fee3e06
-
-Some scheduling filter operations can be performed by placement for
-increased speed and efficiency.
-
-.. note::
-
- The nova-api service attempts (as of nova 18.0.0) to automatically mirror
- the association of a compute host with an aggregate when an administrator
- adds or removes a host to/from a nova host aggregate. This should alleviate
- the need to manually create those association records in the placement API
- using the ``openstack resource provider aggregate set`` CLI invocation.
-
-Tenant Isolation with Placement
--------------------------------
-
-In order to use placement to isolate tenants, there must be placement
-aggregates that match the membership and UUID of nova host aggregates
-that you want to use for isolation. The same key pattern in aggregate
-metadata used by the `AggregateMultiTenancyIsolation` filter controls
-this function, and is enabled by setting
-`[scheduler]/limit_tenants_to_placement_aggregate=True`.
-
-.. code-block:: console
-
- $ openstack --os-compute-api-version=2.53 aggregate create myagg
- +-------------------+--------------------------------------+
- | Field             | Value                                |
- +-------------------+--------------------------------------+
- | availability_zone | None                                 |
- | created_at        | 2018-03-29T16:22:23.175884           |
- | deleted           | False                                |
- | deleted_at        | None                                 |
- | id                | 4                                    |
- | name              | myagg                                |
- | updated_at        | None                                 |
- | uuid              | 019e2189-31b3-49e1-aff2-b220ebd91c24 |
- +-------------------+--------------------------------------+
-
- $ openstack --os-compute-api-version=2.53 aggregate add host myagg node1
- +-------------------+--------------------------------------+
- | Field             | Value                                |
- +-------------------+--------------------------------------+
- | availability_zone | None                                 |
- | created_at        | 2018-03-29T16:22:23.175884           |
- | deleted           | False                                |
- | deleted_at        | None                                 |
- | hosts             | [u'node1']                           |
- | id                | 4                                    |
- | name              | myagg                                |
- | updated_at        | None                                 |
- | uuid              | 019e2189-31b3-49e1-aff2-b220ebd91c24 |
- +-------------------+--------------------------------------+
-
- $ openstack project list -f value | grep 'demo'
- 9691591f913949818a514f95286a6b90 demo
-
- $ openstack aggregate set --property filter_tenant_id=9691591f913949818a514f95286a6b90 myagg
-
- $ openstack --os-placement-api-version=1.2 resource provider aggregate set --aggregate 019e2189-31b3-49e1-aff2-b220ebd91c24 815a5634-86fb-4e1e-8824-8a631fee3e06
-
-Availability Zones with Placement
----------------------------------
-
-In order to use placement to honor availability zone requests, there must be
-placement aggregates that match the membership and UUID of nova host aggregates
-that you assign as availability zones. The same key in aggregate metadata used
-by the `AvailabilityZoneFilter` filter controls this function, and is enabled by
-setting `[scheduler]/query_placement_for_availability_zone=True`.
-
-.. code-block:: console
-
- $ openstack --os-compute-api-version=2.53 aggregate create myaz
- +-------------------+--------------------------------------+
- | Field             | Value                                |
- +-------------------+--------------------------------------+
- | availability_zone | None                                 |
- | created_at        | 2018-03-29T16:22:23.175884           |
- | deleted           | False                                |
- | deleted_at        | None                                 |
- | id                | 4                                    |
- | name              | myaz                                 |
- | updated_at        | None                                 |
- | uuid              | 019e2189-31b3-49e1-aff2-b220ebd91c24 |
- +-------------------+--------------------------------------+
-
- $ openstack --os-compute-api-version=2.53 aggregate add host myaz node1
- +-------------------+--------------------------------------+
- | Field             | Value                                |
- +-------------------+--------------------------------------+
- | availability_zone | None                                 |
- | created_at        | 2018-03-29T16:22:23.175884           |
- | deleted           | False                                |
- | deleted_at        | None                                 |
- | hosts             | [u'node1']                           |
- | id                | 4                                    |
- | name              | myaz                                 |
- | updated_at        | None                                 |
- | uuid              | 019e2189-31b3-49e1-aff2-b220ebd91c24 |
- +-------------------+--------------------------------------+
-
- $ openstack aggregate set --property availability_zone=az002 myaz
-
- $ openstack --os-placement-api-version=1.2 resource provider aggregate set --aggregate 019e2189-31b3-49e1-aff2-b220ebd91c24 815a5634-86fb-4e1e-8824-8a631fee3e06
-
-With the above configuration, the `AvailabilityZoneFilter` filter can be disabled
-in `[filter_scheduler]/enabled_filters` while retaining proper behavior (and doing
-so with the higher performance of placement's implementation).
-
-XenServer hypervisor pools to support live migration
-----------------------------------------------------
-
-When using the XenAPI-based hypervisor, the Compute service uses host
-aggregates to manage XenServer Resource pools, which are used in supporting
-live migration.
-
-Cells considerations
-~~~~~~~~~~~~~~~~~~~~
-
-By default, cells are enabled for scheduling new instances, but they can be
-disabled (new instances will not be scheduled to a disabled cell). This may be
-useful when performing cell maintenance, handling failures, or making other
-interventions. Note that creating pre-disabled cells and enabling or disabling
-existing cells must be followed by a restart or SIGHUP of the nova-scheduler
-service for the changes to take effect.
-
-Command-line interface
-----------------------
-
-The :command:`nova-manage` command-line client supports commands for disabling
-and enabling cells. To enable or disable a cell, use
-:command:`nova-manage cell_v2 update_cell` and to create pre-disabled cells,
-use :command:`nova-manage cell_v2 create_cell`. See the
-:ref:`man-page-cells-v2` man page for details on command usage.
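-
-For example, the following sketch disables an existing cell and creates a new
-cell in a pre-disabled state; the cell UUID and name are placeholders, and the
-transport URL and database connection are assumed to come from configuration:
-
-.. code-block:: console
-
- $ nova-manage cell_v2 update_cell --cell_uuid <cell_uuid> --disable
- $ nova-manage cell_v2 create_cell --name <cell_name> --disabled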
diff --git a/doc/source/admin/configuring-migrations.rst b/doc/source/admin/configuring-migrations.rst
index 1f1a0c0ee98..63edae6e216 100644
--- a/doc/source/admin/configuring-migrations.rst
+++ b/doc/source/admin/configuring-migrations.rst
@@ -10,16 +10,15 @@ source host, but migration can also be useful to redistribute the load when
many VM instances are running on a specific physical machine.
This document covers live migrations using the
-:ref:`configuring-migrations-kvm-libvirt` and
-:ref:`configuring-migrations-xenserver` hypervisors.
+:ref:`configuring-migrations-kvm-libvirt` and VMware hypervisors.
.. :ref:`_configuring-migrations-kvm-libvirt`
-.. :ref:`_configuring-migrations-xenserver`
.. note::
Not all Compute service hypervisor drivers support live-migration, or
- support all live-migration features.
+ support all live-migration features. Similarly, not all Compute service
+ features are supported on every hypervisor.
Consult :doc:`/user/support-matrix` to determine which hypervisors
support live-migration.
@@ -67,21 +66,17 @@ The migration types are:
different host in the same cell, but not across cells.
The following sections describe how to configure your hosts for live migrations
-using the KVM and XenServer hypervisors.
+using the libvirt virt driver and KVM hypervisor.
.. _configuring-migrations-kvm-libvirt:
-KVM-libvirt
-~~~~~~~~~~~
-
-.. :ref:`_configuring-migrations-kvm-general`
-.. :ref:`_configuring-migrations-kvm-block-and-volume-migration`
-.. :ref:`_configuring-migrations-kvm-shared-storage`
+Libvirt
+-------
.. _configuring-migrations-kvm-general:
General configuration
----------------------
+~~~~~~~~~~~~~~~~~~~~~
To enable any type of live migration, configure the compute hosts according to
the instructions below:
@@ -135,20 +130,36 @@ the instructions below:
Be mindful of the security risks introduced by opening ports.
+.. _`configuring-migrations-securing-live-migration-streams`:
+
+Securing live migration streams
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+If your compute nodes have at least libvirt 4.4.0 and QEMU 2.11.0, it is
+strongly recommended to secure all your live migration streams by taking
+advantage of the "QEMU-native TLS" feature. This requires a
+pre-existing PKI (Public Key Infrastructure) setup. For further details
+on how to set this all up, refer to the
+:doc:`secure-live-migration-with-qemu-native-tls` document.
+
+
.. _configuring-migrations-kvm-block-and-volume-migration:
Block migration, volume-based live migration
---------------------------------------------
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-No additional configuration is required for block migration and volume-backed
-live migration.
+If your environment satisfies the requirements for "QEMU-native TLS",
+then block migration requires some setup; refer to the above section,
+`Securing live migration streams`_, for details. Otherwise, no
+additional configuration is required for block migration and
+volume-backed live migration.
Be aware that block migration adds load to the network and storage subsystems.
.. _configuring-migrations-kvm-shared-storage:
Shared storage
---------------
+~~~~~~~~~~~~~~
Compute hosts have many options for sharing storage, for example NFS, shared
disk array LUNs, Ceph or GlusterFS.
@@ -208,7 +219,7 @@ hosts.
.. _configuring-migrations-kvm-advanced:
Advanced configuration for KVM and QEMU
----------------------------------------
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Live migration copies the instance's memory from the source to the destination
compute host. After a memory page has been copied, the instance may write to it
@@ -221,27 +232,17 @@ memory-intensive instances succeed.
#. **Live migration completion timeout**
- The Compute service aborts a migration when it has been running for too
- long. The timeout is calculated based on the instance size, which is the
- instance's memory size in GiB. In the case of block migration, the size of
- ephemeral storage in GiB is added.
+ The Compute service will either abort or force complete a migration
+ when it has been running for too long. This behavior is configurable
+ using the :oslo.config:option:`libvirt.live_migration_timeout_action`
+ config option. The timeout is calculated based on the instance size, which
+ is the instance's memory size in GiB. In the case of block migration, the
+ size of ephemeral storage in GiB is added.
The timeout in seconds is the instance size multiplied by the configurable
- parameter ``live_migration_completion_timeout``, whose default is 800. For
- example, shared-storage live migration of an instance with 8GiB memory will
- time out after 6400 seconds.
-
-#. **Live migration progress timeout**
-
- The Compute service also aborts a live migration when it detects that memory
- copy is not making progress for a certain time. You can set this time, in
- seconds, through the configurable parameter
- ``live_migration_progress_timeout``.
-
- In Ocata, the default value of ``live_migration_progress_timeout`` is 0,
- which disables progress timeouts. You should not change this value, since
- the algorithm that detects memory copy progress has been determined to be
- unreliable. It may be re-enabled in future releases.
+ parameter :oslo.config:option:`libvirt.live_migration_completion_timeout`,
+ whose default is 800. For example, shared-storage live migration of an
+ instance with 8GiB memory will time out after 6400 seconds.
#. **Instance downtime**
@@ -316,81 +317,16 @@ memory-intensive instances succeed.
The full list of live migration configuration parameters is documented in the
:doc:`Nova Configuration Options `
-.. _configuring-migrations-xenserver:
-
-XenServer
-~~~~~~~~~
-
-.. :ref:Shared Storage
-.. :ref:Block migration
-
-.. _configuring-migrations-xenserver-shared-storage:
-
-Shared storage
---------------
-
-**Prerequisites**
-
-- **Compatible XenServer hypervisors**.
-
- For more information, see the `Requirements for Creating Resource Pools
- `_
- section of the XenServer Administrator's Guide.
-
-- **Shared storage**.
-
- An NFS export, visible to all XenServer hosts.
-
- .. note::
-
- For the supported NFS versions, see the `NFS and SMB
- `_
- section of the XenServer Administrator's Guide.
-
-To use shared storage live migration with XenServer hypervisors, the hosts must
-be joined to a XenServer pool.
-
-.. rubric:: Using shared storage live migrations with XenServer Hypervisors
-
-#. Add an NFS VHD storage to your master XenServer, and set it as the default
- storage repository. For more information, see NFS VHD in the XenServer
- Administrator's Guide.
-
-#. Configure all compute nodes to use the default storage repository (``sr``)
- for pool operations. Add this line to your ``nova.conf`` configuration files
- on all compute nodes:
-
- .. code-block:: ini
-
- sr_matching_filter=default-sr:true
-
-#. To add a host to a pool, you need to know the pool master ip address,
- user name and password. Run below command on the XenServer host:
-
- .. code-block:: console
-
- $ xe pool-join master-address=MASTER_IP master-username=root master-password=MASTER_PASSWORD
-
- .. note::
-
- The added compute node and the host will shut down to join the host to
- the XenServer pool. The operation will fail if any server other than the
- compute node is running or suspended on the host.
-
-.. _configuring-migrations-xenserver-block-migration:
-
-Block migration
----------------
-- **Compatible XenServer hypervisors**.
+VMware
+------
- The hypervisors must support the Storage XenMotion feature. See your
- XenServer manual to make sure your edition has this feature.
+.. :ref:`_configuring-migrations-vmware`
- .. note::
+.. _configuring-migrations-vmware:
- - To use block migration, you must use the ``--block-migrate`` parameter
- with the live migration command.
+vSphere configuration
+~~~~~~~~~~~~~~~~~~~~~
- - Block migration works only with EXT local storage storage repositories,
- and the server must not have any volumes attached.
+Enable vMotion on all ESX hosts which are managed by Nova by following the
+instructions in `this `_ KB article.
diff --git a/doc/source/admin/cpu-models.rst b/doc/source/admin/cpu-models.rst
new file mode 100644
index 00000000000..06ffdb61b66
--- /dev/null
+++ b/doc/source/admin/cpu-models.rst
@@ -0,0 +1,320 @@
+==========
+CPU models
+==========
+
+Nova allows you to control the guest CPU model that is exposed to instances.
+Use cases include:
+
+* To maximize performance of instances by exposing new host CPU features to the
+ guest
+
+* To ensure a consistent default behavior across all machines, removing
+ reliance on system defaults.
+
+.. important::
+
+ The functionality described below is currently only supported by the
+ libvirt driver.
+
+
+CPU modes
+---------
+
+In libvirt, the CPU is specified by providing a base CPU model name (which is a
+shorthand for a set of feature flags), a set of additional feature flags, and
+the topology (sockets/cores/threads). The libvirt KVM driver provides a number
+of standard CPU model names. These models are defined in
+``/usr/share/libvirt/cpu_map/*.xml``. You can inspect these files to determine
+which models are supported by your local installation.
+
+Two Compute configuration options in the :oslo.config:group:`libvirt` group
+of ``nova.conf`` define which type of CPU model is exposed to the hypervisor
+when using KVM: :oslo.config:option:`libvirt.cpu_mode` and
+:oslo.config:option:`libvirt.cpu_models`.
+
+The :oslo.config:option:`libvirt.cpu_mode` option can take one of the following
+values: ``none``, ``host-passthrough``, ``host-model``, and ``custom``.
+
+See `Effective Virtual CPU configuration in Nova`__ for a recorded presentation
+about this topic.
+
+.. __: https://www.openstack.org/videos/summits/berlin-2018/effective-virtual-cpu-configuration-in-nova
+
+Host model
+~~~~~~~~~~
+
+If :oslo.config:option:`cpu_mode=host-model <libvirt.cpu_mode>`, libvirt
+identifies the CPU model in ``/usr/share/libvirt/cpu_map/*.xml`` that most
+closely matches the host, and requests additional CPU flags to complete the
+match. This CPU model has a number of advantages:
+
+* It provides almost all of the host CPU features to the guest, thus providing
+ close to the maximum functionality and performance possible.
+
+* It auto-adds critical guest CPU flags for mitigation from certain security
+ flaws, *provided* the CPU microcode, kernel, QEMU, and libvirt are all
+ updated.
+
+* It computes live migration compatibility, with the caveat that live migration
+ in both directions is not always possible.
+
+In general, using ``host-model`` is a safe choice if your compute node CPUs are
+largely identical. However, if your compute nodes span multiple processor
+generations, you may be better advised to select a ``custom`` CPU model.
+
+The ``host-model`` CPU model is the default for the KVM & QEMU hypervisors
+(:oslo.config:option:`libvirt.virt_type`\ =``kvm``/``qemu``).
+
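+To select this mode explicitly, set the following in ``nova.conf``:
+
+.. code-block:: ini
+
+ [libvirt]
+ cpu_mode = host-model
+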
+.. note::
+
+ As noted above, live migration is not always possible in both directions
+ when using ``host-model``. During live migration, the source CPU model
+ definition is transferred to the destination host as-is. This results in the
+ migrated guest on the destination seeing exactly the same CPU model as on the
+ source, even if the destination compute host is capable of providing more CPU
+ features. However, shutting down and restarting the guest on the destination
+ may present different hardware to the guest, as per the new capabilities of
+ the destination compute host.
+
+Host passthrough
+~~~~~~~~~~~~~~~~
+
+If :oslo.config:option:`cpu_mode=host-passthrough <libvirt.cpu_mode>`, libvirt
+tells KVM to pass through the host CPU with no modifications. In comparison to
+``host-model`` which simply matches feature flags, ``host-passthrough`` ensures
+every last detail of the host CPU is matched. This gives the best performance,
+and can be important to some apps which check low level CPU details, but it
+comes at a cost with respect to migration.
+
+In ``host-passthrough`` mode, the guest can only be live-migrated to a target
+host that matches the source host extremely closely. This includes the physical
+CPU model and running microcode, and may even include the running kernel. Use
+this mode only if your compute nodes have a very large degree of homogeneity
+(i.e. substantially all of your compute nodes use the exact same CPU generation
+and model), and you make sure to only live-migrate between hosts with exactly
+matching kernel versions. Failure to do so will result in an inability to
+support any form of live migration.
+
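+To select this mode, set the following in ``nova.conf``:
+
+.. code-block:: ini
+
+ [libvirt]
+ cpu_mode = host-passthrough
+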
+.. note::
+
+ The reason it is necessary for the CPU microcode versions to match is that
+ hardware performance counters are exposed to an instance, and their behavior
+ may vary between different CPU models. There may also be other reasons, such
+ as security fixes for some hardware security flaws being included in CPU
+ microcode.
+
+Custom
+~~~~~~
+
+If :oslo.config:option:`cpu_mode=custom <libvirt.cpu_mode>`, you can explicitly
+specify an ordered list of supported named models using the
+:oslo.config:option:`libvirt.cpu_models` configuration option. It is expected
+that the list is ordered so that the more common and less advanced CPU models
+are listed earlier.
+
+In selecting the ``custom`` mode, along with a
+:oslo.config:option:`libvirt.cpu_models` that matches the oldest of your compute
+node CPUs, you can ensure that live migration between compute nodes will always
+be possible. However, you should ensure that the
+:oslo.config:option:`libvirt.cpu_models` you select passes the correct CPU
+feature flags to the guest.
+
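+For example, with illustrative model names (use models that your hosts
+actually support):
+
+.. code-block:: ini
+
+ [libvirt]
+ cpu_mode = custom
+ cpu_models = Penryn,IvyBridge
+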
+If you need to further tweak your CPU feature flags in the ``custom`` mode, see
+`CPU feature flags`_.
+
+.. note::
+
+ If :oslo.config:option:`libvirt.cpu_models` is configured,
+ the CPU models in the list need to be compatible with the host CPU. Also, if
+ :oslo.config:option:`libvirt.cpu_model_extra_flags` is configured, all flags
+ need to be compatible with the host CPU. If incompatible CPU models or flags
+ are specified, the nova service will raise an error and fail to start.
+
+None
+~~~~
+
+If :oslo.config:option:`cpu_mode=none <libvirt.cpu_mode>`, libvirt does not
+specify a CPU model. Instead, the hypervisor chooses the default model.
+
+The ``none`` CPU model is the default for all non-KVM/QEMU hypervisors
+(:oslo.config:option:`libvirt.virt_type`\ !=``kvm``/``qemu``).
+
+
+CPU feature flags
+-----------------
+
+.. versionadded:: 18.0.0 (Rocky)
+
+Regardless of your configured :oslo.config:option:`libvirt.cpu_mode`, it is
+also possible to selectively enable additional feature flags. This can be
+accomplished using the :oslo.config:option:`libvirt.cpu_model_extra_flags`
+config option. For example, suppose you have configured a custom CPU model of
+``IvyBridge``, which normally does not enable the ``pcid`` feature flag, but
+you do want to pass ``pcid`` into your guest instances. In this case, you could
+configure the following in ``nova.conf`` to enable this flag.
+
+.. code-block:: ini
+
+ [libvirt]
+ cpu_mode = custom
+ cpu_models = IvyBridge
+ cpu_model_extra_flags = pcid
+
+An end user can also specify required CPU features through traits. When
+specified, the libvirt driver will select the first CPU model in the
+:oslo.config:option:`libvirt.cpu_models` list that can provide the requested
+feature traits. If no CPU feature traits are specified then the instance will
+be configured with the first CPU model in the list.
+
+Consider the following ``nova.conf``:
+
+.. code-block:: ini
+
+ [libvirt]
+ cpu_mode = custom
+ cpu_models = Penryn,IvyBridge,Haswell,Broadwell,Skylake-Client
+
+These different CPU models support different feature flags and are correctly
+configured in order of oldest (and therefore most widely supported) to newest.
+If the user explicitly required the ``avx`` and ``avx2`` CPU features, the
+latter of which is only found on Haswell-generation processors or newer, then
+they could request them using the
+:nova:extra-spec:`trait{group}:HW_CPU_X86_AVX` and
+:nova:extra-spec:`trait{group}:HW_CPU_X86_AVX2` flavor extra specs. For
+example:
+
+.. code-block:: console
+
+ $ openstack flavor set $FLAVOR \
+ --property trait:HW_CPU_X86_AVX=required \
+ --property trait:HW_CPU_X86_AVX2=required
+
+As ``Haswell`` is the first CPU model supporting both of these CPU features,
+the instance would be configured with this model.
+
+.. _mitigation-for-Intel-MDS-security-flaws:
+
+Mitigation for MDS ("Microarchitectural Data Sampling") Security Flaws
+----------------------------------------------------------------------
+
+In May 2019, four new microprocessor flaws, known as `MDS`__ and also referred
+to as `RIDL and Fallout`__ or `ZombieLoad`__, were discovered.
+These flaws affect unpatched Nova compute nodes and instances running on Intel
+x86_64 CPUs.
+
+.. __: https://access.redhat.com/security/vulnerabilities/mds
+.. __: https://mdsattacks.com/
+.. __: https://zombieloadattack.com
+
+Resolution
+~~~~~~~~~~
+
+To get mitigation for the said MDS security flaws, a new CPU flag,
+``md-clear``, needs to be exposed to the Nova instances. This can be done as
+follows.
+
+#. Update the following components to the versions from your Linux
+ distribution that have fixes for the MDS flaws, on all compute nodes
+ with Intel x86_64 CPUs:
+
+ - ``microcode_ctl``
+ - ``kernel``
+ - ``qemu-system-x86``
+ - ``libvirt``
+
+#. When using the libvirt driver, ensure that the CPU flag ``md-clear``
+ is exposed to the Nova instances. This can be done in one of three ways,
+ depending on your configured CPU mode:
+
+ #. :oslo.config:option:`libvirt.cpu_mode`\ =host-model
+
+ When using the ``host-model`` CPU mode, the ``md-clear`` CPU flag
+ will be passed through to the Nova guests automatically.
+
+ This mode is the default, when
+ :oslo.config:option:`libvirt.virt_type`\ =kvm|qemu is set in
+ ``/etc/nova/nova-cpu.conf`` on compute nodes.
+
+ #. :oslo.config:option:`libvirt.cpu_mode`\ =host-passthrough
+
+ When using the ``host-passthrough`` CPU mode, the ``md-clear`` CPU
+ flag will be passed through to the Nova guests automatically.
+
+ #. :oslo.config:option:`libvirt.cpu_mode`\ =custom
+
+ When using the ``custom`` CPU mode, you must *explicitly* enable the
+ CPU flag ``md-clear`` to the Nova instances, in addition to the
+ flags required for previous vulnerabilities, using the
+ :oslo.config:option:`libvirt.cpu_model_extra_flags`. For example:
+
+ .. code-block:: ini
+
+ [libvirt]
+ cpu_mode = custom
+ cpu_models = IvyBridge
+ cpu_model_extra_flags = spec-ctrl,ssbd,md-clear
+
+#. Reboot the compute node for the fixes to take effect.
+
+ To minimize workload downtime, you may wish to live migrate all guests to
+ another compute node first.
+
+Once the above steps have been taken on every vulnerable compute node in the
+deployment, each running guest in the cluster must be fully powered down, and
+cold-booted (i.e. an explicit stop followed by a start), in order to activate
+the new CPU models. This can be done by the guest administrators at a time of
+their choosing.
+
+Validation
+~~~~~~~~~~
+
+After applying relevant updates, administrators can check the kernel's
+``sysfs`` interface to see what mitigation is in place, by running the
+following command on the host:
+
+.. code-block:: bash
+
+ # cat /sys/devices/system/cpu/vulnerabilities/mds
+ Mitigation: Clear CPU buffers; SMT vulnerable
+
+To unpack the message "Mitigation: Clear CPU buffers; SMT vulnerable":
+
+- ``Mitigation: Clear CPU buffers`` means you have the "CPU buffer clearing"
+ mitigation enabled, which is a mechanism that flushes various exploitable
+ CPU buffers by invoking the "VERW" CPU instruction.
+
+- ``SMT vulnerable`` means, depending on your workload, you may still be
+ vulnerable to SMT-related problems. You need to evaluate whether your
+ workloads need SMT (also called "Hyper-Threading") to be disabled or not.
+ Refer to the guidance from your Linux distribution and processor vendor.
+
+To see the other possible values for
+``/sys/devices/system/cpu/vulnerabilities/mds``, refer to the `MDS system
+information`__ section in Linux kernel's documentation for MDS.
+
+On the host, validate that KVM is capable of exposing the ``md-clear`` flag to
+guests:
+
+.. code-block:: bash
+
+ # virsh domcapabilities kvm | grep md-clear
+
+
+More information can be found on the 'Diagnosis' tab of `this security notice
+document`__.
+
+.. __: https://www.kernel.org/doc/html/latest/admin-guide/hw-vuln/mds.html#mds-system-information
+.. __: https://access.redhat.com/security/vulnerabilities/mds
+
+Performance Impact
+~~~~~~~~~~~~~~~~~~
+
+Refer to the section titled "Performance Impact and Disabling MDS" in
+`this security notice document`__, under the *Resolve* tab.
+
+.. note::
+
+   Although the article referred to is from Red Hat, the findings and
+   recommendations about performance impact also apply to other distributions.
+
+.. __: https://access.redhat.com/security/vulnerabilities/mds
diff --git a/doc/source/admin/cpu-topologies.rst b/doc/source/admin/cpu-topologies.rst
index 5c9174e1c33..179f7bd3775 100644
--- a/doc/source/admin/cpu-topologies.rst
+++ b/doc/source/admin/cpu-topologies.rst
@@ -7,8 +7,10 @@ control over how instances run on hypervisor CPUs and the topology of virtual
CPUs available to instances. These features help minimize latency and maximize
performance.
+.. include:: /common/numa-live-migration-warning.txt
+
SMP, NUMA, and SMT
-~~~~~~~~~~~~~~~~~~
+------------------
Symmetric multiprocessing (SMP)
SMP is a design found in many modern multi-core systems. In an SMP system,
@@ -44,8 +46,20 @@ In OpenStack, SMP CPUs are known as *cores*, NUMA cells or nodes are known as
eight core system with Hyper-Threading would have four sockets, eight cores per
socket and two threads per core, for a total of 64 CPUs.
+PCPU and VCPU
+-------------
+
+PCPU
+ Resource class representing an amount of dedicated CPUs for a single guest.
+
+VCPU
+ Resource class representing a unit of CPU resources for a single guest
+ approximating the processing power of a single physical processor.
+
+.. _numa-topologies:
+
Customizing instance NUMA placement policies
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+--------------------------------------------
.. important::
@@ -77,14 +91,9 @@ vCPUs of different NUMA cells on the instance to the corresponding NUMA cells
on the host. It will also expose the NUMA topology of the instance to the
guest OS.
-If you want compute to pin a particular vCPU as part of this process,
-set the ``vcpu_pin_set`` parameter in the ``nova.conf`` configuration
-file. For more information about the ``vcpu_pin_set`` parameter, see the
-:doc:`/configuration/config`.
-
In all cases where NUMA awareness is used, the ``NUMATopologyFilter``
filter must be enabled. Details on this filter are provided in
-:doc:`/admin/configuration/schedulers`.
+:doc:`/admin/scheduling`.
.. caution::
@@ -106,12 +115,14 @@ filter must be enabled. Details on this filter are provided in
When used, NUMA awareness allows the operating system of the instance to
intelligently schedule the workloads that it runs and minimize cross-node
-memory bandwidth. To restrict an instance's vCPUs to a single host NUMA node,
+memory bandwidth. To configure guest NUMA nodes, you can use the
+:nova:extra-spec:`hw:numa_nodes` flavor extra spec.
+For example, to restrict an instance's vCPUs to a single host NUMA node,
run:
.. code-block:: console
- $ openstack flavor set m1.large --property hw:numa_nodes=1
+ $ openstack flavor set $FLAVOR --property hw:numa_nodes=1
Some workloads have very demanding requirements for memory access latency or
bandwidth that exceed the memory bandwidth available from a single NUMA node.
@@ -122,40 +133,73 @@ nodes, run:
.. code-block:: console
- $ openstack flavor set m1.large --property hw:numa_nodes=2
+ $ openstack flavor set $FLAVOR --property hw:numa_nodes=2
-The allocation of instances vCPUs and memory from different host NUMA nodes can
+The allocation of instance vCPUs and memory from different host NUMA nodes can
be configured. This allows for asymmetric allocation of vCPUs and memory, which
-can be important for some workloads. To spread the 6 vCPUs and 6 GB of memory
+can be important for some workloads. You can configure the allocation of
+instance vCPUs and memory across each **guest** NUMA node using the
+:nova:extra-spec:`hw:numa_cpus.{num}` and :nova:extra-spec:`hw:numa_mem.{num}`
+extra specs respectively.
+For example, to spread the 6 vCPUs and 6 GB of memory
of an instance across two NUMA nodes and create an asymmetric 1:2 vCPU and
memory mapping between the two nodes, run:
.. code-block:: console
- $ openstack flavor set m1.large --property hw:numa_nodes=2
- $ openstack flavor set m1.large \ # configure guest node 0
+ $ openstack flavor set $FLAVOR --property hw:numa_nodes=2
+ # configure guest node 0
+ $ openstack flavor set $FLAVOR \
--property hw:numa_cpus.0=0,1 \
--property hw:numa_mem.0=2048
- $ openstack flavor set m1.large \ # configure guest node 1
+ # configure guest node 1
+ $ openstack flavor set $FLAVOR \
--property hw:numa_cpus.1=2,3,4,5 \
--property hw:numa_mem.1=4096
+.. note::
+
+ The ``{num}`` parameter is an index of *guest* NUMA nodes and may not
+ correspond to *host* NUMA nodes. For example, on a platform with two NUMA
+ nodes, the scheduler may opt to place guest NUMA node 0, as referenced in
+ ``hw:numa_mem.0`` on host NUMA node 1 and vice versa. Similarly, the
+   CPU bitmask specified in the value for ``hw:numa_cpus.{num}`` refers to
+ *guest* vCPUs and may not correspond to *host* CPUs. As such, this feature
+ cannot be used to constrain instances to specific host CPUs or NUMA nodes.
+
+.. warning::
+
+ If the combined values of ``hw:numa_cpus.{num}`` or ``hw:numa_mem.{num}``
+ are greater than the available number of CPUs or memory respectively, an
+ exception will be raised.
+
.. note::
Hyper-V does not support asymmetric NUMA topologies, and the Hyper-V
driver will not spawn instances with such topologies.
For more information about the syntax for ``hw:numa_nodes``, ``hw:numa_cpus.N``
-and ``hw:num_mem.N``, refer to the :ref:`NUMA
-topology ` guide.
+and ``hw:numa_mem.N``, refer to :doc:`/configuration/extra-specs`.
+
+
+.. _cpu-pinning-policies:
Customizing instance CPU pinning policies
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+-----------------------------------------
.. important::
The functionality described below is currently only supported by the
- libvirt/KVM driver. Hyper-V does not support CPU pinning.
+ libvirt/KVM driver and requires :ref:`some host configuration
+ ` for this to work. Hyper-V does not support CPU
+ pinning.
+
+.. note::
+
+ There is no correlation required between the NUMA topology exposed in the
+ instance and how the instance is actually pinned on the host. This is by
+ design. See this `invalid bug
+ `_ for more information.
By default, instance vCPU processes are not assigned to any particular host
CPU, instead, they float across host CPUs like any other process. This allows
@@ -168,74 +212,216 @@ possible with the latency introduced by the default CPU policy. For such
workloads, it is beneficial to control which host CPUs are bound to an
instance's vCPUs. This process is known as pinning. No instance with pinned
CPUs can use the CPUs of another pinned instance, thus preventing resource
-contention between instances. To configure a flavor to use pinned vCPUs, a
-use a dedicated CPU policy. To force this, run:
+contention between instances.
+
+CPU pinning policies can be used to determine whether an instance should be
+pinned or not. They can be configured using the
+:nova:extra-spec:`hw:cpu_policy` extra spec and equivalent image metadata
+property. There are three policies: ``dedicated``, ``mixed`` and
+``shared`` (the default). The ``dedicated`` CPU policy is used to specify
+that all CPUs of an instance should use pinned CPUs. To configure a flavor to
+use the ``dedicated`` CPU policy, run:
.. code-block:: console
- $ openstack flavor set m1.large --property hw:cpu_policy=dedicated
+ $ openstack flavor set $FLAVOR --property hw:cpu_policy=dedicated
-.. caution::
+This works by ensuring ``PCPU`` allocations are used instead of ``VCPU``
+allocations. As such, it is also possible to request this resource type
+explicitly. To configure this, run:
+
+.. code-block:: console
+
+ $ openstack flavor set $FLAVOR --property resources:PCPU=N
+
+(where ``N`` is the number of vCPUs defined in the flavor).
+
+.. note::
+
+ It is not currently possible to request ``PCPU`` and ``VCPU`` resources in
+ the same instance.
+
+The ``shared`` CPU policy is used to specify that an instance **should not**
+use pinned CPUs. To configure a flavor to use the ``shared`` CPU policy, run:
+
+.. code-block:: console
+
+ $ openstack flavor set $FLAVOR --property hw:cpu_policy=shared
+
+The ``mixed`` CPU policy is used to specify that an instance uses pinned CPUs
+along with unpinned CPUs. The instance's pinned CPUs can be specified in the
+:nova:extra-spec:`hw:cpu_dedicated_mask` or, if :doc:`real-time <real-time>`
+is enabled, in the :nova:extra-spec:`hw:cpu_realtime_mask` extra spec. For
+example, to configure a flavor to use the ``mixed`` CPU policy with 4 vCPUs in
+total and the first 2 vCPUs as pinned CPUs, run:
+
+.. code-block:: console
+
+ $ openstack flavor set $FLAVOR \
+ --vcpus=4 \
+ --property hw:cpu_policy=mixed \
+ --property hw:cpu_dedicated_mask=0-1
+
+To configure a flavor to use the ``mixed`` CPU policy with 4 vCPUs in total and
+the first 2 vCPUs as pinned **real-time** CPUs, run:
+
+.. code-block:: console
+
+ $ openstack flavor set $FLAVOR \
+ --vcpus=4 \
+ --property hw:cpu_policy=mixed \
+ --property hw:cpu_realtime=yes \
+ --property hw:cpu_realtime_mask=0-1
+
+.. note::
+
+ For more information about the syntax for ``hw:cpu_policy``,
+   ``hw:cpu_dedicated_mask``, ``hw:cpu_realtime`` and ``hw:cpu_realtime_mask``,
+   refer to :doc:`/configuration/extra-specs`.
+
+.. note::
+
+ For more information about real-time functionality, refer to the
+   :doc:`documentation <real-time>`.
+
+It is also possible to configure the CPU policy via image metadata. This can
+be useful when packaging applications that require real-time or near real-time
+behavior by ensuring instances created with a given image are always pinned
+regardless of flavor. To configure an image to use the ``dedicated`` CPU
+policy, run:
+
+.. code-block:: console
- Host aggregates should be used to separate pinned instances from unpinned
- instances as the latter will not respect the resourcing requirements of
- the former.
+ $ openstack image set $IMAGE --property hw_cpu_policy=dedicated
-When running workloads on SMT hosts, it is important to be aware of the impact
-that thread siblings can have. Thread siblings share a number of components
-and contention on these components can impact performance. To configure how
-to use threads, a CPU thread policy should be specified. For workloads where
-sharing benefits performance, use thread siblings. To force this, run:
+Likewise, to configure an image to use the ``shared`` CPU policy, run:
.. code-block:: console
- $ openstack flavor set m1.large \
+ $ openstack image set $IMAGE --property hw_cpu_policy=shared
+
+.. note::
+
+ For more information about image metadata, refer to the `Image metadata`_
+ guide.
+
+.. important::
+
+ Flavor-based policies take precedence over image-based policies. For
+ example, if a flavor specifies a CPU policy of ``dedicated`` then that
+ policy will be used. If the flavor specifies a CPU policy of
+ ``shared`` and the image specifies no policy or a policy of ``shared`` then
+   the ``shared`` policy will be used. However, if the flavor specifies a CPU
+ policy of ``shared`` and the image specifies a policy of ``dedicated``, or
+ vice versa, an exception will be raised. This is by design. Image metadata
+ is often configurable by non-admin users, while flavors are only
+ configurable by admins. By setting a ``shared`` policy through flavor
+ extra-specs, administrators can prevent users configuring CPU policies in
+ images and impacting resource utilization.
+
+Customizing instance CPU thread pinning policies
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+.. important::
+
+ The functionality described below requires the use of pinned instances and
+ is therefore currently only supported by the libvirt/KVM driver and requires
+ :ref:`some host configuration ` for this to work.
+ Hyper-V does not support CPU pinning.
+
+When running pinned instances on SMT hosts, it may also be necessary to
+consider the impact that thread siblings can have on the instance workload. The
+presence of an SMT implementation like Intel Hyper-Threading can boost
+performance `by up to 30%`__ for some workloads. However, thread siblings
+share a number of components and contention on these components can diminish
+performance for other workloads. For this reason, it is also possible to
+explicitly request hosts with or without SMT.
+
+__ https://software.intel.com/en-us/articles/how-to-determine-the-effectiveness-of-hyper-threading-technology-with-an-application
+
+To configure whether an instance should be placed on a host with SMT or not, a
+CPU thread policy may be specified. For workloads where sharing benefits
+performance, you can request hosts **with** SMT. To configure this, run:
+
+.. code-block:: console
+
+ $ openstack flavor set $FLAVOR \
--property hw:cpu_policy=dedicated \
--property hw:cpu_thread_policy=require
+This will ensure the instance gets scheduled to a host with SMT by requesting
+hosts that report the ``HW_CPU_HYPERTHREADING`` trait. It is also possible to
+request this trait explicitly. To configure this, run:
+
+.. code-block:: console
+
+ $ openstack flavor set $FLAVOR \
+ --property resources:PCPU=N \
+ --property trait:HW_CPU_HYPERTHREADING=required
+
For other workloads where performance is impacted by contention for resources,
-use non-thread siblings or non-SMT hosts. To force this, run:
+you can request hosts **without** SMT. To configure this, run:
.. code-block:: console
- $ openstack flavor set m1.large \
+ $ openstack flavor set $FLAVOR \
--property hw:cpu_policy=dedicated \
--property hw:cpu_thread_policy=isolate
-Finally, for workloads where performance is minimally impacted, use thread
-siblings if available. This is the default, but it can be set explicitly:
+This will ensure the instance gets scheduled to a host without SMT by
+requesting hosts that **do not** report the ``HW_CPU_HYPERTHREADING`` trait.
+It is also possible to request this trait explicitly. To configure this, run:
+
+.. code-block:: console
+
+ $ openstack flavor set $FLAVOR \
+ --property resources:PCPU=N \
+ --property trait:HW_CPU_HYPERTHREADING=forbidden
+
+Finally, for workloads where performance is minimally impacted, you may use
+thread siblings if available and fall back to not using them if necessary. This
+is the default, but it can be set explicitly:
.. code-block:: console
- $ openstack flavor set m1.large \
+ $ openstack flavor set $FLAVOR \
--property hw:cpu_policy=dedicated \
--property hw:cpu_thread_policy=prefer
-For more information about the syntax for ``hw:cpu_policy`` and
-``hw:cpu_thread_policy``, refer to the :doc:`/admin/flavors` guide.
+This does not utilize traits and, as such, there is no trait-based equivalent.
-Applications are frequently packaged as images. For applications that require
-real-time or near real-time behavior, configure image metadata to ensure
-created instances are always pinned regardless of flavor. To configure an
-image to use pinned vCPUs and avoid thread siblings, run:
+.. note::
+
+ For more information about the syntax for ``hw:cpu_thread_policy``, refer to
+ :doc:`/configuration/extra-specs`.
+
+As with CPU policies, it is also possible to configure the CPU thread policy
+via image metadata. This can be useful when packaging applications that require
+real-time or near real-time behavior by ensuring instances created with a given
+image are always pinned regardless of flavor. To configure an image to use the
+``require`` CPU thread policy, run:
+
+.. code-block:: console
+
+ $ openstack image set $IMAGE \
+ --property hw_cpu_policy=dedicated \
+ --property hw_cpu_thread_policy=require
+
+Likewise, to configure an image to use the ``isolate`` CPU thread policy, run:
.. code-block:: console
- $ openstack image set [IMAGE_ID] \
+ $ openstack image set $IMAGE \
--property hw_cpu_policy=dedicated \
--property hw_cpu_thread_policy=isolate
-If the flavor specifies a CPU policy of ``dedicated`` then that policy will be
-used. If the flavor explicitly specifies a CPU policy of ``shared`` and the
-image specifies no policy or a policy of ``shared`` then the ``shared`` policy
-will be used, but if the image specifies a policy of ``dedicated`` an exception
-will be raised. By setting a ``shared`` policy through flavor extra-specs,
-administrators can prevent users configuring CPU policies in images and
-impacting resource utilization. To configure this policy, run:
+Finally, to configure an image to use the ``prefer`` CPU thread policy, run:
.. code-block:: console
- $ openstack flavor set m1.large --property hw:cpu_policy=shared
+ $ openstack image set $IMAGE \
+ --property hw_cpu_policy=dedicated \
+ --property hw_cpu_thread_policy=prefer
If the flavor does not specify a CPU thread policy then the CPU thread policy
specified by the image (if any) will be used. If both the flavor and image
@@ -244,59 +430,82 @@ an exception will be raised.
.. note::
- There is no correlation required between the NUMA topology exposed in the
- instance and how the instance is actually pinned on the host. This is by
- design. See this `invalid bug
- `_ for more information.
+ For more information about image metadata, refer to the `Image metadata`_
+ guide.
-For more information about image metadata, refer to the `Image metadata`_
-guide.
+.. _emulator-thread-pinning-policies:
Customizing instance emulator thread pinning policies
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-When guests need dedicated vCPU allocation, it may not be acceptable to allow
-emulator threads to steal time from real-time vCPUs.
-
-In order to achieve emulator thread pinning, configure the
-``hw:emulator_threads_policy`` flavor extra spec. Additionally,
-``hw:cpu_policy`` needs to be set to ``dedicated``. The default value for
-``hw:emulator_threads_policy`` is ``share``.
+.. important::
-If you want to tell nova to reserve a dedicated CPU per instance for emulator
-thread pinning, configure ``hw:emulator_threads_policy`` as ``isolate``.
+ The functionality described below requires the use of pinned instances and
+ is therefore currently only supported by the libvirt/KVM driver and requires
+ :ref:`some host configuration ` for this to work.
+ Hyper-V does not support CPU pinning.
+
+In addition to the work of the guest OS and applications running in an
+instance, there is a small amount of overhead associated with the underlying
+hypervisor. By default, these overhead tasks - known collectively as emulator
+threads - run on the same host CPUs as the instance itself and will result in a
+minor performance penalty for the instance. This is not usually an issue;
+however, for things like real-time instances, it may not be acceptable for
+emulator threads to steal time from instance CPUs.
+
+Emulator thread policies can be used to ensure emulator threads are run on
+cores separate from those used by the instance. There are two policies:
+``isolate`` and ``share``. The default is to run the emulator threads on the
+same cores as the instance. The ``isolate`` emulator thread policy is used to specify that
+emulator threads for a given instance should be run on their own unique core,
+chosen from one of the host cores listed in
+:oslo.config:option:`compute.cpu_dedicated_set`. To configure a flavor to use
+the ``isolate`` emulator thread policy, run:
.. code-block:: console
- $ openstack flavor set m1.large \
+ $ openstack flavor set $FLAVOR \
--property hw:cpu_policy=dedicated \
--property hw:emulator_threads_policy=isolate
-An instance spawned with these settings will have a dedicated physical CPU
-which is chosen from the ``vcpu_pin_set`` in addition to the physical CPUs
-which are reserved for the vCPUs.
-
-If you want to tell nova to pin the emulator threads to a shared set of
-dedicated CPUs, configure ``hw:emulator_threads_policy`` as ``share``.
+The ``share`` policy is used to specify that emulator threads from a given
+instance should be run on the pool of host cores listed in
+:oslo.config:option:`compute.cpu_shared_set` if configured, else across all
+pCPUs of the instance.
+To configure a flavor to use the ``share`` emulator thread policy, run:
.. code-block:: console
- $ openstack flavor set m1.large \
+ $ openstack flavor set $FLAVOR \
--property hw:cpu_policy=dedicated \
--property hw:emulator_threads_policy=share
-Additionally, set ``[compute]/cpu_shared_set`` in ``/etc/nova/nova.conf`` to
-the set of host CPUs that should be used for best-effort CPU resources.
-
-.. code-block:: console
+The above behavior can be summarized in this helpful table:
+
+.. list-table::
+ :header-rows: 1
+ :stub-columns: 1
+
+ * -
+ - :oslo.config:option:`compute.cpu_shared_set` set
+ - :oslo.config:option:`compute.cpu_shared_set` unset
+   * - ``hw:emulator_threads_policy`` unset (default)
+ - Pinned to all of the instance's pCPUs
+ - Pinned to all of the instance's pCPUs
+ * - ``hw:emulator_threads_policy`` = ``share``
+ - Pinned to :oslo.config:option:`compute.cpu_shared_set`
+ - Pinned to all of the instance's pCPUs
+ * - ``hw:emulator_threads_policy`` = ``isolate``
+ - Pinned to a single pCPU distinct from the instance's pCPUs
+ - Pinned to a single pCPU distinct from the instance's pCPUs
- # crudini --set /etc/nova/nova.conf compute cpu_shared_set 4,5,8-11
+.. note::
-For more information about the syntax for ``hw:emulator_threads_policy``,
-refer to the :doc:`/admin/flavors` guide.
+ For more information about the syntax for ``hw:emulator_threads_policy``,
+ refer to :nova:extra-spec:`the documentation `.
Customizing instance CPU topologies
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+-----------------------------------
.. important::
@@ -322,17 +531,17 @@ sockets.
Some workloads benefit from a custom topology. For example, in some operating
systems, a different license may be needed depending on the number of CPU
-sockets. To configure a flavor to use a maximum of two sockets, run:
+sockets. To configure a flavor to use two sockets, run:
.. code-block:: console
- $ openstack flavor set m1.large --property hw:cpu_sockets=2
+ $ openstack flavor set $FLAVOR --property hw:cpu_sockets=2
Similarly, to configure a flavor to use one core and one thread, run:
.. code-block:: console
- $ openstack flavor set m1.large \
+ $ openstack flavor set $FLAVOR \
--property hw:cpu_cores=1 \
--property hw:cpu_threads=1
@@ -347,22 +556,25 @@ Similarly, to configure a flavor to use one core and one thread, run:
with ten cores fails.
For more information about the syntax for ``hw:cpu_sockets``, ``hw:cpu_cores``
-and ``hw:cpu_threads``, refer to the :doc:`/admin/flavors` guide.
+and ``hw:cpu_threads``, refer to :doc:`/configuration/extra-specs`.
It is also possible to set upper limits on the number of sockets, cores, and
threads used. Unlike the hard values above, it is not necessary for this exact
number to be used because it only provides a limit. This can be used to provide
some flexibility in scheduling, while ensuring certain limits are not
-exceeded. For example, to ensure no more than two sockets are defined in the
-instance topology, run:
+exceeded. For example, to ensure no more than two sockets, eight cores and one
+thread are defined in the instance topology, run:
.. code-block:: console
- $ openstack flavor set m1.large --property hw:cpu_max_sockets=2
+ $ openstack flavor set $FLAVOR \
+ --property hw:cpu_max_sockets=2 \
+ --property hw:cpu_max_cores=8 \
+ --property hw:cpu_max_threads=1
For more information about the syntax for ``hw:cpu_max_sockets``,
-``hw:cpu_max_cores``, and ``hw:cpu_max_threads``, refer to the
-:doc:`/admin/flavors` guide.
+``hw:cpu_max_cores``, and ``hw:cpu_max_threads``, refer to
+:doc:`/configuration/extra-specs`.
Applications are frequently packaged as images. For applications that prefer
certain CPU topologies, configure image metadata to hint that created instances
@@ -371,7 +583,7 @@ request a two-socket, four-core per socket topology, run:
.. code-block:: console
- $ openstack image set [IMAGE_ID] \
+ $ openstack image set $IMAGE \
--property hw_cpu_sockets=2 \
--property hw_cpu_cores=4
@@ -381,7 +593,7 @@ maximum of one thread, run:
.. code-block:: console
- $ openstack image set [IMAGE_ID] \
+ $ openstack image set $IMAGE \
--property hw_cpu_max_sockets=2 \
--property hw_cpu_max_threads=1
topologies that might, for example, incur additional licensing fees.
For more information about image metadata, refer to the `Image metadata`_
guide.
+.. _configure-libvirt-pinning:
+
+Configuring libvirt compute nodes for CPU pinning
+-------------------------------------------------
+
+.. versionchanged:: 20.0.0
+
+ Prior to 20.0.0 (Train), it was not necessary to explicitly configure hosts
+ for pinned instances. However, it was not possible to place pinned instances
+   on the same host as unpinned instances, which typically meant hosts had to
+   be grouped into host aggregates. If this was not done, unpinned instances
+   would continue floating across all enabled host CPUs, even those to which
+   some instance CPUs were pinned. Starting in 20.0.0, it is necessary to
+ explicitly identify the host cores that should be used for pinned instances.
+
+Nova treats host CPUs used for unpinned instances differently from those used
+by pinned instances. The former are tracked in placement using the ``VCPU``
+resource type and can be overallocated, while the latter are tracked using the
+``PCPU`` resource type. By default, nova will report all host CPUs as ``VCPU``
+inventory, however, this can be configured using the
+:oslo.config:option:`compute.cpu_shared_set` config option, to specify which
+host CPUs should be used for ``VCPU`` inventory, and the
+:oslo.config:option:`compute.cpu_dedicated_set` config option, to specify which
+host CPUs should be used for ``PCPU`` inventory.
+
+Consider a compute node with a total of 24 host physical CPU cores with
+hyperthreading enabled. The operator wishes to reserve 1 physical CPU core and
+its thread sibling for host processing (not for guest instance use).
+Furthermore, the operator wishes to use 8 host physical CPU cores and their
+thread siblings for dedicated guest CPU resources. The remaining 15 host
+physical CPU cores and their thread siblings will be used for shared guest vCPU
+usage, with an 8:1 allocation ratio for those physical processors used for
+shared guest CPU resources.
+
+The operator could configure ``nova.conf`` like so::
+
+ [DEFAULT]
+ cpu_allocation_ratio=8.0
+
+ [compute]
+ cpu_dedicated_set=2-17
+ cpu_shared_set=18-47
+
+The virt driver will construct a provider tree containing a single resource
+provider representing the compute node and report inventory of ``PCPU`` and
+``VCPU`` for this single provider accordingly::
+
+ COMPUTE NODE provider
+ PCPU:
+ total: 16
+ reserved: 0
+ min_unit: 1
+ max_unit: 16
+ step_size: 1
+ allocation_ratio: 1.0
+ VCPU:
+ total: 30
+ reserved: 0
+ min_unit: 1
+ max_unit: 30
+ step_size: 1
+ allocation_ratio: 8.0
+
+For instances using the ``dedicated`` CPU policy or an explicit ``PCPU`` resource
+request, ``PCPU`` inventory will be consumed. Instances using the ``shared``
+CPU policy, meanwhile, will consume ``VCPU`` inventory.
+
+.. note::
+
+ ``PCPU`` and ``VCPU`` allocations are currently combined to calculate the
+ value for the ``cores`` quota class.
+
.. _configure-hyperv-numa:
Configuring Hyper-V compute nodes for instance NUMA policies
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+------------------------------------------------------------
Hyper-V is configured by default to allow instances to span multiple NUMA
nodes, regardless if the instances have been configured to only span N NUMA
@@ -439,6 +723,6 @@ memory allocation turned on. The Hyper-V driver will ignore the configured
instances with a NUMA topology.
.. Links
-.. _`Image metadata`: https://docs.openstack.org/image-guide/image-metadata.html
+.. _`Image metadata`: https://docs.openstack.org/image-guide/introduction.html#image-metadata
.. _`discussion`: http://lists.openstack.org/pipermail/openstack-dev/2016-March/090367.html
.. _`MTTCG project`: http://wiki.qemu.org/Features/tcg-multithread
diff --git a/doc/source/admin/emulated-tpm.rst b/doc/source/admin/emulated-tpm.rst
new file mode 100644
index 00000000000..5a1830e1a1f
--- /dev/null
+++ b/doc/source/admin/emulated-tpm.rst
@@ -0,0 +1,131 @@
+=======================================
+Emulated Trusted Platform Module (vTPM)
+=======================================
+
+.. versionadded:: 22.0.0 (Victoria)
+
+Starting in the 22.0.0 (Victoria) release, Nova supports adding an emulated
+virtual `Trusted Platform Module`__ (vTPM) to guests.
+
+.. __: https://en.wikipedia.org/wiki/Trusted_Platform_Module
+
+
+Enabling vTPM
+-------------
+
+The following are required on each compute host wishing to support the vTPM
+feature:
+
+* Currently vTPM is only supported when using the libvirt compute driver with a
+ :oslo.config:option:`libvirt.virt_type` of ``kvm`` or ``qemu``.
+
+* A `key manager service`__, such as `barbican`__, must be configured to store
+ secrets used to encrypt the virtual device files at rest.
+
+* The swtpm__ binary and associated libraries__.
+
+* Set the :oslo.config:option:`libvirt.swtpm_enabled` config option to
+ ``True``. This will enable support for both TPM version 1.2 and 2.0.
+
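+  For example, a minimal ``nova.conf`` sketch for such a compute host:
+
+  .. code-block:: ini
+
+     [libvirt]
+     swtpm_enabled = True
+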
+With the above requirements satisfied, verify vTPM support by inspecting the
+traits on the compute node's resource provider:
+
+.. code:: bash
+
+ $ COMPUTE_UUID=$(openstack resource provider list --name $HOST -f value -c uuid)
+ $ openstack resource provider trait list $COMPUTE_UUID | grep SECURITY_TPM
+ | COMPUTE_SECURITY_TPM_1_2 |
+ | COMPUTE_SECURITY_TPM_2_0 |
+
+.. __: https://docs.openstack.org/api-guide/key-manager/
+.. __: https://docs.openstack.org/barbican/latest/
+.. __: https://github.com/stefanberger/swtpm/wiki
+.. __: https://github.com/stefanberger/libtpms/
+
+
+Configuring a flavor or image
+-----------------------------
+
+A vTPM can be requested on a server via flavor extra specs or image metadata
+properties. There are two versions supported - 1.2 and 2.0 - and two models -
+TPM Interface Specification (TIS) and Command-Response Buffer (CRB). The CRB
+model is only supported with version 2.0.
+
+.. list-table::
+ :header-rows: 1
+
+ * - Flavor extra_specs
+ - Image metadata
+ - Description
+ * - ``hw:tpm_version``
+ - ``hw_tpm_version``
+ - Specify the TPM version, ``1.2`` or ``2.0``. Required if requesting a
+ vTPM.
+ * - ``hw:tpm_model``
+ - ``hw_tpm_model``
+ - Specify the TPM model, ``tpm-tis`` (the default) or ``tpm-crb`` (only
+       valid with version ``2.0``).
+
+For example, to configure a flavor to use TPM version 2.0 with the CRB model:
+
+.. code-block:: console
+
+ $ openstack flavor set $FLAVOR \
+ --property hw:tpm_version=2.0 \
+ --property hw:tpm_model=tpm-crb
+
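+Likewise, to configure an image with the equivalent image metadata properties
+listed in the table above:
+
+.. code-block:: console
+
+   $ openstack image set $IMAGE \
+     --property hw_tpm_version=2.0 \
+     --property hw_tpm_model=tpm-crb
+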
+Scheduling will fail if flavor and image supply conflicting values, or if model
+``tpm-crb`` is requested with version ``1.2``.
+
+Upon successful boot, the server should see a TPM device such as ``/dev/tpm0``
+which can be used in the same manner as a hardware TPM.
+
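+For example, from inside the guest:
+
+.. code-block:: console
+
+   $ ls /dev/tpm*
+   /dev/tpm0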
+
+Limitations
+-----------
+
+* Only server operations performed by the server owner are supported, as the
+ user's credentials are required to unlock the virtual device files on the
+ host. Thus the admin may need to decide whether to grant the user additional
+ policy roles; if not, those operations are effectively disabled.
+
+* Live migration, evacuation, shelving and rescuing of servers with vTPMs is
+ not currently supported.
+
+
+Security
+--------
+
+With a hardware TPM, the root of trust is a secret known only to the TPM user.
+In contrast, an emulated TPM comprises a file on disk which the libvirt daemon
+must be able to present to the guest. At rest, this file is encrypted using a
+passphrase stored in a key manager service. The passphrase in the key manager
+is associated with the credentials of the owner of the server (the user who
+initially created it). The passphrase is retrieved and used by libvirt to
+unlock the emulated TPM data any time the server is booted.
+
+Although the above mechanism uses a libvirt secret__ that is both ``private``
+(can't be displayed via the libvirt API or ``virsh``) and ``ephemeral`` (exists
+only in memory, never on disk), it is theoretically possible for a sufficiently
+privileged user to retrieve the secret and/or vTPM data from memory.
+
+A full analysis and discussion of security issues related to emulated TPM is
+beyond the scope of this document.
+
+.. __: https://libvirt.org/formatsecret.html#SecretAttributes
+
+
+References
+----------
+
+* `TCG PC Client Specific TPM Interface Specification (TIS)`__
+* `TCG PC Client Platform TPM Profile (PTP) Specification`__
+* `QEMU docs on tpm`__
+* `Libvirt XML to request emulated TPM device`__
+* `Libvirt secret for usage type ``vtpm```__
+
+.. __: https://trustedcomputinggroup.org/resource/pc-client-work-group-pc-client-specific-tpm-interface-specification-tis/
+.. __: https://trustedcomputinggroup.org/resource/pc-client-platform-tpm-profile-ptp-specification/
+.. __: https://qemu.readthedocs.io/en/latest/specs/tpm.html
+.. __: https://libvirt.org/formatdomain.html#elementsTpm
+.. __: https://libvirt.org/formatsecret.html#vTPMUsageType
diff --git a/doc/source/admin/figures/SCH_5009_V00_NUAC-VNC_OpenStack.svg b/doc/source/admin/figures/SCH_5009_V00_NUAC-VNC_OpenStack.svg
index 563dea780b0..f2934118829 100644
--- a/doc/source/admin/figures/SCH_5009_V00_NUAC-VNC_OpenStack.svg
+++ b/doc/source/admin/figures/SCH_5009_V00_NUAC-VNC_OpenStack.svg
@@ -467,12 +467,12 @@
          Sheet.53
-         Browses the url returned Http://novncip:port/?token=xyz
+         Browses the url returned Http://novncip:port/?path=%3Ftoken%3Dxyz
          Sheet.28
diff --git a/doc/source/admin/file-backed-memory.rst b/doc/source/admin/file-backed-memory.rst
index 22fbc951821..dffb3de3833 100644
--- a/doc/source/admin/file-backed-memory.rst
+++ b/doc/source/admin/file-backed-memory.rst
@@ -46,14 +46,22 @@ Libvirt
capability requires libvirt version 4.4.0 or newer.
Qemu
- File-backed memory requires qemu version 2.6.0 or newer.Discard capability
+ File-backed memory requires qemu version 2.6.0 or newer. Discard capability
requires qemu version 2.10.0 or newer.
Memory overcommit
File-backed memory is not compatible with memory overcommit.
- ``ram_allocation_ratio`` must be set to ``1.0`` in ``nova.conf``, and the
- host must not be added to a host aggregate with ``ram_allocation_ratio``
- set to anything but ``1.0``.
+ :oslo.config:option:`ram_allocation_ratio` must be set to ``1.0`` in
+  ``nova.conf``, and the host must not be added to a :doc:`host aggregate
+  <aggregates>` with ``ram_allocation_ratio`` set to anything but
+ ``1.0``.
+
+Reserved memory
+ When configured, file-backed memory is reported as total system memory to
+ placement, with RAM used as cache. Reserved memory corresponds to disk
+ space not set aside for file-backed memory.
+ :oslo.config:option:`reserved_host_memory_mb` should be set to ``0`` in
+ ``nova.conf``.
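+
+  For example, a minimal ``nova.conf`` sketch satisfying both the memory
+  overcommit and reserved memory requirements above:
+
+  .. code-block:: ini
+
+     [DEFAULT]
+     ram_allocation_ratio = 1.0
+     reserved_host_memory_mb = 0
+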
Huge pages
File-backed memory is not compatible with huge pages. Instances with huge
diff --git a/doc/source/admin/flavors.rst b/doc/source/admin/flavors.rst
index d59730a623c..abf939d1119 100644
--- a/doc/source/admin/flavors.rst
+++ b/doc/source/admin/flavors.rst
@@ -19,8 +19,10 @@ manage flavors. To see information for this command, run:
.. note::
Configuration rights can be delegated to additional users by redefining
- the access controls for ``os_compute_api:os-flavor-manage`` in
- ``/etc/nova/policy.json`` on the ``nova-api`` server.
+ the access controls for ``os_compute_api:os-flavor-manage:create``,
+ ``os_compute_api:os-flavor-manage:update`` and
+ ``os_compute_api:os-flavor-manage:delete`` in ``/etc/nova/policy.yaml``
+ on the ``nova-api`` server.
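+
+      For example, a ``policy.yaml`` sketch delegating flavor management to a
+      hypothetical ``flavor-admin`` role (the role name here is an assumption):
+
+      .. code-block:: yaml
+
+         "os_compute_api:os-flavor-manage:create": "role:flavor-admin"
+         "os_compute_api:os-flavor-manage:update": "role:flavor-admin"
+         "os_compute_api:os-flavor-manage:delete": "role:flavor-admin"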
.. note::
@@ -111,12 +113,29 @@ and a new description as follows:
.. code-block:: console
- $ nova flavor-update FLAVOR DESCRIPTION
+   $ openstack --os-compute-api-version 2.55 flavor set --description <DESCRIPTION> <FLAVOR>
.. note::
- There are no commands to update a description of a flavor
- in the :command:`openstack` command currently (version 3.15.0).
+ The only field that can be updated is the description field.
+ Nova has historically intentionally not included an API to update
+ a flavor because that would be confusing for instances already
+ created with that flavor. Needing to change any other aspect of
+ a flavor requires deleting and/or creating a new flavor.
+
+ Nova stores a serialized version of the flavor associated with an
+ instance record in the ``instance_extra`` table. While nova supports
+ `updating flavor extra_specs`_ it does not update the embedded flavor
+ in existing instances. Nova does not update the embedded flavor
+ as the extra_specs change may invalidate the current placement
+ of the instance or alter the compute context that has been
+ created for the instance by the virt driver. For this reason
+ admins should avoid updating extra_specs for flavors used by
+ existing instances. A resize can be used to update existing
+ instances if required but as a resize performs a cold migration
+ it is not transparent to a tenant.
+
+.. _updating flavor extra_specs: https://docs.openstack.org/api-ref/compute/?expanded=#update-an-extra-spec-for-a-flavor
Delete a flavor
---------------
diff --git a/doc/source/admin/huge-pages.rst b/doc/source/admin/huge-pages.rst
index e53a58167e4..73f6c5dd2db 100644
--- a/doc/source/admin/huge-pages.rst
+++ b/doc/source/admin/huge-pages.rst
@@ -56,6 +56,7 @@ Enabling huge pages on the host
-------------------------------
.. important::
+
Huge pages may not be used on a host configured for file-backed memory. See
:doc:`file-backed-memory` for details
@@ -163,7 +164,7 @@ By default, an instance does not use huge pages for its underlying memory.
However, huge pages can bring important or required performance improvements
for some workloads. Huge pages must be requested explicitly through the use of
flavor extra specs or image metadata. To request an instance use huge pages,
-run:
+you can use the :nova:extra-spec:`hw:mem_page_size` flavor extra spec:
.. code-block:: console
@@ -178,7 +179,7 @@ are assumed. To request an instance to use 2 MB huge pages, run one of:
.. code-block:: console
- $ openstack flavor set m1.large --property hw:mem_page_size=2Mb
+ $ openstack flavor set m1.large --property hw:mem_page_size=2MB
.. code-block:: console
@@ -205,7 +206,7 @@ run:
$ openstack flavor set m1.large --property hw:mem_page_size=any
For more information about the syntax for ``hw:mem_page_size``, refer to
-:doc:`flavors`.
+:nova:extra-spec:`the documentation `.
Applications are frequently packaged as images. For applications that require
the IO performance improvements that huge pages provides, configure image
@@ -239,4 +240,4 @@ guide.
.. Links
.. _`Linux THP guide`: https://www.kernel.org/doc/Documentation/vm/transhuge.txt
.. _`Linux hugetlbfs guide`: https://www.kernel.org/doc/Documentation/vm/hugetlbpage.txt
-.. _`Image metadata`: https://docs.openstack.org/image-guide/image-metadata.html
+.. _`Image metadata`: https://docs.openstack.org/image-guide/introduction.html#image-metadata
diff --git a/doc/source/admin/hw-machine-type.rst b/doc/source/admin/hw-machine-type.rst
new file mode 100644
index 00000000000..e8a0df87e4d
--- /dev/null
+++ b/doc/source/admin/hw-machine-type.rst
@@ -0,0 +1,137 @@
+..
+ Licensed under the Apache License, Version 2.0 (the "License"); you may
+ not use this file except in compliance with the License. You may obtain
+ a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ License for the specific language governing permissions and limitations
+ under the License.
+
+======================================================================
+hw_machine_type - Configuring and updating QEMU instance machine types
+======================================================================
+
+.. versionadded:: 12.0.0 (Liberty)
+
+.. versionchanged:: 23.0.0 (Wallaby)
+
+ The libvirt driver now records the machine type of an instance at start up
+ allowing the ``[libvirt]hw_machine_type`` configurable to change over time
+ without impacting existing instances.
+
+ Added ``nova-manage`` commands to control the machine_type of an instance.
+
+.. note::
+
+ The following only applies to environments using libvirt compute hosts.
+
+Introduction
+------------
+
+QEMU's machine type concept can be thought of as a virtual chipset that
+provides certain default devices (e.g. PCIe graphics card, Ethernet controller,
+SATA controller, etc). QEMU supports two main variants of "machine type" for
+x86 hosts: (a) ``pc``, which corresponds to Intel's I440FX chipset (released in
+1996) and (b) ``q35``, which corresponds to Intel's 82Q35 chipset (released in
+2007). For AArch64 hosts, the machine type is called ``virt``.
+
+The ``pc`` machine type is considered legacy, and does not support many modern
+features. Although at the time of writing, upstream QEMU has not reached an
+agreement to remove new versioned variants of the ``pc`` machine type, some
+long-term stable Linux distributions (CentOS, RHEL, possibly others) are moving
+to support ``q35`` only.
+
+Configure
+---------
+
+For end users the machine type of an instance is controlled by the selection of
+an image with the `hw_machine_type image metadata property`__ set.
+
+.. __: https://docs.openstack.org/glance/latest/admin/useful-image-properties.html
+
+.. code-block:: shell
+
+ $ openstack image set --property hw_machine_type=q35 $IMAGE
+
+The libvirt virt driver supports the configuration of a per compute host
+default machine type via the :oslo.config:option:`libvirt.hw_machine_type`
+option. This provides a default machine type per host architecture, to be used
+when no corresponding ``hw_machine_type`` image property is provided for the
+instance.
+
+When this option is not defined the libvirt driver relies on the following
+`hardcoded dictionary`__ of default machine types per architecture:
+
+.. __: https://github.com/openstack/nova/blob/dc93e3b510f53d5b2198c8edd22528f0c899617e/nova/virt/libvirt/utils.py#L631-L638
+
+.. code-block:: python
+
+ default_mtypes = {
+ obj_fields.Architecture.ARMV7: "virt",
+ obj_fields.Architecture.AARCH64: "virt",
+ obj_fields.Architecture.S390: "s390-ccw-virtio",
+ obj_fields.Architecture.S390X: "s390-ccw-virtio",
+ obj_fields.Architecture.I686: "pc",
+ obj_fields.Architecture.X86_64: "pc",
+ }
+
+Update
+------
+
+Prior to the Wallaby (23.0.0) release the
+:oslo.config:option:`libvirt.hw_machine_type` option had to remain static once
+set for the lifetime of a deployment. This was due to the machine type of
+instances without a ``hw_machine_type`` image property using the newly
+configured machine types after a hard reboot or migration This could in turn
+break the internal ABI of the instance when changing between underlying machine
+types such as ``pc`` to ``q35``.
+
+From the Wallaby (23.0.0) release it is now possible to change the
+:oslo.config:option:`libvirt.hw_machine_type` config once all instances have a
+machine type recorded within the system metadata of the instance.
+
+To allow this, the libvirt driver will now attempt to record the machine type
+for any instance that doesn't already have it recorded during start up of the
+compute service or initial spawn of an instance. This should ensure a machine
+type is recorded for all instances after an upgrade to Wallaby that are not in
+a ``SHELVED_OFFLOADED`` state.
+
+To record a machine type for instances in a ``SHELVED_OFFLOADED`` state after
+an upgrade to Wallaby, a new :program:`nova-manage` command has been introduced
+to initially record the machine type of an instance.
+
+.. code-block:: shell
+
+ $ nova-manage libvirt update_machine_type $instance $machine_type
+
+This command can also be used later to update the specific machine type used by
+the instance. An additional :program:`nova-manage` command is also available to
+fetch the machine type of a specific instance:
+
+.. code-block:: shell
+
+ $ nova-manage libvirt get_machine_type $instance
+
+To confirm that all instances within an environment or a specific cell have had
+a machine type recorded, another :program:`nova-manage` command can be used:
+
+.. code-block:: shell
+
+ $ nova-manage libvirt list_unset_machine_type
+
+The logic behind this command is also used by a new :program:`nova-status`
+upgrade check that will fail with a warning when instances without a machine
+type set exist in an environment.
+
+.. code-block:: shell
+
+ $ nova-status upgrade check
+
+Once it has been verified that all instances within the environment or specific
+cell have had a machine type recorded, the
+:oslo.config:option:`libvirt.hw_machine_type` can be updated without impacting
+existing instances.
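+
+For example, to default x86_64 guests to ``q35`` (a sketch; the
+:oslo.config:option:`libvirt.hw_machine_type` option accepts a list of
+``arch=machine_type`` pairs):
+
+.. code-block:: ini
+
+   [libvirt]
+   hw_machine_type = x86_64=q35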
diff --git a/doc/source/admin/image-caching.rst b/doc/source/admin/image-caching.rst
new file mode 100644
index 00000000000..a5475c15bbe
--- /dev/null
+++ b/doc/source/admin/image-caching.rst
@@ -0,0 +1,113 @@
+=============
+Image Caching
+=============
+
+Nova supports caching base images on compute nodes when using a
+`supported virt driver`_.
+
+.. _supported virt driver: https://docs.openstack.org/nova/latest/user/support-matrix.html#operation_cache_images
+
+What is Image Caching?
+----------------------
+
+In order to understand what image caching is and why it is beneficial,
+it helps to be familiar with the process by which an instance is
+booted from a given base image. When a new instance is created on a
+compute node, the following general steps are performed by the compute
+manager in conjunction with the virt driver:
+
+#. Download the base image from glance
+#. Copy or COW the base image to create a new root disk image for the instance
+#. Boot the instance using the new root disk image
+
+The first step involves downloading the entire base image to the local
+disk on the compute node, which could involve many gigabytes of
+network traffic, storage, and many minutes of latency between the
+start of the boot process and actually running the instance. When the
+virt driver supports image caching, step #1 above may be skipped if
+the base image is already present on the compute node. This is most
+often the case when another instance has been booted on that node from
+the same base image recently. If present, the download operation can
+be skipped, which greatly reduces the time-to-boot for the second and
+subsequent instances that use the same base image, as well as avoids
+load on the glance server and the network connection.
+
+By default, the compute node will periodically scan the images it has
+cached, looking for base images that are older than a configured
+lifetime (24 hours by default) and are not used by any instances on
+the node. Those unused images are deleted from the cache directory
+until they are needed again.
+
+For more information about configuring image cache behavior, see the
+documentation for the configuration options in the
+:oslo.config:group:`image_cache` group.
+
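+For example, a ``nova.conf`` sketch that makes the default 24-hour lifetime
+explicit (option names from the ``image_cache`` group; the values shown are
+assumptions, not recommendations):
+
+.. code-block:: ini
+
+   [image_cache]
+   remove_unused_base_images = True
+   remove_unused_original_minimum_age_seconds = 86400
+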
+.. note::
+
+ Some ephemeral backend drivers may not use or need image caching,
+ or may not behave in the same way as others. For example, when
+ using the ``rbd`` backend with the ``libvirt`` driver and a shared
+ pool with glance, images are COW'd at the storage level and thus
+ need not be downloaded (and thus cached) at the compute node at
+ all.
+
+Image Caching Resource Accounting
+---------------------------------
+
+Generally the size of the image cache is not part of the data Nova
+includes when reporting available or consumed disk space. This means
+that when ``nova-compute`` reports 100G of total disk space, the
+scheduler will assume that 100G of instances may be placed
+there. Usually disk is the most plentiful resource and thus the last
+to be exhausted, so this is often not problematic. However, if many
+instances are booted from distinct images, all of which need to be
+cached in addition to the disk space used by the instances themselves,
+Nova may overcommit the disk unintentionally by failing to consider
+the size of the image cache.
+
+There are two approaches to addressing this situation:
+
+#. **Mount the image cache as a separate filesystem**. This will
+ cause Nova to report the amount of disk space available purely to
+ instances, independent of how much is consumed by the cache. Nova
+ will continue to disregard the size of the image cache and, if the
+ cache space is exhausted, builds will fail. However, available
+ disk space for instances will be correctly reported by
+ ``nova-compute`` and accurately considered by the scheduler.
+
+#. **Enable optional reserved disk amount behavior**. The
+ configuration workaround
+ :oslo.config:option:`workarounds.reserve_disk_resource_for_image_cache`
+ will cause ``nova-compute`` to periodically update the reserved disk
+ amount to include the statically configured value, as well as the
+ amount currently consumed by the image cache. This will cause the
+ scheduler to see the available disk space decrease as the image
+ cache grows. This is not updated synchronously and thus is not a
+ perfect solution, but should vastly increase the scheduler's
+ visibility resulting in better decisions. (Note this solution is
+ currently libvirt-specific)
+
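+For the second approach, a minimal ``nova.conf`` sketch enabling the
+workaround:
+
+.. code-block:: ini
+
+   [workarounds]
+   reserve_disk_resource_for_image_cache = True
+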
+As above, not all backends and virt drivers use image caching, and
+thus a third option may be to consider alternative infrastructure to
+eliminate this problem altogether.
+
+Image pre-caching
+-----------------
+
+It may be beneficial to pre-cache images on compute nodes in order to
+achieve low time-to-boot latency for new instances immediately. This
+is often useful when rolling out a new version of an application where
+downtime is important and having the new images already available on
+the compute nodes is critical.
+
+Nova provides (since the Ussuri release) a mechanism to request that
+images be cached without having to boot an actual instance on a
+node. This best-effort service operates at the host aggregate level in
+order to provide an efficient way to indicate that a large number of
+computes should receive a given set of images. If the computes that
+should pre-cache an image are not already in a defined host aggregate,
+that must be done first.
+
+For information on how to perform aggregate-based image pre-caching,
+see the :ref:`image-caching-aggregates` section of the Host aggregates
+documentation.
diff --git a/doc/source/admin/index.rst b/doc/source/admin/index.rst
index f68e006189c..960034ab8ff 100644
--- a/doc/source/admin/index.rst
+++ b/doc/source/admin/index.rst
@@ -11,38 +11,148 @@ Compute does not include virtualization software. Instead, it defines drivers
that interact with underlying virtualization mechanisms that run on your host
operating system, and exposes functionality over a web-based API.
+
+Overview
+--------
+
+To effectively administer compute, you must understand how the different
+installed nodes interact with each other. Compute can be installed in many
+different ways using multiple servers, but generally multiple compute nodes
+control the virtual servers and a cloud controller node contains the remaining
+Compute services.
+
+The Compute cloud works using a series of daemon processes named ``nova-*``
+that exist persistently on the host machine. These binaries can all run on the
+same machine or be spread out on multiple boxes in a large deployment. The
+responsibilities of services and drivers are:
+
+.. rubric:: Services
+
+``nova-api``
+  Receives HTTP API requests and sends them to the rest of the system. A WSGI
+  app routes and authenticates requests. Supports the OpenStack Compute APIs.
+  A ``nova.conf`` configuration file is created when Compute is installed.
+
+.. todo::
+
+ Describe nova-api-metadata, nova-api-os-compute, nova-serialproxy and
+ nova-spicehtml5proxy
+
+ nova-console, nova-dhcpbridge and nova-xvpvncproxy are all deprecated for
+ removal so they can be ignored.
+
+``nova-compute``
+ Manages virtual machines. Loads a Service object, and exposes the public
+ methods on ComputeManager through a Remote Procedure Call (RPC).
+
+``nova-conductor``
+ Provides database-access support for compute nodes (thereby reducing security
+ risks).
+
+``nova-scheduler``
+ Dispatches requests for new virtual machines to the correct node.
+
+``nova-novncproxy``
+ Provides a VNC proxy for browsers, allowing VNC consoles to access virtual
+ machines.
+
+.. note::
+
+ Some services have drivers that change how the service implements its core
+ functionality. For example, the ``nova-compute`` service supports drivers
+ that let you choose which hypervisor type it can use.
+
+.. toctree::
+ :maxdepth: 2
+
+ manage-volumes
+ flavors
+ default-ports
+ admin-password-injection
+ manage-the-cloud
+ manage-logs
+ root-wrap-reference
+ configuring-migrations
+ live-migration-usage
+ remote-console-access
+ service-groups
+ node-down
+ scheduling
+ upgrades
+
+
+Advanced configuration
+----------------------
+
+OpenStack clouds run on platforms that differ greatly in the capabilities that
+they provide. By default, the Compute service seeks to abstract the underlying
+hardware that it runs on, rather than exposing specifics about the underlying
+host platforms. This abstraction manifests itself in many ways. For example,
+rather than exposing the types and topologies of CPUs running on hosts, the
+service exposes a number of generic CPUs (virtual CPUs, or vCPUs) and allows
+for overcommitting of these. In a similar manner, rather than exposing the
+individual types of network devices available on hosts, generic
+software-powered network ports are provided. These features are designed to
+allow high resource utilization and allow the service to provide a generic,
+cost-effective and highly scalable cloud upon which to build applications.
+
+This abstraction is beneficial for most workloads. However, there are some
+workloads where determinism and per-instance performance are important, if not
+vital. In these cases, instances can be expected to deliver near-native
+performance. The Compute service provides features to improve individual
+instance performance for these kinds of workloads.
+
+.. include:: /common/numa-live-migration-warning.txt
+
+.. toctree::
+ :maxdepth: 2
+
+ pci-passthrough
+ cpu-topologies
+ real-time
+ huge-pages
+ virtual-gpu
+ file-backed-memory
+ ports-with-resource-requests
+ virtual-persistent-memory
+ emulated-tpm
+ uefi
+ secure-boot
+ sev
+ managing-resource-providers
+ resource-limits
+ cpu-models
+ libvirt-misc
+
+
+Additional guides
+-----------------
+
+.. TODO(mriedem): This index page has a lot of content which should be
+ organized into groups for things like configuration, operations,
+ troubleshooting, etc.
+
.. toctree::
:maxdepth: 2
- admin-password-injection.rst
- adv-config.rst
- arch.rst
- availability-zones.rst
- configuring-migrations.rst
- cpu-topologies.rst
- default-ports.rst
- evacuate.rst
- flavors.rst
- huge-pages.rst
- live-migration-usage.rst
- manage-logs.rst
- manage-the-cloud.rst
- manage-users.rst
- manage-volumes.rst
- migration.rst
- migrate-instance-with-snapshot.rst
- networking-nova.rst
- networking.rst
- node-down.rst
- pci-passthrough.rst
- quotas2.rst
- quotas.rst
- remote-console-access.rst
- root-wrap-reference.rst
- security-groups.rst
- security.rst
- service-groups.rst
- services.rst
- ssh-configuration.rst
- support-compute.rst
- system-admin.rst
+ aggregates
+ arch
+ availability-zones
+ cells
+ config-drive
+ configuration/index
+ evacuate
+ image-caching
+ metadata-service
+ migration
+ migrate-instance-with-snapshot
+ networking
+ quotas
+ security-groups
+ security
+ services
+ ssh-configuration
+ support-compute
+ secure-live-migration-with-qemu-native-tls
+ vendordata
+ hw-machine-type
diff --git a/doc/source/admin/libvirt-misc.rst b/doc/source/admin/libvirt-misc.rst
new file mode 100644
index 00000000000..87dbe18ea47
--- /dev/null
+++ b/doc/source/admin/libvirt-misc.rst
@@ -0,0 +1,140 @@
+======================
+Other libvirt features
+======================
+
+The libvirt driver supports a large number of additional features that don't
+warrant their own section. These are gathered here.
+
+
+Guest agent support
+-------------------
+
+Guest agents enable optional access between compute nodes and guests through a
+socket, using the QMP protocol.
+
+To enable this feature, you must set ``hw_qemu_guest_agent=yes`` as a metadata
+parameter on the image from which you wish to create guest-agent-capable
+instances. You can explicitly disable the feature by setting
+``hw_qemu_guest_agent=no`` in the image metadata.
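+
+For example, assuming an image identified by the placeholder ``$IMAGE``, the
+property can be set with the :command:`openstack image set` command:
+
+.. code-block:: console
+
+   $ openstack image set --property hw_qemu_guest_agent=yes $IMAGE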
+
+
+.. _extra-specs-watchdog-behavior:
+
+Watchdog behavior
+-----------------
+
+.. versionchanged:: 15.0.0 (Ocata)
+
+ Add support for the ``disabled`` option.
+
+A virtual watchdog device can be used to keep an eye on the guest server and
+carry out a configured action if the server hangs. The watchdog uses the
+i6300esb device (emulating a PCI Intel 6300ESB). Watchdog behavior can be
+configured using the :nova:extra-spec:`hw:watchdog_action` flavor extra spec or
+equivalent image metadata property. If neither the extra spec nor the image
+metadata property is specified, the watchdog is disabled.
+
+For example, to enable the watchdog and configure it to forcefully reset the
+guest in the event of a hang, run:
+
+.. code-block:: console
+
+ $ openstack flavor set $FLAVOR --property hw:watchdog_action=reset
+
+.. note::
+
+ Watchdog behavior set using the image metadata property will override
+ behavior set using the flavor extra spec.
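+
+For example, the equivalent image metadata property, named
+``hw_watchdog_action``, can be set as follows, ``$IMAGE`` being a placeholder
+for an image name or UUID:
+
+.. code-block:: console
+
+   $ openstack image set --property hw_watchdog_action=reset $IMAGE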
+
+
+.. _extra-specs-random-number-generator:
+
+Random number generator
+-----------------------
+
+.. versionchanged:: 21.0.0 (Ussuri)
+
+ Random number generators are now enabled by default for instances.
+
+Operating systems require good sources of entropy for things like cryptographic
+software. If a random-number generator device has been added to the instance
+through its image properties, the device can be enabled and configured using
+the :nova:extra-spec:`hw_rng:allowed`, :nova:extra-spec:`hw_rng:rate_bytes` and
+:nova:extra-spec:`hw_rng:rate_period` flavor extra specs.
+
+For example, to configure a byte rate of 5 bytes per period and a period of
+1000 milliseconds (1 second), run:
+
+.. code-block:: console
+
+ $ openstack flavor set $FLAVOR \
+ --property hw_rng:rate_bytes=5 \
+ --property hw_rng:rate_period=1000
+
+Alternatively, to disable the random number generator, run:
+
+.. code-block:: console
+
+ $ openstack flavor set $FLAVOR --property hw_rng:allowed=false
+
+The presence of separate byte rate and rate period configurables is
+intentional. As noted in the `QEMU docs`__, a smaller rate and larger period
+minimizes the opportunity for malicious guests to starve other guests of
+entropy but at the cost of responsiveness. Conversely, larger rates and smaller
+periods will increase the burst rate but at the potential cost of warping
+resource consumption in favour of a greedy guest.
+
+.. __: https://wiki.qemu.org/Features/VirtIORNG#Effect_of_the_period_parameter
+
+
+.. _extra-specs-performance-monitoring-unit:
+
+Performance Monitoring Unit (vPMU)
+----------------------------------
+
+.. versionadded:: 20.0.0 (Train)
+
+If nova is deployed with the libvirt virt driver and
+:oslo.config:option:`libvirt.virt_type` is set to ``qemu`` or ``kvm``, a
+virtual performance monitoring unit (vPMU) can be enabled or disabled for an
+instance using the :nova:extra-spec:`hw:pmu` flavor extra spec or ``hw_pmu``
+image metadata property.
+If the vPMU is not explicitly enabled or disabled via
+the flavor or image, its presence is left to QEMU to decide.
+
+For example, to explicitly disable the vPMU, run:
+
+.. code-block:: console
+
+ $ openstack flavor set FLAVOR-NAME --property hw:pmu=false
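+
+Alternatively, to disable the vPMU through the ``hw_pmu`` image metadata
+property instead (``$IMAGE`` is a placeholder for an image name or UUID):
+
+.. code-block:: console
+
+   $ openstack image set --property hw_pmu=false $IMAGE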
+
+The vPMU is used by tools like ``perf`` in the guest to provide more accurate
+information for profiling applications and monitoring guest performance.
+For :doc:`real time <real-time>` workloads, the emulation of a vPMU can
+introduce additional latency, which may be undesirable. If the telemetry it
+provides is not required, the vPMU can be disabled. For most workloads the
+default of unset (enabled) will be correct.
+
+
+.. _extra-specs-hiding-hypervisor-signature:
+
+Hiding hypervisor signature
+---------------------------
+
+.. versionadded:: 18.0.0 (Rocky)
+
+.. versionchanged:: 21.0.0 (Ussuri)
+
+ Prior to the Ussuri release, this was called ``hide_hypervisor_id``. An
+ alias is provided for backwards compatibility.
+
+Some hypervisors add a signature to their guests. While the presence of the
+signature can enable some paravirtualization features on the guest, it can also
+have the effect of preventing some drivers from loading. You can hide this
+signature by setting the :nova:extra-spec:`hw:hide_hypervisor_id` extra spec
+to ``true``.
+
+For example, to hide your signature from the guest OS, run:
+
+.. code-block:: console
+
+ $ openstack flavor set $FLAVOR --property hw:hide_hypervisor_id=true
diff --git a/doc/source/admin/live-migration-usage.rst b/doc/source/admin/live-migration-usage.rst
index bf848cf852b..783ab5e27c2 100644
--- a/doc/source/admin/live-migration-usage.rst
+++ b/doc/source/admin/live-migration-usage.rst
@@ -67,9 +67,8 @@ Manual selection of the destination host
+----+------------------+-------+----------+---------+-------+----------------------------+
| 3 | nova-conductor | HostA | internal | enabled | up | 2017-02-18T09:42:29.000000 |
| 4 | nova-scheduler | HostA | internal | enabled | up | 2017-02-18T09:42:26.000000 |
- | 5 | nova-consoleauth | HostA | internal | enabled | up | 2017-02-18T09:42:29.000000 |
- | 6 | nova-compute | HostB | nova | enabled | up | 2017-02-18T09:42:29.000000 |
- | 7 | nova-compute | HostC | nova | enabled | up | 2017-02-18T09:42:29.000000 |
+ | 5 | nova-compute | HostB | nova | enabled | up | 2017-02-18T09:42:29.000000 |
+ | 6 | nova-compute | HostC | nova | enabled | up | 2017-02-18T09:42:29.000000 |
+----+------------------+-------+----------+---------+-------+----------------------------+
#. Check that ``HostC`` has enough resources for migration:
@@ -218,9 +217,25 @@ What to do when the migration times out
During the migration process, the instance may write to a memory page after
that page has been copied to the destination. When that happens, the same page
has to be copied again. The instance may write to memory pages faster than they
-can be copied, so that the migration cannot complete. The Compute service will
-cancel it when the ``live_migration_completion_timeout``, a configuration
-parameter, is reached.
+can be copied, so that the migration cannot complete. There are two optional
+actions, controlled by
+:oslo.config:option:`libvirt.live_migration_timeout_action`, which can be
+taken against a VM after
+:oslo.config:option:`libvirt.live_migration_completion_timeout` is reached:
+
+1. ``abort`` (default): The live migration operation will be cancelled after
+ the completion timeout is reached. This is similar to using the API
+ ``DELETE /servers/{server_id}/migrations/{migration_id}``.
+
+2. ``force_complete``: The compute service will either pause the VM or trigger
+ post-copy, depending on whether post-copy is enabled and available
+ (:oslo.config:option:`libvirt.live_migration_permit_post_copy` is set to
+ ``True``). This is similar to using the API
+ ``POST /servers/{server_id}/migrations/{migration_id}/action (force_complete)``.
+
+You can also read the
+:oslo.config:option:`libvirt.live_migration_timeout_action`
+configuration option help for more details.
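+
+For example, a deployment that prefers forcing completion over aborting could
+set the following in :file:`nova.conf` (the timeout value shown is purely
+illustrative):
+
+.. code-block:: ini
+
+   [libvirt]
+   live_migration_completion_timeout = 800
+   live_migration_timeout_action = force_complete
+   live_migration_permit_post_copy = True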
The following remarks assume the KVM/Libvirt hypervisor.
@@ -238,16 +253,6 @@ out:
WARNING nova.virt.libvirt.migration [req-...] [instance: ...]
live migration not completed after 1800 sec
-The Compute service also cancels migrations when the memory copy seems to make
-no progress. Ocata disables this feature by default, but it can be enabled
-using the configuration parameter ``live_migration_progress_timeout``. Should
-this be the case, you may find the following message in the log:
-
-.. code-block:: console
-
- WARNING nova.virt.libvirt.migration [req-...] [instance: ...]
- live migration stuck for 150 sec
-
Addressing migration timeouts
-----------------------------
@@ -312,3 +317,7 @@ To make live-migration succeed, you have several options:
- Post-copy may lead to an increased page fault rate during migration,
which can slow the instance down.
+
+If live migrations routinely time out or fail during cleanup operations due
+to the user token timing out, consider configuring nova to use
+:ref:`service user tokens `.
diff --git a/doc/source/admin/manage-the-cloud.rst b/doc/source/admin/manage-the-cloud.rst
index 33cc911a8b1..b6080bcfd90 100644
--- a/doc/source/admin/manage-the-cloud.rst
+++ b/doc/source/admin/manage-the-cloud.rst
@@ -6,7 +6,7 @@ Manage the cloud
.. toctree::
- common/nova-show-usage-statistics-for-hosts-instances.rst
+ common/nova-show-usage-statistics-for-hosts-instances
System administrators can use the :command:`openstack` to manage their clouds.
diff --git a/doc/source/admin/manage-users.rst b/doc/source/admin/manage-users.rst
deleted file mode 100644
index 41a925ff76f..00000000000
--- a/doc/source/admin/manage-users.rst
+++ /dev/null
@@ -1,12 +0,0 @@
-.. _section_manage-compute-users:
-
-====================
-Manage Compute users
-====================
-
-Access to the Euca2ools (ec2) API is controlled by an access key and a secret
-key. The user's access key needs to be included in the request, and the request
-must be signed with the secret key. Upon receipt of API requests, Compute
-verifies the signature and runs commands on behalf of the user.
-
-To begin using Compute, you must create a user with the Identity service.
diff --git a/doc/source/admin/manage-volumes.rst b/doc/source/admin/manage-volumes.rst
index 1ac2a7a499a..a9d705a47aa 100644
--- a/doc/source/admin/manage-volumes.rst
+++ b/doc/source/admin/manage-volumes.rst
@@ -23,8 +23,8 @@ to the :cinder-doc:`block storage admin guide
` for more details about creating
multiattach-capable volumes.
-Boot from volume and attaching a volume to a server that is not
-SHELVED_OFFLOADED is supported. Ultimately the ability to perform
+:term:`Boot from volume ` and attaching a volume to a server
+that is not SHELVED_OFFLOADED is supported. Ultimately the ability to perform
these actions depends on the compute host and hypervisor driver that
is being used.
@@ -65,15 +65,152 @@ Testing
~~~~~~~
Continuous integration testing of the volume multiattach feature is done
-via the ``nova-multiattach`` job, defined in the `nova repository`_.
-
-The tests are defined in the `tempest repository`_.
-
-The CI job is setup to run with the **libvirt** compute driver and the **lvm**
-volume back end. It purposefully does not use the Pike Ubuntu Cloud Archive
-package mirror so that it gets qemu<2.10.
+via the ``tempest-full`` and ``tempest-slow`` jobs, which, along with the
+tests themselves, are defined in the `tempest repository`_.
.. _added support for multiattach volumes: https://specs.openstack.org/openstack/nova-specs/specs/queens/implemented/multi-attach-volume.html
.. _recorded overview and demo: https://www.youtube.com/watch?v=hZg6wqxdEHk
-.. _nova repository: http://git.openstack.org/cgit/openstack/nova/tree/playbooks/legacy/nova-multiattach/run.yaml
.. _tempest repository: http://codesearch.openstack.org/?q=CONF.compute_feature_enabled.volume_multiattach&i=nope&files=&repos=tempest
+
+Managing volume attachments
+---------------------------
+
+During the lifecycle of an instance, admins may need to check various aspects
+of how a given volume is mapped both to the instance and to the underlying
+compute host running the instance. This can even include refreshing different
+elements of the attachment to ensure the latest configuration changes within
+the environment have been applied.
+
+Checking an existing attachment
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Existing volume attachments can be checked using the following
+:python-openstackclient-doc:`OpenStack Client commands `:
+
+List all volume attachments for a given instance:
+
+.. code-block:: shell
+
+ $ openstack server volume list 216f9481-4c9d-4530-b865-51cedfa4b8e7
+ +--------------------------------------+----------+--------------------------------------+--------------------------------------+
+ | ID | Device | Server ID | Volume ID |
+ +--------------------------------------+----------+--------------------------------------+--------------------------------------+
+ | 8b9b3491-f083-4485-8374-258372f3db35 | /dev/vdb | 216f9481-4c9d-4530-b865-51cedfa4b8e7 | 8b9b3491-f083-4485-8374-258372f3db35 |
+ +--------------------------------------+----------+--------------------------------------+--------------------------------------+
+
+List all volume attachments for a given instance with the Cinder volume
+attachment and Block Device Mapping UUIDs also listed with microversion >= 2.89:
+
+.. code-block:: shell
+
+ $ openstack --os-compute-api-version 2.89 server volume list 216f9481-4c9d-4530-b865-51cedfa4b8e7
+ +----------+--------------------------------------+--------------------------------------+------+------------------------+--------------------------------------+--------------------------------------+
+ | Device | Server ID | Volume ID | Tag | Delete On Termination? | Attachment ID | BlockDeviceMapping UUID |
+ +----------+--------------------------------------+--------------------------------------+------+------------------------+--------------------------------------+--------------------------------------+
+ | /dev/vdb | 216f9481-4c9d-4530-b865-51cedfa4b8e7 | 8b9b3491-f083-4485-8374-258372f3db35 | None | False | d338fb38-cfd5-461f-8753-145dcbdb6c78 | 4e957e6d-52f2-44da-8cf8-3f1ab755e26d |
+ +----------+--------------------------------------+--------------------------------------+------+------------------------+--------------------------------------+--------------------------------------+
+
+List all Cinder volume attachments for a given volume from microversion >=
+3.27:
+
+.. code-block:: shell
+
+ $ openstack --os-volume-api-version 3.27 volume attachment list --volume-id 8b9b3491-f083-4485-8374-258372f3db35
+ +--------------------------------------+--------------------------------------+--------------------------------------+----------+
+ | ID | Volume ID | Server ID | Status |
+ +--------------------------------------+--------------------------------------+--------------------------------------+----------+
+ | d338fb38-cfd5-461f-8753-145dcbdb6c78 | 8b9b3491-f083-4485-8374-258372f3db35 | 216f9481-4c9d-4530-b865-51cedfa4b8e7 | attached |
+ +--------------------------------------+--------------------------------------+--------------------------------------+----------+
+
+Show the details of a Cinder volume attachment from microversion >= 3.27:
+
+.. code-block:: shell
+
+ $ openstack --os-volume-api-version 3.27 volume attachment show d338fb38-cfd5-461f-8753-145dcbdb6c78
+ +-------------+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
+ | Field | Value |
+ +-------------+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
+ | ID | d338fb38-cfd5-461f-8753-145dcbdb6c78 |
+ | Volume ID | 8b9b3491-f083-4485-8374-258372f3db35 |
+ | Instance ID | 216f9481-4c9d-4530-b865-51cedfa4b8e7 |
+ | Status | attached |
+ | Attach Mode | rw |
+ | Attached At | 2021-09-14T13:03:38.000000 |
+ | Detached At | |
+ | Properties | access_mode='rw', attachment_id='d338fb38-cfd5-461f-8753-145dcbdb6c78', auth_method='CHAP', auth_password='4XyNNFV2TLPhKXoP', auth_username='jsBMQhWZJXupA4eWHLQG', cacheable='False', driver_volume_type='iscsi', encrypted='False', qos_specs=, target_discovered='False', target_iqn='iqn.2010-10.org.openstack:volume-8b9b3491-f083-4485-8374-258372f3db35', target_lun='0', target_portal='192.168.122.99:3260', volume_id='8b9b3491-f083-4485-8374-258372f3db35' |
+ +-------------+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
+
+Refresh a volume attachment with nova-manage
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+.. versionadded:: 24.0.0 (Xena)
+
+Admins may also refresh an existing volume attachment using the following
+:program:`nova-manage` commands.
+
+.. note::
+
+ Users can also refresh volume attachments by shelving and later unshelving
+ their instances. The following is an alternative to that workflow and is
+ useful for admins who need to refresh attachments in bulk across an
+ environment.
+
+.. note::
+
+ Future work will look into introducing an os-refresh admin API that will
+ include orchestrating the shutdown of an instance and refreshing volume
+ attachments among other things.
+
+To begin, the admin can use the ``volume_attachment show`` subcommand to dump
+the existing details of the attachment directly from the Nova database. This
+includes the stashed ``connection_info``, which is not shared by the API.
+
+.. code-block:: shell
+
+ $ nova-manage volume_attachment show 216f9481-4c9d-4530-b865-51cedfa4b8e7 8b9b3491-f083-4485-8374-258372f3db35 --json | jq .attachment_id
+ "d338fb38-cfd5-461f-8753-145dcbdb6c78"
+
+If the stored ``connection_info`` or ``attachment_id`` is incorrect, then the
+admin may want to refresh the attachment to the compute host entirely by
+recreating the Cinder volume attachment record(s) and pulling down fresh
+``connection_info``. To do this, we first need to ensure the instance is
+stopped:
+
+.. code-block:: shell
+
+ $ openstack server stop 216f9481-4c9d-4530-b865-51cedfa4b8e7
+
+Once the instance is stopped, the host connector of the compute host running
+the instance has to be fetched using the ``volume_attachment get_connector``
+subcommand:
+
+.. code-block:: shell
+
+ root@compute $ nova-manage volume_attachment get_connector --json > connector.json
+
+.. note::
+
+ Future work will remove this requirement and incorporate the gathering of
+ the host connector into the main refresh command. Unfortunately, until then
+ it must remain a separate manual step.
+
+We can then provide this connector to the ``volume_attachment refresh``
+subcommand. This command will connect to the compute host, disconnect any
+host volume connections, delete the existing Cinder volume attachment,
+recreate the volume attachment and finally update Nova's database.
+
+.. code-block:: shell
+
+ $ nova-manage volume_attachment refresh 216f9481-4c9d-4530-b865-51cedfa4b8e7 8b9b3491-f083-4485-8374-258372f3db35 connector.json
+
+The Cinder volume attachment and ``connection_info`` stored in the Nova
+database should now be updated:
+
+.. code-block:: shell
+
+ $ nova-manage volume_attachment show 216f9481-4c9d-4530-b865-51cedfa4b8e7 8b9b3491-f083-4485-8374-258372f3db35 --json | jq .attachment_id
+ "9ce46f49-5cfc-4c6c-b2f0-0287540d3246"
+
+The instance can then be restarted and the event list checked:
+
+.. code-block:: shell
+
+ $ openstack server start $instance
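+ $ openstack server event list $instance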
diff --git a/doc/source/admin/managing-resource-providers.rst b/doc/source/admin/managing-resource-providers.rst
new file mode 100644
index 00000000000..27bfe20140a
--- /dev/null
+++ b/doc/source/admin/managing-resource-providers.rst
@@ -0,0 +1,216 @@
+==============================================
+Managing Resource Providers Using Config Files
+==============================================
+
+In order to facilitate management of resource provider information in the
+Placement API, Nova provides `a method`__ for admins to add custom inventory
+and traits to resource providers using YAML files.
+
+__ https://specs.openstack.org/openstack/nova-specs/specs/ussuri/approved/provider-config-file.html
+
+.. note::
+
+ Only ``CUSTOM_*`` resource classes and traits may be managed this way.
+
+Placing Files
+-------------
+
+Nova-compute will search for ``*.yaml`` files in the path specified in
+:oslo.config:option:`compute.provider_config_location`. These files will be
+loaded and validated for errors on nova-compute startup. If there are any
+errors in the files, nova-compute will fail to start up.
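+
+For example, a deployment could point nova-compute at a dedicated directory;
+the path below is only an illustrative choice:
+
+.. code-block:: ini
+
+   [compute]
+   provider_config_location = /etc/nova/provider_config/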
+
+Administrators should ensure that provider config files have appropriate
+permissions and ownership. See the `specification`__ and `admin guide`__
+for more details.
+
+__ https://specs.openstack.org/openstack/nova-specs/specs/ussuri/approved/provider-config-file.html
+__ https://docs.openstack.org/nova/latest/admin/managing-resource-providers.html
+
+.. note::
+
+ The files are loaded once at nova-compute startup and any changes or new
+ files will not be recognized until the next nova-compute startup.
+
+Examples
+--------
+
+Resource providers to target can be identified by either UUID or name. In
+addition, the value ``$COMPUTE_NODE`` can be used in the UUID field to
+identify all nodes managed by the service.
+
+If an entry does not include any additional inventory or traits, it will be
+logged at load time but otherwise ignored. If a resource provider is
+identified both by ``$COMPUTE_NODE`` and by an individual UUID/name, the
+values in the ``$COMPUTE_NODE`` entry will be ignored for *that provider*,
+but only if the explicit entry includes inventory or traits.
+
+.. note::
+
+ In the case that a resource provider is identified more than once by
+ explicit UUID/name, the nova-compute service will fail to start. This
+ is a global requirement across all supplied ``provider.yaml`` files.
+
+.. code-block:: yaml
+
+ meta:
+ schema_version: '1.0'
+ providers:
+ - identification:
+ name: 'EXAMPLE_RESOURCE_PROVIDER'
+ # Additional valid identification examples:
+ # uuid: '$COMPUTE_NODE'
+ # uuid: '5213b75d-9260-42a6-b236-f39b0fd10561'
+ inventories:
+ additional:
+ - CUSTOM_EXAMPLE_RESOURCE_CLASS:
+ total: 100
+ reserved: 0
+ min_unit: 1
+ max_unit: 10
+ step_size: 1
+ allocation_ratio: 1.0
+ traits:
+ additional:
+ - 'CUSTOM_EXAMPLE_TRAIT'
+
+Schema Example
+--------------
+
+.. code-block:: yaml
+
+ type: object
+ properties:
+ # This property is used to track where the provider.yaml file originated.
+ # It is reserved for internal use and should never be set in a provider.yaml
+ # file supplied by an end user.
+ __source_file:
+ not: {}
+ meta:
+ type: object
+ properties:
+ # Version ($Major, $minor) of the schema must successfully parse
+ # documents conforming to ($Major, 0..N). Any breaking schema change
+ # (e.g. removing fields, adding new required fields, imposing a stricter
+ # pattern on a value, etc.) must bump $Major.
+ schema_version:
+ type: string
+ pattern: '^1\.([0-9]|[1-9][0-9]+)$'
+ required:
+ - schema_version
+ additionalProperties: true
+ providers:
+ type: array
+ items:
+ type: object
+ properties:
+ identification:
+ $ref: '#/provider_definitions/provider_identification'
+ inventories:
+ $ref: '#/provider_definitions/provider_inventories'
+ traits:
+ $ref: '#/provider_definitions/provider_traits'
+ required:
+ - identification
+ additionalProperties: true
+ required:
+ - meta
+ additionalProperties: true
+
+ provider_definitions:
+ provider_identification:
+ # Identify a single provider to configure. Exactly one identification
+ # method should be used. Currently `uuid` or `name` are supported, but
+ # future versions may support others.
+ # The uuid can be set to the sentinel value `$COMPUTE_NODE` which will
+ # cause the consuming compute service to apply the configuration to
+ # all compute node root providers it manages that are not otherwise
+ # specified using a uuid or name.
+ type: object
+ properties:
+ uuid:
+ oneOf:
+ # TODO(sean-k-mooney): replace this with type uuid when we can depend
+ # on a version of the jsonschema lib that implements draft 8 or later
+ # of the jsonschema spec.
+ - type: string
+ pattern: '^[0-9A-Fa-f]{8}-[0-9A-Fa-f]{4}-[0-9A-Fa-f]{4}-[0-9A-Fa-f]{4}-[0-9A-Fa-f]{12}$'
+ - type: string
+ const: '$COMPUTE_NODE'
+ name:
+ type: string
+ minLength: 1
+ # This introduces the possibility of an unsupported key name being used to
+ # get past schema validation, but is necessary to support forward
+ # compatibility with new identification methods. This should be checked
+ # after schema validation.
+ minProperties: 1
+ maxProperties: 1
+ additionalProperties: false
+ provider_inventories:
+ # Allows the admin to specify various adjectives to create and manage
+ # providers' inventories. This list of adjectives can be extended in the
+ # future as the schema evolves to meet new use cases. As of v1.0, only one
+ # adjective, `additional`, is supported.
+ type: object
+ properties:
+ additional:
+ type: array
+ items:
+ patternProperties:
+ # Allows any key name matching the resource class pattern,
+ # check to prevent conflicts with virt driver owned resource classes
+ # will be done after schema validation.
+ ^[A-Z0-9_]{1,255}$:
+ type: object
+ properties:
+ # Any optional properties not populated will be given a default value by
+ # placement. If overriding a pre-existing provider, values will not be
+ # preserved from the existing inventory.
+ total:
+ type: integer
+ reserved:
+ type: integer
+ min_unit:
+ type: integer
+ max_unit:
+ type: integer
+ step_size:
+ type: integer
+ allocation_ratio:
+ type: number
+ required:
+ - total
+ # The defined properties reflect the current placement data
+ # model. While defining those in the schema and not allowing
+ # additional properties means we will need to bump the schema
+ # version if they change, that is likely to be part of a large
+ # change that may have other impacts anyway. The benefit of
+ # stricter validation of property names outweighs the (small)
+ # chance of having to bump the schema version as described above.
+ additionalProperties: false
+ # This ensures only keys matching the pattern above are allowed
+ additionalProperties: false
+ additionalProperties: true
+ provider_traits:
+ # Allows the admin to specify various adjectives to create and manage
+ # providers' traits. This list of adjectives can be extended in the
+ # future as the schema evolves to meet new use cases. As of v1.0, only one
+ # adjective, `additional`, is supported.
+ type: object
+ properties:
+ additional:
+ type: array
+ items:
+ # Allows any value matching the trait pattern here, additional
+ # validation will be done after schema validation.
+ type: string
+ pattern: '^[A-Z0-9_]{1,255}$'
+ additionalProperties: true
+
+.. note::
+
+ When creating a ``provider.yaml`` config file, it is recommended to
+ validate the config against the schema provided by nova using a simple
+ jsonschema validator rather than starting the nova-compute agent, as this
+ enables faster iteration.
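+
+One possible approach, assuming the schema above has been saved locally as
+``schema.yaml`` next to the ``provider.yaml`` under test, and that the PyYAML
+and jsonschema Python packages are installed, is:
+
+.. code-block:: console
+
+   $ python3 -c 'import yaml, jsonschema; \
+       jsonschema.validate(yaml.safe_load(open("provider.yaml")), \
+       yaml.safe_load(open("schema.yaml")))'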
+
diff --git a/doc/source/admin/metadata-service.rst b/doc/source/admin/metadata-service.rst
new file mode 100644
index 00000000000..d1d816610d1
--- /dev/null
+++ b/doc/source/admin/metadata-service.rst
@@ -0,0 +1,190 @@
+================
+Metadata service
+================
+
+.. note::
+
+ This section provides deployment information about the metadata service. For
+ end-user information about the metadata service and instance metadata in
+ general, refer to the :ref:`user guide `.
+
+The metadata service provides a way for instances to retrieve instance-specific
+data. Instances access the metadata service at ``http://169.254.169.254``. The
+metadata service supports two sets of APIs - an OpenStack metadata API and an
+EC2-compatible API - and also exposes vendordata and user data. Both the
+OpenStack metadata and EC2-compatible APIs are versioned by date.
+
+The metadata service can be run globally, as part of the :program:`nova-api`
+application, or on a per-cell basis, as part of the standalone
+:program:`nova-api-metadata` application. A detailed comparison is provided in
+the :ref:`cells V2 guide `.
+
+.. versionchanged:: 19.0.0
+
+ The ability to run the nova metadata API service on a per-cell basis was
+ added in Stein. For versions prior to this release, you should not use the
+ standalone :program:`nova-api-metadata` application for multiple cells.
+
+Guests access the service at ``169.254.169.254`` or at ``fe80::a9fe:a9fe``.
+
+.. versionchanged:: 22.0.0
+
+ Starting with the Victoria release the metadata service is accessible
+ over IPv6 at the link-local address ``fe80::a9fe:a9fe``.
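+
+For example, from inside a guest, the available OpenStack metadata API
+versions can be listed with any HTTP client, such as ``curl``:
+
+.. code-block:: console
+
+   $ curl http://169.254.169.254/openstack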
+
+The networking service, neutron, is responsible for intercepting these
+requests and adding HTTP headers which uniquely identify the source of the
+request before forwarding it to the metadata API server. For the Open vSwitch
+and Linux Bridge backends provided with neutron, the flow looks something
+like this:
+
+#. The instance sends an HTTP request for metadata to ``169.254.169.254``.
+
+#. This request hits either the router or the DHCP namespace, depending on
+ the route in the instance.
+
+#. The metadata proxy service in the namespace adds the following info to the
+ request:
+
+ - Instance IP (``X-Forwarded-For`` header)
+ - Router or Network-ID (``X-Neutron-Network-Id`` or ``X-Neutron-Router-Id``
+ header)
+
+#. The metadata proxy service sends this request to the metadata agent (outside
+ the namespace) via a UNIX domain socket.
+
+#. The :program:`neutron-metadata-agent` application forwards the request to the
+ nova metadata API service by adding some new headers (instance ID and Tenant
+ ID) to the request.
+
+This flow may vary if a different networking backend is used.
+
+Neutron and nova must be configured to communicate using a shared secret.
+Neutron uses this secret to sign the Instance-ID header of the metadata
+request to prevent spoofing. This secret is configured through the
+:oslo.config:option:`neutron.metadata_proxy_shared_secret` config option in nova
+and the equivalent ``metadata_proxy_shared_secret`` config option in neutron.
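+
+For example, the nova side might be configured as follows, with the same
+secret set in neutron's metadata agent configuration (the value shown is only
+a placeholder):
+
+.. code-block:: ini
+
+   [neutron]
+   service_metadata_proxy = True
+   metadata_proxy_shared_secret = SHARED_SECRET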
+
+Configuration
+-------------
+
+The :program:`nova-api` application accepts the following metadata
+service-related options:
+
+- :oslo.config:option:`enabled_apis`
+- :oslo.config:option:`enabled_ssl_apis`
+- :oslo.config:option:`neutron.service_metadata_proxy`
+- :oslo.config:option:`neutron.metadata_proxy_shared_secret`
+- :oslo.config:option:`api.metadata_cache_expiration`
+- :oslo.config:option:`api.use_forwarded_for`
+- :oslo.config:option:`api.local_metadata_per_cell`
+- :oslo.config:option:`api.dhcp_domain`
+
+.. note::
+
+ This list excludes configuration options related to the vendordata feature.
+ Refer to :doc:`vendordata feature documentation ` for
+ information on configuring this.
+
+For example, to configure the :program:`nova-api` application to serve the
+metadata API, without SSL, using the ``StaticJSON`` vendordata provider, add the
+following to a :file:`nova-api.conf` file:
+
+.. code-block:: ini
+
+ [DEFAULT]
+ enabled_apis = osapi_compute,metadata
+ enabled_ssl_apis =
+ metadata_listen = 0.0.0.0
+ metadata_listen_port = 0
+ metadata_workers = 4
+
+ [neutron]
+ service_metadata_proxy = True
+
+ [api]
+ dhcp_domain =
+ metadata_cache_expiration = 15
+ use_forwarded_for = False
+ local_metadata_per_cell = False
+ vendordata_providers = StaticJSON
+ vendordata_jsonfile_path = /etc/nova/vendor_data.json
+
+.. note::
+
+ This does not include configuration options that are not metadata-specific
+ but are nonetheless required, such as
+ :oslo.config:option:`api.auth_strategy`.
+
+Configuring the application to use the ``DynamicJSON`` vendordata provider is
+more involved and is not covered here.
+
+The :program:`nova-api-metadata` application accepts almost the same options:
+
+- :oslo.config:option:`neutron.service_metadata_proxy`
+- :oslo.config:option:`neutron.metadata_proxy_shared_secret`
+- :oslo.config:option:`api.metadata_cache_expiration`
+- :oslo.config:option:`api.use_forwarded_for`
+- :oslo.config:option:`api.local_metadata_per_cell`
+- :oslo.config:option:`api.dhcp_domain`
+
+.. note::
+
+ This list excludes configuration options related to the vendordata feature.
+ Refer to :doc:`vendordata feature documentation ` for
+ information on configuring this.
+
+For example, to configure the :program:`nova-api-metadata` application to serve
+the metadata API, without SSL, add the following to a :file:`nova-api.conf`
+file:
+
+.. code-block:: ini
+
+ [DEFAULT]
+ metadata_listen = 0.0.0.0
+ metadata_listen_port = 0
+ metadata_workers = 4
+
+ [neutron]
+ service_metadata_proxy = True
+
+ [api]
+ dhcp_domain =
+ metadata_cache_expiration = 15
+ use_forwarded_for = False
+ local_metadata_per_cell = False
+
+.. note::
+
+ This does not include configuration options that are not metadata-specific
+ but are nonetheless required, such as
+ :oslo.config:option:`api.auth_strategy`.
+
+For information about configuring the neutron side of the metadata service,
+refer to the :neutron-doc:`neutron configuration guide
+`.
+
+
+Config drives
+-------------
+
+Config drives are special drives that are attached to an instance when it boots.
+The instance can mount this drive and read files from it to get information that
+is normally available through the metadata service. For more information, refer
+to :doc:`/admin/config-drive` and the :ref:`user guide `.
+
+
+Vendordata
+----------
+
+Vendordata provides a way to pass vendor or deployment-specific information to
+instances. For more information, refer to :doc:`/admin/vendordata` and the
+:ref:`user guide `.
+
+
+User data
+---------
+
+User data is a blob of data that the user can specify when they launch an
+instance. For more information, refer to :ref:`the user guide
+`.
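+
+For example, user data is typically supplied when the server is created; the
+image, flavor and file names below are placeholders:
+
+.. code-block:: console
+
+   $ openstack server create --image $IMAGE --flavor $FLAVOR \
+       --user-data user-data.txt my-server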
diff --git a/doc/source/admin/migrate-instance-with-snapshot.rst b/doc/source/admin/migrate-instance-with-snapshot.rst
index 06509003fc6..65059679abb 100644
--- a/doc/source/admin/migrate-instance-with-snapshot.rst
+++ b/doc/source/admin/migrate-instance-with-snapshot.rst
@@ -65,6 +65,10 @@ Create a snapshot of the instance
$ openstack server image create --name myInstanceSnapshot myInstance
+ If snapshot operations routinely fail because the user token times out
+ while uploading a large disk image, consider configuring nova to use
+ :ref:`service user tokens `.
+
#. Use the :command:`openstack image list` command to check the status
until the status is ``ACTIVE``:
diff --git a/doc/source/admin/migration.rst b/doc/source/admin/migration.rst
index 3020825e894..978a91a51ff 100644
--- a/doc/source/admin/migration.rst
+++ b/doc/source/admin/migration.rst
@@ -4,70 +4,97 @@ Migrate instances
.. note::
- This documentation is about cold-migration. For live-migration usage, see
+ This documentation is about cold migration. For live migration usage, see
:doc:`live-migration-usage`.
-When you want to move an instance from one compute host to another, you can use
-the :command:`openstack server migrate` command. The scheduler chooses the
-destination compute host based on its settings. This process does not assume
-that the instance has shared storage available on the target host. If you are
-using SSH tunneling, you must ensure that each node is configured with SSH key
-authentication so that the Compute service can use SSH to move disks to other
-nodes. For more information, see :ref:`cli-os-migrate-cfg-ssh`.
+When you want to move an instance from one compute host to another, you can
+migrate the instance. The migration operation, which is also known as the cold
+migration operation to distinguish it from the live migration operation,
+functions similarly to :doc:`the resize operation ` with the main
+difference being that a cold migration does not change the flavor of the
+instance. As with resize, the scheduler chooses the destination compute host
+based on its settings. This process does not assume that the instance has shared
+storage available on the target host. If you are using SSH tunneling, you must
+ensure that each node is configured with SSH key authentication so that the
+Compute service can use SSH to move disks to other nodes. For more information,
+see :ref:`cli-os-migrate-cfg-ssh`.
-#. To list the VMs you want to migrate, run:
+To list the VMs you want to migrate, run:
- .. code-block:: console
+.. code-block:: console
- $ openstack server list
+ $ openstack server list
-#. Use the :command:`openstack server migrate` command.
+Once you have the name or UUID of the server you wish to migrate, migrate it
+using the :command:`openstack server migrate` command:
- .. code-block:: console
+.. code-block:: console
- $ openstack server migrate VM_INSTANCE
+ $ openstack server migrate SERVER
-#. To migrate an instance and watch the status, use this example script:
+Once an instance has successfully migrated, you can use the :command:`openstack
+server migrate confirm` command to confirm it:
- .. code-block:: bash
+.. code-block:: console
- #!/bin/bash
+ $ openstack server migrate confirm SERVER
- # Provide usage
- usage() {
- echo "Usage: $0 VM_ID"
- exit 1
- }
+Alternatively, you can use the :command:`openstack server migrate revert`
+command to revert the migration and restore the instance to its previous host:
- [[ $# -eq 0 ]] && usage
+.. code-block:: console
- # Migrate the VM to an alternate hypervisor
- echo -n "Migrating instance to alternate host"
- VM_ID=$1
- openstack server migrate $VM_ID
- VM_OUTPUT=$(openstack server show $VM_ID)
- VM_STATUS=$(echo "$VM_OUTPUT" | grep status | awk '{print $4}')
- while [[ "$VM_STATUS" != "VERIFY_RESIZE" ]]; do
- echo -n "."
- sleep 2
- VM_OUTPUT=$(openstack server show $VM_ID)
- VM_STATUS=$(echo "$VM_OUTPUT" | grep status | awk '{print $4}')
- done
- openstack server resize --confirm $VM_ID
- echo " instance migrated and resized."
- echo;
+ $ openstack server migrate revert SERVER
- # Show the details for the VM
- echo "Updated instance details:"
- openstack server show $VM_ID
+.. note::
+
+ You can configure automatic confirmation of migrations and resizes. Refer to
+ the :oslo.config:option:`resize_confirm_window` option for more information.
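+
+For instance, the following :file:`nova.conf` snippet would automatically
+confirm migrations and resizes after 30 seconds (an illustrative value):
+
+.. code-block:: ini
+
+   [DEFAULT]
+   resize_confirm_window = 30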
+
+
+Example
+-------
+
+To migrate an instance and watch the status, use this example script:
+
+.. code-block:: bash
+
+ #!/bin/bash
+
+ # Provide usage
+ usage() {
+ echo "Usage: $0 VM_ID"
+ exit 1
+ }
+
+ [[ $# -eq 0 ]] && usage
+ VM_ID=$1
+
+ # Show the details for the VM
+ echo "Instance details:"
+ openstack server show ${VM_ID}
+
+ # Migrate the VM to an alternate hypervisor
+ echo -n "Migrating instance to alternate host "
+ openstack server migrate ${VM_ID}
+ while [[ "$(openstack server show ${VM_ID} -f value -c status)" != "VERIFY_RESIZE" ]]; do
+ echo -n "."
+ sleep 2
+ done
+ openstack server migrate confirm ${VM_ID}
+ echo " instance migrated and resized."
+
+ # Show the details for the migrated VM
+ echo "Migrated instance details:"
+ openstack server show ${VM_ID}
- # Pause to allow users to examine VM details
- read -p "Pausing, press to exit."
+ # Pause to allow users to examine VM details
+ read -p "Pausing, press to exit."
.. note::
If you see the following error, it means you are either running the command
- with the wrong credentials, such as a non-admin user, or the ``policy.json``
+ with the wrong credentials, such as a non-admin user, or the ``policy.yaml``
file prevents migration for your user::
Policy doesn't allow os_compute_api:os-migrate-server:migrate to be performed. (HTTP 403)
diff --git a/doc/source/admin/networking-nova.rst b/doc/source/admin/networking-nova.rst
deleted file mode 100644
index d071a5cbc7e..00000000000
--- a/doc/source/admin/networking-nova.rst
+++ /dev/null
@@ -1,873 +0,0 @@
-============================
-Networking with nova-network
-============================
-
-.. deprecated:: 14.0.0
-
- ``nova-network`` was deprecated in the OpenStack Newton release. In Ocata
- and future releases, you can start ``nova-network`` only with a cells v1
- configuration. This is not a recommended configuration for deployment.
-
-Understanding the networking configuration options helps you design the best
-configuration for your Compute instances.
-
-You can choose to either install and configure ``nova-network`` or use the
-OpenStack Networking service (neutron). This section contains a brief overview
-of ``nova-network``. For more information about OpenStack Networking, refer to
-:neutron-doc:`the documentation <>`.
-
-Networking concepts
-~~~~~~~~~~~~~~~~~~~
-
-Compute assigns a private IP address to each VM instance. Compute makes a
-distinction between fixed IPs and floating IP. Fixed IPs are IP addresses that
-are assigned to an instance on creation and stay the same until the instance is
-explicitly terminated. Floating IPs are addresses that can be dynamically
-associated with an instance. A floating IP address can be disassociated and
-associated with another instance at any time. A user can reserve a floating IP
-for their project.
-
-.. note::
-
- Currently, Compute with ``nova-network`` only supports Linux bridge
- networking that allows virtual interfaces to connect to the outside network
- through the physical interface.
-
-The network controller with ``nova-network`` provides virtual networks to
-enable compute servers to interact with each other and with the public network.
-Compute with ``nova-network`` supports the following network modes, which are
-implemented as Network Manager types:
-
-Flat Network Manager
- In this mode, a network administrator specifies a subnet. IP addresses for VM
- instances are assigned from the subnet, and then injected into the image on
- launch. Each instance receives a fixed IP address from the pool of available
- addresses. A system administrator must create the Linux networking bridge
- (typically named ``br100``, although this is configurable) on the systems
- running the ``nova-network`` service. All instances of the system are
- attached to the same bridge, which is configured manually by the network
- administrator.
-
-.. note::
-
- Configuration injection currently only works on Linux-style systems that
- keep networking configuration in ``/etc/network/interfaces``.
-
-Flat DHCP Network Manager
- In this mode, OpenStack starts a DHCP server (dnsmasq) to allocate IP
- addresses to VM instances from the specified subnet, in addition to manually
- configuring the networking bridge. IP addresses for VM instances are assigned
- from a subnet specified by the network administrator.
-
- Like flat mode, all instances are attached to a single bridge on the compute
- node. Additionally, a DHCP server configures instances depending on
- single-/multi-host mode, alongside each ``nova-network``. In this mode,
- Compute does a bit more configuration. It attempts to bridge into an Ethernet
- device (``flat_interface``, eth0 by default). For every instance, Compute
- allocates a fixed IP address and configures dnsmasq with the MAC ID and IP
- address for the VM. Dnsmasq does not take part in the IP address allocation
- process, it only hands out IPs according to the mapping done by Compute.
- Instances receive their fixed IPs with the :command:`dhcpdiscover` command.
- These IPs are not assigned to any of the host's network interfaces, only to
- the guest-side interface for the VM.
-
- In any setup with flat networking, the hosts providing the ``nova-network``
- service are responsible for forwarding traffic from the private network. They
- also run and configure dnsmasq as a DHCP server listening on this bridge,
- usually on IP address 10.0.0.1 (see :ref:`compute-dnsmasq`). Compute can
- determine the NAT entries for each network, although sometimes NAT is not
- used, such as when the network has been configured with all public IPs, or if
- a hardware router is used (which is a high availability option). In this
- case, hosts need to have ``br100`` configured and physically connected to any
- other nodes that are hosting VMs. You must set the ``flat_network_bridge``
- option or create networks with the bridge parameter in order to avoid raising
- an error. Compute nodes have iptables or ebtables entries created for each
- project and instance to protect against MAC ID or IP address spoofing and ARP
- poisoning.
-
-.. note::
-
- In single-host Flat DHCP mode you will be able to ping VMs through their
- fixed IP from the ``nova-network`` node, but you cannot ping them from the
- compute nodes. This is expected behavior.
-
-VLAN Network Manager
- This is the default mode for OpenStack Compute. In this mode, Compute creates
- a VLAN and bridge for each project. For multiple-machine installations, the
- VLAN Network Mode requires a switch that supports VLAN tagging (IEEE 802.1Q).
- The project gets a range of private IPs that are only accessible from inside
- the VLAN. In order for a user to access the instances in their project, a
- special VPN instance (code named ``cloudpipe``) needs to be created. Compute
- generates a certificate and key for the user to access the VPN and starts the
- VPN automatically. It provides a private network segment for each project's
- instances that can be accessed through a dedicated VPN connection from the
- internet. In this mode, each project gets its own VLAN, Linux networking
- bridge, and subnet.
-
- The subnets are specified by the network administrator, and are assigned
- dynamically to a project when required. A DHCP server is started for each
- VLAN to pass out IP addresses to VM instances from the subnet assigned to the
- project. All instances belonging to one project are bridged into the same
- VLAN for that project. OpenStack Compute creates the Linux networking bridges
- and VLANs when required.
-
-These network managers can co-exist in a cloud system. However, because you
-cannot select the type of network for a given project, you cannot configure
-multiple network types in a single Compute installation.
-
-All network managers configure the network using network drivers. For example,
-the Linux L3 driver (``l3.py`` and ``linux_net.py``), which makes use of
-``iptables``, ``route`` and other network management facilities, and the
-libvirt `network filtering facilities
-`__. The driver is not tied to any
-particular network manager; all network managers use the same driver. The
-driver usually initializes only when the first VM lands on this host node.
-
-All network managers operate in either single-host or multi-host mode. This
-choice greatly influences the network configuration. In single-host mode, a
-single ``nova-network`` service provides a default gateway for VMs and hosts a
-single DHCP server (dnsmasq). In multi-host mode, each compute node runs its
-own ``nova-network`` service. In both cases, all traffic between VMs and the
-internet flows through ``nova-network``. Each mode has benefits and drawbacks.
-
-All networking options require network connectivity to be already set up
-between OpenStack physical nodes. OpenStack does not configure any physical
-network interfaces. All network managers automatically create VM virtual
-interfaces. Some network managers can also create network bridges such as
-``br100``.
-
-The internal network interface is used for communication with VMs. The
-interface should not have an IP address attached to it before OpenStack
-installation, it serves only as a fabric where the actual endpoints are VMs and
-dnsmasq. Additionally, the internal network interface must be in
-``promiscuous`` mode, so that it can receive packets whose target MAC address
-is the guest VM, not the host.
-
-All machines must have a public and internal network interface (controlled by
-these options: ``public_interface`` for the public interface, and
-``flat_interface`` and ``vlan_interface`` for the internal interface with flat
-or VLAN managers). This guide refers to the public network as the external
-network and the private network as the internal or project network.
-
-For flat and flat DHCP modes, use the :command:`nova network-create` command to
-create a network:
-
-.. code-block:: console
-
- $ nova network-create vmnet \
- --fixed-range-v4 10.0.0.0/16 --fixed-cidr 10.0.20.0/24 --bridge br100
-
-This example uses the following parameters:
-
-``--fixed-range-v4``
- Specifies the network subnet.
-``--fixed-cidr``
- Specifies a range of fixed IP addresses to allocate, and can be a subset of
- the ``--fixed-range-v4`` argument.
-``--bridge``
- Specifies the bridge device to which this network is connected on every
- compute node.
-
-.. _compute-dnsmasq:
-
-DHCP server: dnsmasq
-~~~~~~~~~~~~~~~~~~~~
-
-The Compute service uses `dnsmasq
-`__ as the DHCP server when
-using either Flat DHCP Network Manager or VLAN Network Manager. For Compute to
-operate in IPv4/IPv6 dual-stack mode, use at least dnsmasq v2.63. The
-``nova-network`` service is responsible for starting dnsmasq processes.
-
-The behavior of dnsmasq can be customized by creating a dnsmasq configuration
-file. Specify the configuration file using the ``dnsmasq_config_file``
-configuration option:
-
-.. code-block:: ini
-
- dnsmasq_config_file=/etc/dnsmasq-nova.conf
-
-For more information about creating a dnsmasq configuration file, see the
-:doc:`/configuration/config`, and `the dnsmasq documentation
-`__.
-
-Dnsmasq also acts as a caching DNS server for instances. You can specify the
-DNS server that dnsmasq uses by setting the ``dns_server`` configuration option
-in ``/etc/nova/nova.conf``. This example configures dnsmasq to use Google's
-public DNS server:
-
-.. code-block:: ini
-
- dns_server=8.8.8.8
-
-Dnsmasq logs to syslog (typically ``/var/log/syslog`` or ``/var/log/messages``,
-depending on Linux distribution). Logs can be useful for troubleshooting,
-especially in a situation where VM instances boot successfully but are not
-reachable over the network.
-
-Administrators can specify the starting point IP address to reserve with the
-DHCP server (in the format n.n.n.n) with this command:
-
-.. code-block:: console
-
- $ nova-manage fixed reserve --address IP_ADDRESS
-
-This reservation only affects which IP address the VMs start at, not the fixed
-IP addresses that ``nova-network`` places on the bridges.
-
-Configure Compute to use IPv6 addresses
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-If you are using OpenStack Compute with ``nova-network``, you can put Compute
-into dual-stack mode, so that it uses both IPv4 and IPv6 addresses for
-communication. In dual-stack mode, instances can acquire their IPv6 global
-unicast addresses by using a stateless address auto-configuration mechanism
-[RFC 4862/2462]. IPv4/IPv6 dual-stack mode works with both ``VlanManager`` and
-``FlatDHCPManager`` networking modes.
-
-In ``VlanManager`` networking mode, each project uses a different 64-bit global
-routing prefix. In ``FlatDHCPManager`` mode, all instances use one 64-bit
-global routing prefix.
-
-This configuration was tested with virtual machine images that have an IPv6
-stateless address auto-configuration capability. This capability is required
-for any VM to run with an IPv6 address. You must use an EUI-64 address for
-stateless address auto-configuration. Each node that executes a ``nova-*``
-service must have ``python-netaddr`` and ``radvd`` installed.
-
-.. rubric:: Switch into IPv4/IPv6 dual-stack mode
-
-#. For every node running a ``nova-*`` service, install ``python-netaddr``:
-
- .. code-block:: console
-
- # apt-get install python-netaddr
-
-#. For every node running ``nova-network``, install ``radvd`` and configure
- IPv6 networking:
-
- .. code-block:: console
-
- # apt-get install radvd
- # echo 1 > /proc/sys/net/ipv6/conf/all/forwarding
- # echo 0 > /proc/sys/net/ipv6/conf/all/accept_ra
-
-#. On all nodes, edit the ``nova.conf`` file and specify ``use_ipv6 = True``.
-
-#. Restart all ``nova-*`` services.
-
-.. rubric:: IPv6 configuration options
-
-You can use the following options with the :command:`nova network-create`
-command:
-
-- Add a fixed range for IPv6 addresses to the :command:`nova network-create`
- command. Specify ``public`` or ``private`` after the ``network-create``
- parameter.
-
- .. code-block:: console
-
- $ nova network-create public --fixed-range-v4 FIXED_RANGE_V4 \
- --vlan VLAN_ID --vpn VPN_START --fixed-range-v6 FIXED_RANGE_V6
-
-- Set the IPv6 global routing prefix by using the ``--fixed_range_v6``
- parameter. The default value for the parameter is ``fd00::/48``.
-
- When you use ``FlatDHCPManager``, the command uses the original
- ``--fixed_range_v6`` value. For example:
-
- .. code-block:: console
-
- $ nova network-create public --fixed-range-v4 10.0.2.0/24 \
- --fixed-range-v6 fd00:1::/48
-
-- When you use ``VlanManager``, the command increments the subnet ID to create
- subnet prefixes. Guest VMs use this prefix to generate their IPv6 global
- unicast addresses. For example:
-
- .. code-block:: console
-
- $ nova network-create public --fixed-range-v4 10.0.1.0/24 --vlan 100 \
- --vpn 1000 --fixed-range-v6 fd00:1::/48
-
-.. list-table:: Description of IPv6 configuration options
- :header-rows: 2
-
- * - Configuration option = Default value
- - Description
- * - [DEFAULT]
- -
- * - fixed_range_v6 = fd00::/48
- - (StrOpt) Fixed IPv6 address block
- * - gateway_v6 = None
- - (StrOpt) Default IPv6 gateway
- * - ipv6_backend = rfc2462
- - (StrOpt) Backend to use for IPv6 generation
- * - use_ipv6 = False
- - (BoolOpt) Use IPv6
-
-.. _metadata-service-deploy:
-
-Metadata service
-~~~~~~~~~~~~~~~~
-
-.. TODO: This should be moved into its own document once we add information
- about integrating this with neutron rather than nova-network.
-
-This section provides deployment information about the metadata service. For
-end-user information about the metadata service, see the
-:doc:`user guide `.
-
-The metadata service is implemented by either the ``nova-api`` service or the
-``nova-api-metadata`` service. Note that the ``nova-api-metadata`` service is
-generally only used when running in multi-host mode, as it retrieves
-instance-specific metadata. If you are running the ``nova-api`` service, you
-must have ``metadata`` as one of the elements listed in the ``enabled_apis``
-configuration option in ``/etc/nova/nova.conf``. The default ``enabled_apis``
-configuration setting includes the metadata service, so you do not need to
-modify it.
-
-Hosts access the service at ``169.254.169.254:80``, and this is translated to
-``metadata_host:metadata_port`` by an iptables rule established by the
-``nova-network`` service. In multi-host mode, you can set ``metadata_host`` to
-``127.0.0.1``.
-
-For instances to reach the metadata service, the ``nova-network`` service must
-configure iptables to NAT port ``80`` of the ``169.254.169.254`` address to the
-IP address specified in ``metadata_host`` (this defaults to ``$my_ip``, which
-is the IP address of the ``nova-network`` service) and port specified in
-``metadata_port`` (which defaults to ``8775``) in ``/etc/nova/nova.conf``.
-
-.. note::
-
- The ``metadata_host`` configuration option must be an IP address, not a host
- name.
-
-The default Compute service settings assume that ``nova-network`` and
-``nova-api`` are running on the same host. If this is not the case, in the
-``/etc/nova/nova.conf`` file on the host running ``nova-network``, set the
-``metadata_host`` configuration option to the IP address of the host where
-``nova-api`` is running.
-
-.. TODO: Consider grouping the metadata options into the same [metadata]
- group and then we can just link to that in the generated config option doc.
-
-.. list-table:: Description of metadata configuration options
- :header-rows: 2
-
- * - Configuration option = Default value
- - Description
- * - [DEFAULT]
- -
- * - :oslo.config:option:`metadata_host` = $my_ip
- - (StrOpt) The IP address for the metadata API server
- * - :oslo.config:option:`metadata_listen` = 0.0.0.0
- - (StrOpt) The IP address on which the metadata API will listen.
- * - :oslo.config:option:`metadata_listen_port` = 8775
- - (IntOpt) The port on which the metadata API will listen.
- * - :oslo.config:option:`metadata_port` = 8775
- - (IntOpt) The port for the metadata API port
- * - :oslo.config:option:`metadata_workers` = None
- - (IntOpt) Number of workers for metadata service. The default will be
- the number of CPUs available.
- * - **[api]**
- -
- * - :oslo.config:option:`metadata_cache_expiration ` = 15
- - (IntOpt) Time in seconds to cache metadata; 0 to disable metadata
- caching entirely (not recommended). Increasing this should improve
- response times of the metadata API when under heavy load. Higher values
- may increase memory usage and result in longer times for host metadata
- changes to take effect.
- * - :oslo.config:option:`vendordata_providers <api.vendordata_providers>` = StaticJSON
-   - (ListOpt) A list of vendordata providers. See
-     :doc:`Vendordata </user/vendordata>` for more information.
- * - :oslo.config:option:`vendordata_jsonfile_path <api.vendordata_jsonfile_path>` = None
-   - (StrOpt) File to load JSON formatted vendor data from
-
-Enable ping and SSH on VMs
-~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-You need to enable ``ping`` and ``ssh`` on your VMs for network access. This
-can be done with the :command:`openstack` command.
-
-.. note::
-
- Run these commands as root only if the credentials used to interact with
- ``nova-api`` are in ``/root/.bashrc``.
-
-Enable ping and SSH with :command:`openstack security group rule create`
-commands:
-
-.. code-block:: console
-
- $ openstack security group rule create --protocol icmp default
- $ openstack security group rule create --protocol tcp --dst-port 22:22 default
-
-If you have run these commands and still cannot ping or SSH your instances,
-check the number of running ``dnsmasq`` processes; there should be two. If
-not, kill the processes and restart the service with these commands:
-
-.. code-block:: console
-
- # killall dnsmasq
- # service nova-network restart
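-
-You can verify the process count afterwards; a quick sketch:
-
-.. code-block:: console
-
-   $ pgrep -c dnsmasq
-   2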
-
-Configure public (floating) IP addresses
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-This section describes how to configure floating IP addresses with
-``nova-network``. For information about doing this with OpenStack Networking,
-refer to :neutron-doc:`L3-routing-and-NAT
-<admin/archives/adv-features.html#l3-routing-and-nat>`.
-
-Private and public IP addresses
--------------------------------
-
-In this section, the term floating IP address is used to refer to an IP
-address, usually public, that you can dynamically add to a running virtual
-instance.
-
-Every virtual instance is automatically assigned a private IP address. You can
-additionally assign a public (or floating) IP address to an instance. OpenStack
-Compute uses network address translation (NAT) to assign floating IPs to
-virtual instances.
-
-To be able to assign a floating IP address, edit the ``/etc/nova/nova.conf``
-file to specify which interface the ``nova-network`` service should bind public
-IP addresses to:
-
-.. code-block:: ini
-
- public_interface=VLAN100
-
-If you make changes to the ``/etc/nova/nova.conf`` file while the
-``nova-network`` service is running, you will need to restart the service to
-pick up the changes.
-
-.. note::
-
- Floating IPs are implemented by using a source NAT (SNAT rule in iptables),
- so security groups can sometimes display inconsistent behavior if VMs use
- their floating IP to communicate with other VMs, particularly on the same
- physical host. Traffic from VM to VM across the fixed network does not have
- this issue, and so this is the recommended setup. To ensure that traffic
- does not get SNATed to the floating range, explicitly set:
-
- .. code-block:: ini
-
- dmz_cidr=x.x.x.x/y
-
- The ``x.x.x.x/y`` value specifies the range of floating IPs for each pool of
- floating IPs that you define. This configuration is also required if the VMs
- in the source group have floating IPs.
-
-Enable IP forwarding
---------------------
-
-IP forwarding is disabled by default on most Linux distributions. You will need
-to enable it in order to use floating IPs.
-
-.. note::
-
- IP forwarding only needs to be enabled on the nodes that run
- ``nova-network``. However, you will need to enable it on all compute nodes
- if you use ``multi_host`` mode.
-
-To check if IP forwarding is enabled, run:
-
-.. code-block:: console
-
- $ cat /proc/sys/net/ipv4/ip_forward
- 0
-
-Alternatively, run:
-
-.. code-block:: console
-
- $ sysctl net.ipv4.ip_forward
- net.ipv4.ip_forward = 0
-
-In these examples, IP forwarding is disabled.
-
-To enable IP forwarding dynamically, run:
-
-.. code-block:: console
-
- # sysctl -w net.ipv4.ip_forward=1
-
-Alternatively, run:
-
-.. code-block:: console
-
- # echo 1 > /proc/sys/net/ipv4/ip_forward
-
-To make the changes permanent, edit the ``/etc/sysctl.conf`` file and update
-the IP forwarding setting:
-
-.. code-block:: ini
-
- net.ipv4.ip_forward = 1
-
-Save the file and run this command to apply the changes:
-
-.. code-block:: console
-
- # sysctl -p
-
-You can also apply the changes by restarting the network service:
-
-- on Ubuntu, Debian:
-
- .. code-block:: console
-
- # /etc/init.d/networking restart
-
-- on RHEL, Fedora, CentOS, openSUSE and SLES:
-
- .. code-block:: console
-
- # service network restart
-
-Create a list of available floating IP addresses
-------------------------------------------------
-
-Compute maintains a list of floating IP addresses that are available for
-assigning to instances. Use the :command:`nova-manage floating` commands to
-perform floating IP operations:
-
-- Add entries to the list:
-
- .. code-block:: console
-
- # nova-manage floating create --pool nova --ip_range 68.99.26.170/31
-
-- List the floating IP addresses in the pool:
-
- .. code-block:: console
-
- # openstack floating ip list
-
-- Create specific floating IPs for either a single address or a subnet:
-
- .. code-block:: console
-
- # nova-manage floating create --pool POOL_NAME --ip_range CIDR
-
-- Remove floating IP addresses using the same parameters as the create command:
-
-  .. code-block:: console
-
-     # nova-manage floating delete CIDR
-
-For more information about how administrators can associate floating IPs with
-instances, see :python-openstackclient-doc:`ip floating
-<cli/command-objects/ip-floating.html>` in the *python-openstackclient* User
-Documentation.
-
-Automatically add floating IPs
-------------------------------
-
-You can configure ``nova-network`` to automatically allocate and assign a
-floating IP address to virtual instances when they are launched. Add this line
-to the ``/etc/nova/nova.conf`` file:
-
-.. code-block:: ini
-
- auto_assign_floating_ip=True
-
-Save the file, and restart the ``nova-network`` service.
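-
-For example, on hosts using SysV-style init scripts (an illustrative command;
-your init system may differ):
-
-.. code-block:: console
-
-   # service nova-network restart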
-
-.. note::
-
- If this option is enabled, but all floating IP addresses have already been
- allocated, the :command:`openstack server create` command will fail.
-
-Remove a network from a project
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-You cannot delete a network that has been associated with a project. This
-section describes the procedure for disassociating a network so that it can be
-deleted.
-
-In order to disassociate the network, you will need the ID of the project it
-has been associated with. To get the project ID, you will need to be an
-administrator.
-
-Disassociate the network from the project using the :command:`nova-manage
-project scrub` command, with the project ID as the final parameter:
-
-.. code-block:: console
-
- # nova-manage project scrub --project ID
-
-Multiple interfaces for instances (multinic)
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-The multinic feature allows you to use more than one interface with your
-instances. This is useful in several scenarios:
-
-- SSL Configurations (VIPs)
-
-- Services failover/HA
-
-- Bandwidth Allocation
-
-- Administrative/Public access to your instances
-
-Each VIP represents a separate network with its own IP block. Every network
-mode has its own set of changes regarding multinic usage:
-
-.. figure:: figures/SCH_5007_V00_NUAC-multi_nic_OpenStack-Flat-manager.jpg
- :width: 600
-
-.. figure:: figures/SCH_5007_V00_NUAC-multi_nic_OpenStack-Flat-DHCP-manager.jpg
- :width: 600
-
-.. figure:: figures/SCH_5007_V00_NUAC-multi_nic_OpenStack-VLAN-manager.jpg
- :width: 600
-
-Using multinic
---------------
-
-In order to use multinic, create two networks, and attach them to the project
-(referenced by ``$your-project`` in the commands below):
-
-.. code-block:: console
-
- $ nova network-create first-net --fixed-range-v4 20.20.0.0/24 --project-id $your-project
- $ nova network-create second-net --fixed-range-v4 20.20.10.0/24 --project-id $your-project
-
-Each new instance now receives two IP addresses, one from each network's DHCP
-server:
-
-.. code-block:: console
-
-   $ openstack server list
-   +---------+----------+--------+---------------------------------------------+------------+
-   | ID      | Name     | Status | Networks                                    | Image Name |
-   +---------+----------+--------+---------------------------------------------+------------+
-   | 1234... | MyServer | ACTIVE | first-net=20.20.0.3; second-net=20.20.10.14 | cirros     |
-   +---------+----------+--------+---------------------------------------------+------------+
-
-.. note::
-
- Make sure you start the second interface on the instance, or it won't be
- reachable through the second IP.
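-
-   For example, on a Debian-based guest the second interface might be brought
-   up with (a sketch; interface names vary):
-
-   .. code-block:: console
-
-      # ifup eth1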
-
-This example demonstrates how to set up the interfaces within the instance.
-This is the configuration that needs to be applied inside the image.
-
-Edit the ``/etc/network/interfaces`` file:
-
-.. code-block:: bash
-
- # The loopback network interface
- auto lo
- iface lo inet loopback
-
- auto eth0
- iface eth0 inet dhcp
-
- auto eth1
- iface eth1 inet dhcp
-
-If the OpenStack Networking service (neutron) is installed, you can specify the
-networks to attach to the interfaces by using the ``--nic`` flag with the
-:command:`openstack server create` command:
-
-.. code-block:: console
-
- $ openstack server create --image ed8b2a37-5535-4a5f-a615-443513036d71 \
- --flavor 1 --nic net-id=NETWORK1_ID --nic net-id=NETWORK2_ID test-vm1
-
-Troubleshooting Networking
-~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-Cannot reach floating IPs
--------------------------
-
-Problem
--------
-
-You cannot reach your instances through the floating IP address.
-
-Solution
---------
-
-- Check that the default security group allows ICMP (ping) and SSH (port 22),
- so that you can reach the instances:
-
- .. code-block:: console
-
- $ openstack security group rule list default
- +--------------------------------------+-------------+-----------+-----------------+-----------------------+
- | ID | IP Protocol | IP Range | Port Range | Remote Security Group |
- +--------------------------------------+-------------+-----------+-----------------+-----------------------+
- | 63536865-e5b6-4df1-bac5-ca6d97d8f54d | tcp | 0.0.0.0/0 | 22:22 | None |
- | e9d3200f-647a-4293-a9fc-e65ceee189ae | icmp | 0.0.0.0/0 | type=1:code=-1 | None |
- +--------------------------------------+-------------+-----------+-----------------+-----------------------+
-
-- Check the NAT rules have been added to iptables on the node that is running
- ``nova-network``:
-
- .. code-block:: console
-
-     # iptables -L -nv -t nat
-     -A nova-network-PREROUTING -d 68.99.26.170/32 -j DNAT --to-destination 10.0.0.3
-     -A nova-network-floating-snat -s 10.0.0.3/32 -j SNAT --to-source 68.99.26.170
-
-- Check that the public address (``68.99.26.170`` in this example), has been
- added to your public interface. You should see the address in the listing
- when you use the :command:`ip addr` command:
-
- .. code-block:: console
-
- $ ip addr
-     2: eth0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc mq state UP qlen 1000
- link/ether xx:xx:xx:17:4b:c2 brd ff:ff:ff:ff:ff:ff
- inet 13.22.194.80/24 brd 13.22.194.255 scope global eth0
- inet 68.99.26.170/32 scope global eth0
- inet6 fe80::82b:2bf:fe1:4b2/64 scope link
- valid_lft forever preferred_lft forever
-
- .. note::
-
- You cannot use ``SSH`` to access an instance with a public IP from within
- the same server because the routing configuration does not allow it.
-
-- Use ``tcpdump`` to identify if packets are being routed to the inbound
- interface on the compute host. If the packets are reaching the compute hosts
- but the connection is failing, the issue may be that the packet is being
- dropped by reverse path filtering. Try disabling reverse-path filtering on
- the inbound interface. For example, if the inbound interface is ``eth2``,
- run:
-
- .. code-block:: console
-
-     # sysctl -w net.ipv4.conf.eth2.rp_filter=0
-
- If this solves the problem, add the following line to ``/etc/sysctl.conf`` so
- that the reverse-path filter is persistent:
-
- .. code-block:: ini
-
-     net.ipv4.conf.eth2.rp_filter=0
-
-Temporarily disable firewall
-----------------------------
-
-Problem
--------
-
-Networking issues prevent administrators from accessing or reaching VMs
-through various pathways.
-
-Solution
---------
-
-You can disable the firewall by setting this option in ``/etc/nova/nova.conf``:
-
-.. code-block:: ini
-
- firewall_driver=nova.virt.firewall.NoopFirewallDriver
-
-.. warning::
-
- We strongly recommend you remove this line to re-enable the firewall once
- your networking issues have been resolved.
-
-Packet loss from instances to nova-network server (VLANManager mode)
---------------------------------------------------------------------
-
-Problem
--------
-
-If you can access your instances with ``SSH`` but the network to your instance
-is slow, or if you find that certain operations run slower than they should
-(for example, ``sudo``), packet loss could be occurring on the connection to
-the instance.
-
-Packet loss can be caused by Linux networking configuration settings related to
-bridges. Certain settings can cause packets to be dropped between the VLAN
-interface (for example, ``vlan100``) and the associated bridge interface (for
-example, ``br100``) on the host running ``nova-network``.
-
-Solution
---------
-
-One way to check whether this is the problem is to open three terminals and run
-the following commands:
-
-#. In the first terminal, on the host running ``nova-network``, use ``tcpdump``
- on the VLAN interface to monitor DNS-related traffic (UDP, port 53). As
- root, run:
-
- .. code-block:: console
-
- # tcpdump -K -p -i vlan100 -v -vv udp port 53
-
-#. In the second terminal, also on the host running ``nova-network``, use
- ``tcpdump`` to monitor DNS-related traffic on the bridge interface. As
- root, run:
-
- .. code-block:: console
-
- # tcpdump -K -p -i br100 -v -vv udp port 53
-
-#. In the third terminal, use ``SSH`` to access the instance and generate DNS
- requests by using the :command:`nslookup` command:
-
- .. code-block:: console
-
- $ nslookup www.google.com
-
- The symptoms may be intermittent, so try running :command:`nslookup`
- multiple times. If the network configuration is correct, the command should
- return immediately each time. If it is not correct, the command hangs for
- several seconds before returning.
-
-#. If the :command:`nslookup` command sometimes hangs, and there are packets
-   that appear in the first terminal but not the second, then the problem may
-   be due to filtering done on the bridges. Try disabling filtering by running
-   these commands as root:
-
- .. code-block:: console
-
- # sysctl -w net.bridge.bridge-nf-call-arptables=0
- # sysctl -w net.bridge.bridge-nf-call-iptables=0
- # sysctl -w net.bridge.bridge-nf-call-ip6tables=0
-
- If this solves your issue, add the following line to ``/etc/sysctl.conf`` so
- that the changes are persistent:
-
- .. code-block:: ini
-
- net.bridge.bridge-nf-call-arptables=0
- net.bridge.bridge-nf-call-iptables=0
- net.bridge.bridge-nf-call-ip6tables=0
-
-KVM: Network connectivity works initially, then fails
------------------------------------------------------
-
-Problem
--------
-
-With KVM hypervisors, instances running Ubuntu 12.04 sometimes lose network
-connectivity after functioning properly for a period of time.
-
-Solution
---------
-
-Try loading the ``vhost_net`` kernel module as a workaround for this issue (see
-`bug #997978`_). This kernel module may also `improve network performance`_
-on KVM. To load the kernel module:
-
-.. _`bug #997978`: https://bugs.launchpad.net/ubuntu/+source/libvirt/+bug/997978/
-.. _`improve network performance`: http://www.linux-kvm.org/page/VhostNet
-
-.. code-block:: console
-
- # modprobe vhost_net
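-
-To load the module automatically at boot, one option on Debian or Ubuntu
-hosts is (a sketch; other distros use ``/etc/modules-load.d/``):
-
-.. code-block:: console
-
-   # echo vhost_net >> /etc/modules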
-
-.. note::
-
- Loading the module has no effect on running instances.
diff --git a/doc/source/admin/networking.rst b/doc/source/admin/networking.rst
index 83e4d3df3a2..667a5bf12f3 100644
--- a/doc/source/admin/networking.rst
+++ b/doc/source/admin/networking.rst
@@ -24,6 +24,18 @@ A full guide on configuring and using SR-IOV is provided in the
:neutron-doc:`OpenStack Networking service documentation
<admin/config-sriov.html>`
+.. note::
+
+ Nova only supports PCI addresses where the fields are restricted to the
+ following maximum value:
+
+ * domain - 0xFFFF
+ * bus - 0xFF
+ * slot - 0x1F
+ * function - 0x7
+
+ Nova will ignore PCI devices reported by the hypervisor if the address is
+ outside of these ranges.
NUMA Affinity
-------------
@@ -50,14 +62,10 @@ Fortunately, nova provides functionality to ensure NUMA affinitization is
provided for instances using neutron. How this works depends on the type of
port you are trying to use.
-.. todo::
-
- Add documentation for PCI NUMA affinity and PCI policies and link to it from
- here.
-
For SR-IOV ports, virtual functions, which are PCI devices, are attached to the
instance. This means the instance can benefit from the NUMA affinity guarantees
-provided for PCI devices. This happens automatically.
+provided for PCI devices. This happens automatically and is described in detail
+in :ref:`pci-numa-affinity-policy`.
For all other types of ports, some manual configuration is required.
@@ -84,7 +92,7 @@ For all other types of ports, some manual configuration is required.
Consider an L2-type network using the Linux Bridge mechanism driver. As
noted in the :neutron-doc:`neutron documentation
- `, *physets* are mapped to interfaces
+ `, *physnets* are mapped to interfaces
using the ``[linux_bridge] physical_interface_mappings`` configuration
option. For example:
@@ -102,9 +110,9 @@ For all other types of ports, some manual configuration is required.
For an L3-type network using the Linux Bridge mechanism driver, the device
used will be configured using protocol-specific endpoint IP configuration
- option. For VXLAN, this is the ``[vxlan] local_ip`` option. For example::
+ option. For VXLAN, this is the ``[vxlan] local_ip`` option. For example:
- .. code-block::
+ .. code-block:: ini
[vxlan]
local_ip = OVERLAY_INTERFACE_IP_ADDRESS
@@ -142,10 +150,18 @@ For all other types of ports, some manual configuration is required.
networks on a given host. There is only one configuration option that must
be set:
- ``[neutron_tunneled] numa_nodes``
+ ``[neutron_tunnel] numa_nodes``
      This should be set to a list of one or more NUMA nodes to which
      instances using tunneled networks will be affinitized.
+#. Configure a NUMA topology for instance flavor(s)
+
+ For network NUMA affinity to have any effect, the instance must have a NUMA
+ topology itself. This can be configured explicitly, using the
+ ``hw:numa_nodes`` extra spec, or implicitly through the use of CPU pinning
+ (``hw:cpu_policy=dedicated``) or PCI devices. For more information, refer to
+ :doc:`cpu-topologies`.
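+
+   For example, a minimal sketch giving a flavor a single-node NUMA topology:
+
+   .. code-block:: console
+
+      $ openstack flavor set $FLAVOR --property hw:numa_nodes=1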
+
Examples
~~~~~~~~
@@ -167,14 +183,15 @@ with ``provider:physical_network=foo`` must be scheduled on host cores from
NUMA nodes 0, while instances using one or more networks with
``provider:physical_network=bar`` must be scheduled on host cores from both
NUMA nodes 2 and 3. For the latter case, it will be necessary to split the
-guest across two or more host NUMA nodes using the ``hw:numa_nodes``
-:ref:`flavor extra spec `.
+guest across two or more host NUMA nodes using the
+:nova:extra-spec:`hw:numa_nodes` extra spec, as discussed :ref:`here
+`.
Now, take an example for a deployment using L3 networks.
.. code-block:: ini
- [neutron_tunneled]
+ [neutron_tunnel]
numa_nodes = 0
This is much simpler as all tunneled traffic uses the same logical interface.
diff --git a/doc/source/admin/node-down.rst b/doc/source/admin/node-down.rst
index f2d5c509688..58311e80888 100644
--- a/doc/source/admin/node-down.rst
+++ b/doc/source/admin/node-down.rst
@@ -145,7 +145,7 @@ A disk crash, network loss, or power failure can affect several components in
your cloud architecture. The worst disaster for a cloud is a power loss. A
power loss affects these components:
-- A cloud controller (``nova-api``, ``nova-objectstore``, ``nova-network``)
+- A cloud controller (``nova-api``, ``nova-conductor``, ``nova-scheduler``)
- A compute node (``nova-compute``)
@@ -178,9 +178,6 @@ After power resumes and all hardware components restart:
- The iSCSI session from the cloud controller to the compute node no longer
exists.
-- nova-network reapplies configurations on boot and, as a result, recreates
- the iptables and ebtables from the cloud controller to the compute node.
-
- Instances stop running.
Instances are not lost because neither ``destroy`` nor ``terminate`` ran.
diff --git a/doc/source/admin/pci-passthrough.rst b/doc/source/admin/pci-passthrough.rst
index f4f06e0c5df..727a63070de 100644
--- a/doc/source/admin/pci-passthrough.rst
+++ b/doc/source/admin/pci-passthrough.rst
@@ -15,150 +15,264 @@ as multiple PCI devices. Virtual PCI devices are assigned to the same or
different guests. In the case of PCI passthrough, the full physical device is
assigned to only one guest and cannot be shared.
+PCI devices are requested through flavor extra specs, specifically via the
+:nova:extra-spec:`pci_passthrough:alias` flavor extra spec.
+This guide demonstrates how to enable PCI passthrough for a type of PCI device
+with a vendor ID of ``8086`` and a product ID of ``154d`` - an Intel X520
+Network Adapter - by mapping them to the alias ``a1``.
+You should adjust the instructions for other devices with potentially different
+capabilities.
+
.. note::
- For information on attaching virtual SR-IOV devices to guests, refer to the
-   :neutron-doc:`Networking Guide <admin/config-sriov.html>`.
+ For information on creating servers with SR-IOV network interfaces, refer to
+   the :neutron-doc:`Networking Guide <admin/config-sriov.html>`.
-To enable PCI passthrough, follow the steps below:
+ **Limitations**
-#. Configure nova-scheduler (Controller)
+   * Attaching SR-IOV ports to existing servers was not supported until the
+     22.0.0 Victoria release. Due to various bugs in libvirt and qemu we
+     recommend using at least libvirt version 6.0.0 and at least qemu
+     version 4.2.
+ * Cold migration (resize) of servers with SR-IOV devices attached was not
+ supported until the 14.0.0 Newton release, see
+ `bug 1512800 `_ for details.
-#. Configure nova-api (Controller)**
+.. note::
-#. Configure a flavor (Controller)
+ Nova only supports PCI addresses where the fields are restricted to the
+ following maximum value:
-#. Enable PCI passthrough (Compute)
+ * domain - 0xFFFF
+ * bus - 0xFF
+ * slot - 0x1F
+ * function - 0x7
-#. Configure PCI devices in nova-compute (Compute)
+ Nova will ignore PCI devices reported by the hypervisor if the address is
+ outside of these ranges.
-.. note::
+Enabling PCI passthrough
+------------------------
- The PCI device with address ``0000:41:00.0`` is used as an example. This
- will differ between environments.
+Configure compute host
+~~~~~~~~~~~~~~~~~~~~~~
-Configure nova-scheduler (Controller)
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+To enable PCI passthrough on an x86, Linux-based compute node, the following
+are required:
-#. Configure ``nova-scheduler`` as specified in :neutron-doc:`Configure
-   nova-scheduler
-   <admin/config-sriov.html#configure-nova-scheduler>` for ``numa_policy``
+information.
- For more information about the syntax of ``alias``, refer to :doc:`/configuration/config`.
+Once configured, restart the :program:`nova-api` service.
-#. Restart the ``nova-compute`` service.
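+
+As a rough sketch, enabling passthrough for the example X520 device involves
+setting the ``[pci]`` options in ``nova.conf`` on the relevant hosts
+(illustrative values; adjust the IDs and device type for your hardware):
+
+.. code-block:: ini
+
+   [pci]
+   passthrough_whitelist = { "vendor_id": "8086", "product_id": "154d" }
+   alias = { "vendor_id": "8086", "product_id": "154d", "device_type": "type-PF", "name": "a1" }
+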
-Create instances with PCI passthrough devices
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+Configuring a flavor or image
+-----------------------------
-The ``nova-scheduler`` selects a destination host that has PCI devices
-available with the specified ``vendor_id`` and ``product_id`` that matches the
-``alias`` from the flavor.
+Once the alias has been configured, it can be used in a flavor extra spec.
+For example, to request two of the PCI devices referenced by alias ``a1``, run:
.. code-block:: console
- # openstack server create --flavor m1.large --image cirros-0.3.5-x86_64-uec --wait test-pci
+ $ openstack flavor set m1.large --property "pci_passthrough:alias"="a1:2"
+
+For more information about the syntax for ``pci_passthrough:alias``, refer to
+:doc:`the documentation `.
+
+
+.. _pci-numa-affinity-policy:
+
+PCI-NUMA affinity policies
+--------------------------
+
+By default, the libvirt driver enforces strict NUMA affinity for PCI devices,
+be they PCI passthrough devices or neutron SR-IOV interfaces. This means that
+by default a PCI device must be allocated from the same host NUMA node as at
+least one of the instance's CPUs. This isn't always necessary, however, and you
+can configure this policy using the
+:nova:extra-spec:`hw:pci_numa_affinity_policy` flavor extra spec or equivalent
+image metadata property. There are three possible values allowed:
+
+**required**
+ This policy means that nova will boot instances with PCI devices **only**
+ if at least one of the NUMA nodes of the instance is associated with these
+ PCI devices. It means that if NUMA node info for some PCI devices could not
+ be determined, those PCI devices wouldn't be consumable by the instance.
+ This provides maximum performance.
+
+**socket**
+ This policy means that the PCI device must be affined to the same host
+ socket as at least one of the guest NUMA nodes. For example, consider a
+ system with two sockets, each with two NUMA nodes, numbered node 0 and node
+ 1 on socket 0, and node 2 and node 3 on socket 1. There is a PCI device
+  affined to node 0. An instance with two guest NUMA nodes and the
+  ``socket`` policy can be affined to either:
+
+ * node 0 and node 1
+ * node 0 and node 2
+ * node 0 and node 3
+ * node 1 and node 2
+ * node 1 and node 3
+
+ The instance cannot be affined to node 2 and node 3, as neither of those
+ are on the same socket as the PCI device. If the other nodes are consumed
+ by other instances and only nodes 2 and 3 are available, the instance
+ will not boot.
+
+**preferred**
+ This policy means that ``nova-scheduler`` will choose a compute host
+ with minimal consideration for the NUMA affinity of PCI devices.
+ ``nova-compute`` will attempt a best effort selection of PCI devices
+ based on NUMA affinity, however, if this is not possible then
+ ``nova-compute`` will fall back to scheduling on a NUMA node that is not
+ associated with the PCI device.
+
+**legacy**
+ This is the default policy and it describes the current nova behavior.
+ Usually we have information about association of PCI devices with NUMA
+ nodes. However, some PCI devices do not provide such information. The
+ ``legacy`` value will mean that nova will boot instances with PCI device
+ if either:
+
+  * The PCI device is associated with at least one of the NUMA nodes on which
+    the instance will be booted
+
+ * There is no information about PCI-NUMA affinity available
+
+For example, to configure a flavor to use the ``preferred`` PCI NUMA affinity
+policy for any neutron SR-IOV interfaces attached by the user:
+
+.. code-block:: console
+
+ $ openstack flavor set $FLAVOR \
+ --property hw:pci_numa_affinity_policy=preferred
+
+You can also configure this for PCI passthrough devices by specifying the
+policy in the alias configuration via :oslo.config:option:`pci.alias`. For more
+information, refer to :oslo.config:option:`the documentation <pci.alias>`.
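+
+For example, an alias applying the ``preferred`` policy to the example X520
+devices might look like this (a sketch; adjust the IDs for your hardware):
+
+.. code-block:: ini
+
+   [pci]
+   alias = { "vendor_id": "8086", "product_id": "154d", "device_type": "type-PF", "name": "a1", "numa_policy": "preferred" }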
diff --git a/doc/source/admin/ports-with-resource-requests.rst b/doc/source/admin/ports-with-resource-requests.rst
new file mode 100644
index 00000000000..2a2a5d41ef6
--- /dev/null
+++ b/doc/source/admin/ports-with-resource-requests.rst
@@ -0,0 +1,90 @@
+=================================
+Using ports with resource request
+=================================
+
+Starting from microversion 2.72 nova supports creating servers with neutron
+ports having resource request visible as an admin-only port attribute
+``resource_request``. For example, a neutron port has a resource request if it
+has a QoS minimum bandwidth rule attached.
+
+The :neutron-doc:`Quality of Service (QoS): Guaranteed Bandwidth
+<admin/config-qos-min-bw.html>` document describes how to configure neutron to
+use this feature.
+
+Resource allocation
+~~~~~~~~~~~~~~~~~~~
+
+Nova collects and combines the resource request from each port in a boot
+request and sends one allocation candidate request to placement during
+scheduling so placement will make sure that the resource request of the ports
+are fulfilled. At the end of the scheduling nova allocates one candidate in
+placement. Therefore the requested resources for each port from a single boot
+request will be allocated under the server's allocation in placement.
+
+
+Resource Group policy
+~~~~~~~~~~~~~~~~~~~~~
+
+Nova represents the resource request of each neutron port as a separate
+:placement-doc:`Granular Resource Request group
+<usage/provider-tree.html#granular-resource-requests>`
+when querying placement for allocation candidates. When a server create request
+includes more than one port with resource requests then more than one group
+will be used in the allocation candidate query. In this case placement requires
+the ``group_policy`` to be defined. Today it is only possible via the
+``group_policy`` key of the :nova-doc:`flavor extra_spec
+<user/flavors.html#extra-specs>`. The possible values are ``isolate`` and
+``none``.
+
+When the policy is set to ``isolate`` then each request group and therefore the
+resource request of each neutron port will be fulfilled from separate resource
+providers. In case of neutron ports with ``vnic_type=direct`` or
+``vnic_type=macvtap`` this means that each port will use a virtual function
+from different physical functions.
+
+When the policy is set to ``none`` then the resource request of the neutron
+ports can be fulfilled from overlapping resource providers. In case of neutron
+ports with ``vnic_type=direct`` or ``vnic_type=macvtap`` this means the ports
+may use virtual functions from the same physical function.
+
+For neutron ports with ``vnic_type=normal`` the group policy defines the
+collocation policy at the OVS bridge level, so ``group_policy=none`` is a
+reasonable default value in this case.
+
+If the ``group_policy`` is missing from the flavor then the server create
+request will fail with 'No valid host was found' and a warning describing the
+missing policy will be logged.
+
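+For example, a minimal sketch of setting the policy on a flavor:
+
+.. code-block:: console
+
+   $ openstack flavor set $FLAVOR --property group_policy=none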
+
+Virt driver support
+~~~~~~~~~~~~~~~~~~~
+
+Supporting neutron ports with ``vnic_type=direct`` or ``vnic_type=macvtap``
+depends on the capability of the virt driver. For the supported virt drivers
+see the :nova-doc:`Support matrix <user/support-matrix.html>`.
+
+If the virt driver on the compute host does not support the needed capability
+then the PCI claim will fail on the host and a re-schedule will be triggered.
+It is suggested not to configure bandwidth inventory in the neutron agents on
+these compute hosts to avoid unnecessary reschedules.
+
+
+Extended resource request
+~~~~~~~~~~~~~~~~~~~~~~~~~
+
+It is expected that neutron 20.0.0 (Yoga) will implement an extended resource
+request format via the ``port-resource-request-groups`` neutron API extension.
+As of nova 24.0.0 (Xena), nova already supports this extension if every
+nova-compute service is upgraded to the Xena version and the
+:oslo.config:option:`upgrade_levels.compute` configuration does not prevent
+the computes from using the latest RPC version.
+
+The extended resource request allows a single Neutron port to request
+resources in more than one request group. This also means that using just one
+port in a server create request would require a group policy to be provided
+in the flavor. Today the only case when a single port generates more than one
+request group is when that port has a QoS policy with both minimum bandwidth
+and minimum packet rate rules. Due to the placement resource model of these
+features, in this case the two request groups will always be fulfilled from
+separate resource providers and therefore neither the ``group_policy=none``
+nor the ``group_policy=isolate`` flavor extra specs will result in any
+additional restriction on the placement of the resources. In the multi-port
+case the Resource Group policy section above still applies.
+
diff --git a/doc/source/admin/quotas.rst b/doc/source/admin/quotas.rst
index e9f0935bbb0..c8000b3ba28 100644
--- a/doc/source/admin/quotas.rst
+++ b/doc/source/admin/quotas.rst
@@ -1,304 +1,370 @@
-=============================
-Manage Compute service quotas
-=============================
-
-As an administrative user, you can use the :command:`nova quota-*` commands,
-which are provided by the ``python-novaclient`` package, to update the Compute
-service quotas for a specific project or project user, as well as update the
-quota defaults for a new project.
-
-.. todo::
-
- At some point, probably in Queens, we need to scrub this page and mention
- the microversions that remove the proxy and network-related resource quotas.
-
-.. rubric:: Compute quota descriptions
-
-.. list-table::
- :header-rows: 1
- :widths: 10 40
-
- * - Quota name
- - Description
- * - cores
- - Number of instance cores (VCPUs) allowed per project.
- * - fixed-ips
- - Number of fixed IP addresses allowed per project. This number
- must be equal to or greater than the number of allowed
- instances.
- * - floating-ips
- - Number of floating IP addresses allowed per project.
- * - injected-file-content-bytes
- - Number of content bytes allowed per injected file.
- * - injected-file-path-bytes
- - Length of injected file path.
- * - injected-files
- - Number of injected files allowed per project.
- * - instances
- - Number of instances allowed per project.
- * - key-pairs
- - Number of key pairs allowed per user.
- * - metadata-items
- - Number of metadata items allowed per instance.
- * - ram
- - Megabytes of instance ram allowed per project.
- * - security-groups
- - Number of security groups per project.
- * - security-group-rules
- - Number of security group rules per project.
- * - server-groups
- - Number of server groups per project.
- * - server-group-members
- - Number of servers per server group.
-
-View and update Compute quotas for a project
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-To view and update default quota values
----------------------------------------
-
-#. List all default quotas for all projects:
+=============
+Manage quotas
+=============
- .. code-block:: console
+.. note::
- $ openstack quota show --default
-
- +-----------------------------+-------+
- | Quota | Limit |
- +-----------------------------+-------+
- | instances | 10 |
- | cores | 20 |
- | ram | 51200 |
- | floating_ips | 10 |
- | fixed_ips | -1 |
- | metadata_items | 128 |
- | injected_files | 5 |
- | injected_file_content_bytes | 10240 |
- | injected_file_path_bytes | 255 |
- | key_pairs | 100 |
- | security_groups | 10 |
- | security_group_rules | 20 |
- | server_groups | 10 |
- | server_group_members | 10 |
- +-----------------------------+-------+
-
-#. Update a default value for a new project, for example:
+   This section provides deployment information about the quota feature. For
+   end-user information about quotas, including information about the type of
+   quotas available, refer to the :doc:`user guide </user/quotas>`.
- .. code-block:: console
+To prevent system capacities from being exhausted without notification, you can
+set up quotas. Quotas are operational limits. For example, the number of
+gigabytes allowed for each project can be controlled so that cloud resources
+are optimized. Quotas can be enforced at both the project and the project-user
+level.
- $ openstack quota set --instances 15 default
+Starting in the 16.0.0 Pike release, the quota calculation system in nova was
+overhauled and the old reserve/commit/rollback flow was changed to `count
+resource usage`__ at the point of whatever operation is being performed, such
+as creating or resizing a server. A check will be performed by counting current
+usage for the relevant resource and then, if
+:oslo.config:option:`quota.recheck_quota` is True, another check will be
+performed to ensure the initial check is still valid.
-To view quota values for an existing project
---------------------------------------------
+By default resource usage is counted using the API and cell databases but nova
+can be configured to count some resource usage without using the cell
+databases. See `Quota usage from placement`_ for details.
-#. List the currently set quota values for a project:
+Using the command-line interface, you can manage quotas for nova, along with
+:cinder-doc:`cinder ` and :neutron-doc:`neutron
+`. You would typically change default values
+because, for example, a project requires more than ten volumes or 1 TB on a
+compute node.
- .. code-block:: console
+__ https://specs.openstack.org/openstack/nova-specs/specs/pike/implemented/cells-count-resources-to-check-quota-in-api.html
- $ openstack quota show PROJECT_NAME
-
- +-----------------------------+-------+
- | Quota | Limit |
- +-----------------------------+-------+
- | instances | 10 |
- | cores | 20 |
- | ram | 51200 |
- | floating_ips | 10 |
- | fixed_ips | -1 |
- | metadata_items | 128 |
- | injected_files | 5 |
- | injected_file_content_bytes | 10240 |
- | injected_file_path_bytes | 255 |
- | key_pairs | 100 |
- | security_groups | 10 |
- | security_group_rules | 20 |
- | server_groups | 10 |
- | server_group_members | 10 |
- +-----------------------------+-------+
-
-To update quota values for an existing project
-----------------------------------------------
-
-#. Obtain the project ID.
- .. code-block:: console
+Checking quota
+--------------
- $ project=$(openstack project show -f value -c id PROJECT_NAME)
+When calculating limits for a given resource and project, the following checks
+are made in order:
-#. Update a particular quota value.
+#. Project-specific limits
+
+ Depending on the resource, is there a project-specific limit on the
+ resource in either the ``quotas`` or ``project_user_quotas`` tables in the
+ database? If so, use that as the limit. You can create these resources
+ using:
.. code-block:: console
- $ openstack quota set --QUOTA_NAME QUOTA_VALUE PROJECT_OR_CLASS
+      $ openstack quota set --instances 5 <project>
+
+#. Default limits
- For example:
+ Check to see if there is a hard limit for the given resource in the
+ ``quota_classes`` table in the database for the ``default`` quota class. If
+ so, use that as the limit. You can modify the default quota limit for a
+ resource using:
.. code-block:: console
- $ openstack quota set --floating-ips 20 PROJECT_OR_CLASS
- $ openstack quota show PROJECT_NAME
- +-----------------------------+-------+
- | Quota | Limit |
- +-----------------------------+-------+
- | instances | 10 |
- | cores | 20 |
- | ram | 51200 |
- | floating_ips | 20 |
- | fixed_ips | -1 |
- | metadata_items | 128 |
- | injected_files | 5 |
- | injected_file_content_bytes | 10240 |
- | injected_file_path_bytes | 255 |
- | key_pairs | 100 |
- | security_groups | 10 |
- | security_group_rules | 20 |
- | server_groups | 10 |
- | server_group_members | 10 |
- +-----------------------------+-------+
+ $ openstack quota set --instances 5 --class default
.. note::
- To view a list of options for the :command:`openstack quota set` command,
- run:
+ Only the ``default`` class is supported by nova.
- .. code-block:: console
+#. Config-driven limits
- $ openstack help quota set
+ If the above does not provide a resource limit, then rely on the
+ configuration options in the :oslo.config:group:`quota` config group for
+ the default limits.
-View and update Compute quotas for a project user
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+.. note::
-To view quota values for a project user
----------------------------------------
+ The API sets the limit in the ``quota_classes`` table. Once a default limit
+   is set via the ``default`` quota class, that takes precedence over any
+ changes to that resource limit in the configuration options. In other
+ words, once you've changed things via the API, you either have to keep
+ those synchronized with the configuration values or remove the default
+ limit from the database manually as there is no REST API for removing quota
+ class values from the database.
-#. Place the user ID in a usable variable.
- .. code-block:: console
+.. _quota-usage-from-placement:
- $ projectUser=$(openstack user show -f value -c id USER_NAME)
+Quota usage from placement
+--------------------------
-#. Place the user's project ID in a usable variable, as follows:
+Starting in the Train (20.0.0) release, it is possible to configure quota usage
+counting of cores and RAM from the placement service and instances from
+instance mappings in the API database instead of counting resources from cell
+databases. This makes quota usage counting resilient in the presence of `down
+or poor-performing cells`__.
- .. code-block:: console
+Quota usage counting from placement is opt-in via the
+:oslo.config:option:`quota.count_usage_from_placement` config option:
- $ project=$(openstack project show -f value -c id PROJECT_NAME)
+.. code-block:: ini
-#. List the currently set quota values for a project user.
+ [quota]
+ count_usage_from_placement = True
- .. code-block:: console
+There are some things to note when opting in to counting quota usage from
+placement:
- $ nova quota-show --user $projectUser --tenant $project
+* Counted usage will not be accurate in an environment where multiple Nova
+ deployments are sharing a placement deployment because currently placement
+ has no way of partitioning resource providers between different Nova
+ deployments. Operators who are running multiple Nova deployments that share a
+ placement deployment should not set the
+ :oslo.config:option:`quota.count_usage_from_placement` configuration option
+ to ``True``.
- For example:
+* Behavior will be different for resizes. During a resize, resource allocations
+ are held on both the source and destination (even on the same host, see
+ https://bugs.launchpad.net/nova/+bug/1790204) until the resize is confirmed
+ or reverted. Quota usage will be inflated for servers in this state and
+ operators should weigh the advantages and disadvantages before enabling
+ :oslo.config:option:`quota.count_usage_from_placement`.
- .. code-block:: console
+* The ``populate_queued_for_delete`` and ``populate_user_id`` online data
+ migrations must be completed before usage can be counted from placement.
+ Until the data migration is complete, the system will fall back to legacy
+ quota usage counting from cell databases depending on the result of an EXISTS
+ database query during each quota check, if
+ :oslo.config:option:`quota.count_usage_from_placement` is set to ``True``.
+ Operators who want to avoid the performance hit from the EXISTS queries
+ should wait to set the :oslo.config:option:`quota.count_usage_from_placement`
+ configuration option to ``True`` until after they have completed their online
+ data migrations via ``nova-manage db online_data_migrations``.
- $ nova quota-show --user $projectUser --tenant $project
- +-----------------------------+-------+
- | Quota | Limit |
- +-----------------------------+-------+
- | instances | 10 |
- | cores | 20 |
- | ram | 51200 |
- | floating_ips | 20 |
- | fixed_ips | -1 |
- | metadata_items | 128 |
- | injected_files | 5 |
- | injected_file_content_bytes | 10240 |
- | injected_file_path_bytes | 255 |
- | key_pairs | 100 |
- | security_groups | 10 |
- | security_group_rules | 20 |
- | server_groups | 10 |
- | server_group_members | 10 |
- +-----------------------------+-------+
-
-To update quota values for a project user
------------------------------------------
-
-#. Place the user ID in a usable variable.
+* Behavior will be different for unscheduled servers in ``ERROR`` state. A
+ server in ``ERROR`` state that has never been scheduled to a compute host
+ will not have placement allocations, so it will not consume quota usage for
+ cores and ram.
- .. code-block:: console
+* Behavior will be different for servers in ``SHELVED_OFFLOADED`` state. A
+ server in ``SHELVED_OFFLOADED`` state will not have placement allocations, so
+ it will not consume quota usage for cores and ram. Note that because of this,
+ it will be possible for a request to unshelve a server to be rejected if the
+ user does not have enough quota available to support the cores and ram needed
+ by the server to be unshelved.
- $ projectUser=$(openstack user show -f value -c id USER_NAME)
+__ https://docs.openstack.org/api-guide/compute/down_cells.html
-#. Place the user's project ID in a usable variable, as follows:
- .. code-block:: console
+Known issues
+------------
- $ project=$(openstack project show -f value -c id PROJECT_NAME)
+If not :ref:`counting quota usage from placement <quota-usage-from-placement>`
+it is possible for down or poor-performing cells to impact quota calculations.
+See the :doc:`cells documentation </admin/cells>` for details.
-#. Update a particular quota value, as follows:
- .. code-block:: console
+Future plans
+------------
- $ nova quota-update --user $projectUser --QUOTA_NAME QUOTA_VALUE $project
+Hierarchical quotas
+~~~~~~~~~~~~~~~~~~~
- For example:
+There has long been a desire to support hierarchical or nested quotas
+leveraging support in the identity service for hierarchical projects.
+See the `unified limits`__ spec for details.
- .. code-block:: console
+__ https://review.opendev.org/#/c/602201/
- $ nova quota-update --user $projectUser --floating-ips 12 $project
- $ nova quota-show --user $projectUser --tenant $project
- +-----------------------------+-------+
- | Quota | Limit |
- +-----------------------------+-------+
- | instances | 10 |
- | cores | 20 |
- | ram | 51200 |
- | floating_ips | 12 |
- | fixed_ips | -1 |
- | metadata_items | 128 |
- | injected_files | 5 |
- | injected_file_content_bytes | 10240 |
- | injected_file_path_bytes | 255 |
- | key_pairs | 100 |
- | security_groups | 10 |
- | security_group_rules | 20 |
- | server_groups | 10 |
- | server_group_members | 10 |
- +-----------------------------+-------+
- .. note::
+Configuration
+-------------
+
+View and update default quota values
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+To list all default quotas for a project, run:
+
+.. code-block:: console
+
+ $ openstack quota show --default
+
+.. note::
+
+ This lists default quotas for all services and not just nova.
+
+To update a default value for a new project, run:
+
+.. code-block:: console
+
+ $ openstack quota set --class --instances 15 default
+
+View and update quota values for a project or class
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+To list quotas for a project, run:
+
+.. code-block:: console
+
+ $ openstack quota show PROJECT
+
+.. note::
+
+ This lists project quotas for all services and not just nova.
- To view a list of options for the :command:`nova quota-update` command,
- run:
+To update quotas for a project, run:
- .. code-block:: console
+.. code-block:: console
+
+ $ openstack quota set --QUOTA QUOTA_VALUE PROJECT
+
+To update quotas for a class, run:
+
+.. code-block:: console
- $ nova help quota-update
+ $ openstack quota set --class --QUOTA QUOTA_VALUE CLASS
-To display the current quota usage for a project user
------------------------------------------------------
+.. note::
+
+ Only the ``default`` class is supported by nova.
+
+For example:
+
+.. code-block:: console
-Use :command:`nova limits` to get a list of the
-current quota values and the current quota usage:
+ $ openstack quota set --instances 12 my-project
+ $ openstack quota show my-project
+ +----------------------+----------------------------------+
+ | Field | Value |
+ +----------------------+----------------------------------+
+ | backup-gigabytes | 1000 |
+ | backups | 10 |
+ | cores | 32 |
+ | fixed-ips | -1 |
+ | floating-ips | 10 |
+ | gigabytes | 1000 |
+ | health_monitors | None |
+ | injected-file-size | 10240 |
+ | injected-files | 5 |
+ | injected-path-size | 255 |
+ | instances | 12 |
+ | key-pairs | 100 |
+ | l7_policies | None |
+ | listeners | None |
+ | load_balancers | None |
+ | location | None |
+ | name | None |
+ | networks | 20 |
+ | per-volume-gigabytes | -1 |
+ | pools | None |
+ | ports | 60 |
+ | project | c8156b55ec3b486193e73d2974196993 |
+ | project_name | project |
+ | properties | 128 |
+ | ram | 65536 |
+ | rbac_policies | 10 |
+ | routers | 10 |
+ | secgroup-rules | 50 |
+ | secgroups | 50 |
+ | server-group-members | 10 |
+ | server-groups | 10 |
+ | snapshots | 10 |
+ | subnet_pools | -1 |
+ | subnets | 20 |
+ | volumes | 10 |
+ +----------------------+----------------------------------+
+
+To view a list of options for the :command:`openstack quota show` and
+:command:`openstack quota set` commands, run:
.. code-block:: console
- $ nova limits --tenant PROJECT_NAME
-
- +------+-----+-------+--------+------+----------------+
- | Verb | URI | Value | Remain | Unit | Next_Available |
- +------+-----+-------+--------+------+----------------+
- +------+-----+-------+--------+------+----------------+
-
- +--------------------+------+-------+
- | Name | Used | Max |
- +--------------------+------+-------+
- | Cores | 0 | 20 |
- | Instances | 0 | 10 |
- | Keypairs | - | 100 |
- | Personality | - | 5 |
- | Personality Size | - | 10240 |
- | RAM | 0 | 51200 |
- | Server Meta | - | 128 |
- | ServerGroupMembers | - | 10 |
- | ServerGroups | 0 | 10 |
- +--------------------+------+-------+
+ $ openstack quota show --help
+ $ openstack quota set --help
+
+View and update quota values for a project user
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
.. note::
- The :command:`nova limits` command generates an empty
- table as a result of the Compute API, which prints an
- empty list for backward compatibility purposes.
+ User-specific quotas are legacy and will be removed when migration to
+   :keystone-doc:`unified limits <admin/unified-limits.html>` is complete.
+ User-specific quotas were added as a way to provide two-level hierarchical
+ quotas and this feature is already being offered in unified limits. For
+ this reason, the below commands have not and will not be ported to
+ openstackclient.
+
+To show quotas for a specific project user, run:
+
+.. code-block:: console
+
+ $ nova quota-show --user USER PROJECT
+
+To update quotas for a specific project user, run:
+
+.. code-block:: console
+
+ $ nova quota-update --user USER --QUOTA QUOTA_VALUE PROJECT
+
+For example:
+
+.. code-block:: console
+
+ $ projectUser=$(openstack user show -f value -c id USER)
+ $ project=$(openstack project show -f value -c id PROJECT)
+
+   $ nova quota-update --user $projectUser --instances 12 $project
+ $ nova quota-show --user $projectUser --tenant $project
+ +-----------------------------+-------+
+ | Quota | Limit |
+ +-----------------------------+-------+
+ | instances | 12 |
+ | cores | 20 |
+ | ram | 51200 |
+ | floating_ips | 10 |
+ | fixed_ips | -1 |
+ | metadata_items | 128 |
+ | injected_files | 5 |
+ | injected_file_content_bytes | 10240 |
+ | injected_file_path_bytes | 255 |
+ | key_pairs | 100 |
+ | security_groups | 10 |
+ | security_group_rules | 20 |
+ | server_groups | 10 |
+ | server_group_members | 10 |
+ +-----------------------------+-------+
+
+To view the quota usage for the current user, run:
+
+.. code-block:: console
+
+ $ nova limits --tenant PROJECT
+
+For example:
+
+.. code-block:: console
+
+ $ nova limits --tenant my-project
+ +------+-----+-------+--------+------+----------------+
+ | Verb | URI | Value | Remain | Unit | Next_Available |
+ +------+-----+-------+--------+------+----------------+
+ +------+-----+-------+--------+------+----------------+
+
+ +--------------------+------+-------+
+ | Name | Used | Max |
+ +--------------------+------+-------+
+ | Cores | 0 | 20 |
+ | Instances | 0 | 10 |
+ | Keypairs | - | 100 |
+ | Personality | - | 5 |
+ | Personality Size | - | 10240 |
+ | RAM | 0 | 51200 |
+ | Server Meta | - | 128 |
+ | ServerGroupMembers | - | 10 |
+ | ServerGroups | 0 | 10 |
+ +--------------------+------+-------+
+
+.. note::
+
+ The :command:`nova limits` command generates an empty table as a result of
+ the Compute API, which prints an empty list for backward compatibility
+ purposes.
+
+To view a list of options for the :command:`nova quota-show` and
+:command:`nova quota-update` commands, run:
+
+.. code-block:: console
+
+ $ nova help quota-show
+ $ nova help quota-update
diff --git a/doc/source/admin/quotas2.rst b/doc/source/admin/quotas2.rst
deleted file mode 100644
index 19cd8d50751..00000000000
--- a/doc/source/admin/quotas2.rst
+++ /dev/null
@@ -1,54 +0,0 @@
-.. _manage-quotas:
-
-=============
-Manage quotas
-=============
-
-.. todo:: Merge this into 'quotas.rst'
-
-To prevent system capacities from being exhausted without notification, you can
-set up quotas. Quotas are operational limits. For example, the number of
-gigabytes allowed for each project can be controlled so that cloud resources
-are optimized. Quotas can be enforced at both the project and the project-user
-level.
-
-Using the command-line interface, you can manage quotas for the OpenStack
-Compute service, the OpenStack Block Storage service, and the OpenStack
-Networking service.
-
-The cloud operator typically changes default values because a project requires
-more than ten volumes or 1 TB on a compute node.
-
-.. note::
-
- To view all projects, run:
-
- .. code-block:: console
-
- $ openstack project list
- +----------------------------------+----------+
- | ID | Name |
- +----------------------------------+----------+
- | e66d97ac1b704897853412fc8450f7b9 | admin |
- | bf4a37b885fe46bd86e999e50adad1d3 | services |
- | 21bd1c7c95234fd28f589b60903606fa | tenant01 |
- | f599c5cd1cba4125ae3d7caed08e288c | tenant02 |
- +----------------------------------+----------+
-
- To display all current users for a project, run:
-
- .. code-block:: console
-
- $ openstack user list --project PROJECT_NAME
- +----------------------------------+--------+
- | ID | Name |
- +----------------------------------+--------+
- | ea30aa434ab24a139b0e85125ec8a217 | demo00 |
- | 4f8113c1d838467cad0c2f337b3dfded | demo01 |
- +----------------------------------+--------+
-
-Use :samp:`openstack quota show {PROJECT_NAME}` to list all quotas for a
-project.
-
-Use :samp:`openstack quota set {PROJECT_NAME} {--parameters}` to set quota
-values.
diff --git a/doc/source/admin/real-time.rst b/doc/source/admin/real-time.rst
new file mode 100644
index 00000000000..cad78df93a5
--- /dev/null
+++ b/doc/source/admin/real-time.rst
@@ -0,0 +1,152 @@
+=========
+Real Time
+=========
+
+.. versionadded:: 13.0.0 (Mitaka)
+
+Nova supports configuring `real-time policies`__ for instances. This builds
+upon the improved performance offered by :doc:`CPU pinning <cpu-topologies>`
+by providing stronger guarantees for worst case scheduler latency for vCPUs.
+
+.. __: https://en.wikipedia.org/wiki/Real-time_computing
+
+
+Enabling Real-Time
+------------------
+
+Currently the creation of real-time instances is only supported when using the
+libvirt compute driver with a :oslo.config:option:`libvirt.virt_type` of
+``kvm`` or ``qemu``. It requires extensive configuration of the host and this
+document provides but a rough overview of the changes required. Configuration
+will vary depending on your hardware, BIOS configuration, host and guest
+operating systems, and applications.
+
+BIOS configuration
+~~~~~~~~~~~~~~~~~~
+
+Configure your host BIOS as recommended in the `rt-wiki`__ page.
+The most important steps are:
+
+- Disable power management, including CPU sleep states
+- Disable SMT (hyper-threading) or any option related to logical processors
+
+These are standard steps used in benchmarking as both sets of features can
+result in non-deterministic behavior.
+
+.. __: https://rt.wiki.kernel.org/index.php/HOWTO:_Build_an_RT-application
+
+OS configuration
+~~~~~~~~~~~~~~~~
+
+This is inherently specific to the distro used, however, there are some common
+steps:
+
+- Install the real-time (preemptible) kernel (``PREEMPT_RT_FULL``) and
+ real-time KVM modules
+- Configure hugepages
+- Isolate host cores to be used for instances from the kernel
+- Disable features like CPU frequency scaling (e.g. P-States on Intel
+ processors)
+
+RHEL and RHEL-derived distros like CentOS provide packages in their
+repositories to accomplish this. The ``kernel-rt`` and ``kernel-rt-kvm``
+packages will provide the real-time kernel and real-time KVM module,
+respectively, while the ``tuned-profiles-realtime`` package will provide
+`tuned`__ profiles to configure the host for real-time workloads. You should
+refer to your distro documentation for more information.
+
+.. __: https://tuned-project.org/
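+
+For example, on such hosts the real-time profile might be applied with (an
+illustrative command; available profile names vary by distro):
+
+.. code-block:: console
+
+   # tuned-adm profile realtime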
+
+Validation
+~~~~~~~~~~
+
+Once your BIOS and the host OS have been configured, you can validate
+"real-time readiness" using the ``hwlatdetect`` and ``rteval`` utilities. On
+RHEL and RHEL-derived hosts, you can install these using the ``rt-tests``
+package. More information about the ``rteval`` tool can be found `here`__.
+
+.. __: https://git.kernel.org/pub/scm/utils/rteval/rteval.git/tree/README
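+
+For example, a short hardware latency check (an illustrative invocation):
+
+.. code-block:: console
+
+   # hwlatdetect --duration=60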
+
+
+Configuring a flavor or image
+-----------------------------
+
+.. versionchanged:: 22.0.0 (Victoria)
+
+ Previously, it was necessary to specify
+ :nova:extra-spec:`hw:cpu_realtime_mask` when realtime mode was enabled via
+ :nova:extra-spec:`hw:cpu_realtime`. Starting in Victoria, it is possible
+ to omit this when an emulator thread policy is configured using the
+ :nova:extra-spec:`hw:emulator_threads_policy` extra spec, thus allowing all
+ guest cores to be allocated as real-time cores.
+
+.. versionchanged:: 22.0.0 (Victoria)
+
+ Previously, a leading caret was necessary when specifying the value for
+ :nova:extra-spec:`hw:cpu_realtime_mask` and omitting it would be equivalent
+ to not setting the mask, resulting in a failure to spawn the instance.
+
+Compared to configuring the host, configuring the guest is relatively trivial
+and merely requires a combination of flavor extra specs and image metadata
+properties, along with a suitable real-time guest OS.
+
+Enable real-time by setting the :nova:extra-spec:`hw:cpu_realtime` flavor extra
+spec to ``yes`` or a truthy value. When this is configured, it is necessary to
+specify where guest overhead processes should be scheduled to. This can be
+accomplished in one of three ways. Firstly, the
+:nova:extra-spec:`hw:cpu_realtime_mask` extra spec or equivalent image metadata
+property can be used to indicate which guest cores should be scheduled as
+real-time cores, leaving the remainder to be scheduled as non-real-time cores
+and to handle overhead processes. For example, to allocate the first two cores
+of an 8 core instance as the non-real-time cores:
+
+.. code-block:: console
+
+ $ openstack flavor set $FLAVOR \
+ --property hw:cpu_realtime=yes \
+ --property hw:cpu_realtime_mask=2-7 # so 0,1 are non-real-time
+
+In this configuration, any non-real-time cores configured will have an implicit
+``dedicated`` :ref:`CPU pinning policy <cpu-pinning-policies>` applied. It is
+possible to apply a ``shared`` policy for these non-real-time cores by
+specifying the ``mixed`` :ref:`CPU pinning policy <cpu-pinning-policies>` via
+the :nova:extra-spec:`hw:cpu_policy` extra spec. This can be useful to increase
+resource utilization of the host. For example:
+
+.. code-block:: console
+
+ $ openstack flavor set $FLAVOR \
+ --property hw:cpu_policy=mixed \
+ --property hw:cpu_realtime=yes \
+ --property hw:cpu_realtime_mask=2-7 # so 0,1 are non-real-time and unpinned
+
+Finally, you can explicitly :ref:`offload guest overhead processes to another
+host core <emulator-thread-pinning-policies>` using the
+:nova:extra-spec:`hw:emulator_threads_policy` extra spec. For example:
+
+.. code-block:: console
+
+ $ openstack flavor set $FLAVOR \
+ --property hw:cpu_realtime=yes \
+ --property hw:emulator_threads_policy=share
+
+.. note::
+
+ Emulator thread pinning requires additional host configuration.
+ Refer to :ref:`the documentation <emulator-thread-pinning-policies>` for
+ more information.
+
+In addition to configuring the instance CPUs, it is also likely that you will
+need to configure guest huge pages. For information on how to configure these,
+refer to :doc:`the documentation <huge-pages>`.
+
+References
+----------
+
+* `Libvirt real time instances (spec)`__
+* `The Real Time Linux collaborative project`__
+* `Deploying Real Time OpenStack`__
+
+.. __: https://specs.openstack.org/openstack/nova-specs/specs/mitaka/implemented/libvirt-real-time.html
+.. __: https://wiki.linuxfoundation.org/realtime/start
+.. __: https://that.guru/blog/deploying-real-time-openstack/
diff --git a/doc/source/admin/remote-console-access.rst b/doc/source/admin/remote-console-access.rst
index f5a2f01556b..01ef44810c3 100644
--- a/doc/source/admin/remote-console-access.rst
+++ b/doc/source/admin/remote-console-access.rst
@@ -2,111 +2,164 @@
Configure remote console access
===============================
-To provide a remote console or remote desktop access to guest virtual machines,
-use VNC, SPICE HTML5 or Serial through either the OpenStack dashboard or the
-command line. Best practice is to select only one of them to run.
+OpenStack provides a number of different methods to interact with your guests:
+VNC, SPICE, Serial, RDP or MKS. If configured, these can be accessed by users
+through the OpenStack dashboard or the command line. This document outlines how
+these different technologies can be configured.
-.. _about-nova-consoleauth:
-About nova-consoleauth
-----------------------
+Overview
+--------
-The client proxies leverage a shared service to manage token authentication
-called ``nova-consoleauth``. This service must be running for either proxy to
-work. Many proxies of either type can be run against a single
-``nova-consoleauth`` service in a cluster configuration.
+It is considered best practice to deploy only one of the console types. Note
+also that not all console types are supported by all compute drivers.
+Regardless of which option is chosen, a console proxy service is required.
+These proxy services are responsible for the following:
-Do not confuse the ``nova-consoleauth`` shared service with ``nova-console``,
-which is a XenAPI-specific service that most recent VNC proxy architectures do
-not use.
+- Provide a bridge between the public network where the clients live and the
+ private network where the servers with consoles live.
-.. deprecated:: 18.0.0
+- Mediate token authentication.
- ``nova-consoleauth`` is deprecated since 18.0.0 (Rocky) and will be removed
- in an upcoming release.
+- Transparently handle hypervisor-specific connection details to provide a
+ uniform client experience.
-SPICE console
--------------
+For some combinations of compute driver and console driver, these proxy
+services are provided by the hypervisor or another service. For all others,
+nova provides services to handle this proxying. Consider a noVNC-based VNC
+console connection for example:
-OpenStack Compute supports VNC consoles to guests. The VNC protocol is fairly
-limited, lacking support for multiple monitors, bi-directional audio, reliable
-cut-and-paste, video streaming and more. SPICE is a new protocol that aims to
-address the limitations in VNC and provide good remote desktop support.
+#. A user connects to the API and gets an ``access_url`` such as,
+ ``http://ip:port/?path=%3Ftoken%3Dxyz``.
-SPICE support in OpenStack Compute shares a similar architecture to the VNC
-implementation. The OpenStack dashboard uses a SPICE-HTML5 widget in its
-console tab that communicates to the ``nova-spicehtml5proxy`` service by using
-SPICE-over-websockets. The ``nova-spicehtml5proxy`` service communicates
-directly with the hypervisor process by using SPICE.
+#. The user pastes the URL in a browser or uses it as a client parameter.
-VNC must be explicitly disabled to get access to the SPICE console. Set the
-``vnc_enabled`` option to ``False`` in the ``[DEFAULT]`` section to disable the
-VNC console.
+#. The browser or client connects to the proxy.
-Use the following options to configure SPICE as the console for OpenStack
-Compute:
+#. The proxy authorizes the token for the user, and maps the token to the
+ *private* host and port of the VNC server for an instance.
-.. code-block:: console
+ The compute host specifies the address that the proxy should use to connect
+ through the :oslo.config:option:`vnc.server_proxyclient_address` option. In
+ this way, the VNC proxy works as a bridge between the public network and
+ private host network.
- [spice]
- agent_enabled = False
- enabled = True
- html5proxy_base_url = http://IP_ADDRESS:6082/spice_auto.html
- html5proxy_host = 0.0.0.0
- html5proxy_port = 6082
- keymap = en-us
- server_listen = 127.0.0.1
- server_proxyclient_address = 127.0.0.1
+#. The proxy initiates the connection to VNC server and continues to proxy
+ until the session ends.
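+
+For example, a user would typically obtain the ``access_url`` from step 1
+using the CLI. A minimal sketch, with an illustrative server name:
+
+.. code-block:: console
+
+ $ openstack console url show --novnc myserver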
-Replace ``IP_ADDRESS`` with the management interface IP address of the
-controller or the VIP.
+This means a typical deployment with noVNC-based VNC consoles will have the
+following components:
-VNC console proxy
------------------
+- One or more :program:`nova-novncproxy` services. Supports browser-based noVNC
+ clients. For simple deployments, this service typically runs on the same
+ machine as :program:`nova-api` because it operates as a proxy between the
+ public network and the private compute host network.
-The VNC proxy is an OpenStack component that enables compute service users to
-access their instances through VNC clients.
+- One or more :program:`nova-compute` services. Hosts the instances for which
+ consoles are provided.
+
+.. todo::
+
+ The below diagram references :program:`nova-consoleauth` and needs to be
+ updated.
+
+This particular example is illustrated below.
+
+.. figure:: figures/SCH_5009_V00_NUAC-VNC_OpenStack.png
+ :alt: noVNC process
+ :width: 95%
+
+
+noVNC-based VNC console
+-----------------------
+
+VNC is a graphical console with wide support among many hypervisors and
+clients. noVNC provides VNC support through a web browser.
.. note::
- The web proxy console URLs do not support the websocket protocol scheme
- (ws://) on python versions less than 2.7.4.
+ It has `been reported`__ that versions of noVNC older than 0.6 do not work
+ with the :program:`nova-novncproxy` service.
-The VNC console connection works as follows:
+ If using non-US key mappings, you need at least noVNC 1.0.0 for `a fix`__.
-#. A user connects to the API and gets an ``access_url`` such as,
- ``http://ip:port/?token=xyz``.
+ If using VMware ESX/ESXi hypervisors, you need at least noVNC 1.1.0 for
+ `a fix`__.
-#. The user pastes the URL in a browser or uses it as a client parameter.
+ __ https://bugs.launchpad.net/nova/+bug/1752896
+ __ https://github.com/novnc/noVNC/commit/99feba6ba8fee5b3a2b2dc99dc25e9179c560d31
+ __ https://github.com/novnc/noVNC/commit/2c813a33fe6821f5af737327c50f388052fa963b
-#. The browser or client connects to the proxy.
+Configuration
+~~~~~~~~~~~~~
-#. The proxy talks to ``nova-consoleauth`` to authorize the token for the user,
- and maps the token to the *private* host and port of the VNC server for an
- instance.
+To enable the noVNC VNC console service, you must configure both the
+:program:`nova-novncproxy` service and the :program:`nova-compute` service.
+Most options are defined in the :oslo.config:group:`vnc` group.
- The compute host specifies the address that the proxy should use to connect
- through the ``nova.conf`` file option, ``server_proxyclient_address``. In
- this way, the VNC proxy works as a bridge between the public network and
- private host network.
+The :program:`nova-novncproxy` service accepts the following options:
-#. The proxy initiates the connection to VNC server and continues to proxy
- until the session ends.
+- :oslo.config:option:`daemon`
+- :oslo.config:option:`ssl_only`
+- :oslo.config:option:`source_is_ipv6`
+- :oslo.config:option:`cert`
+- :oslo.config:option:`key`
+- :oslo.config:option:`web`
+- :oslo.config:option:`console.ssl_ciphers`
+- :oslo.config:option:`console.ssl_minimum_version`
+- :oslo.config:option:`vnc.novncproxy_host`
+- :oslo.config:option:`vnc.novncproxy_port`
-The proxy also tunnels the VNC protocol over WebSockets so that the ``noVNC``
-client can talk to VNC servers. In general, the VNC proxy:
+If using the libvirt compute driver and enabling :ref:`vnc-security`, the
+following additional options are supported:
-- Bridges between the public network where the clients live and the private
- network where VNC servers live.
+- :oslo.config:option:`vnc.auth_schemes`
+- :oslo.config:option:`vnc.vencrypt_client_key`
+- :oslo.config:option:`vnc.vencrypt_client_cert`
+- :oslo.config:option:`vnc.vencrypt_ca_certs`
-- Mediates token authentication.
+For example, to configure this via a ``nova-novncproxy.conf`` file:
-- Transparently deals with hypervisor-specific connection details to provide a
- uniform client experience.
+.. code-block:: ini
-.. figure:: figures/SCH_5009_V00_NUAC-VNC_OpenStack.png
- :alt: noVNC process
- :width: 95%
+ [vnc]
+ novncproxy_host = 0.0.0.0
+ novncproxy_port = 6082
+
+.. note::
+
+ This doesn't show configuration with security. For information on how to
+ configure this, refer to :ref:`vnc-security` below.
+
+The :program:`nova-compute` service requires the following options to configure
+noVNC-based VNC console support:
+
+- :oslo.config:option:`vnc.enabled`
+- :oslo.config:option:`vnc.novncproxy_base_url`
+- :oslo.config:option:`vnc.server_listen`
+- :oslo.config:option:`vnc.server_proxyclient_address`
+
+If using the VMware compute driver, the following additional options are
+supported:
+
+- :oslo.config:option:`vmware.vnc_port`
+- :oslo.config:option:`vmware.vnc_port_total`
+
+For example, to configure this via a ``nova.conf`` file:
+
+.. code-block:: ini
+
+ [vnc]
+ enabled = True
+ novncproxy_base_url = http://IP_ADDRESS:6082/vnc_auto.html
+ server_listen = 127.0.0.1
+ server_proxyclient_address = 127.0.0.1
+
+Replace ``IP_ADDRESS`` with the IP address from which the proxy is accessible
+by the outside world. For example, this may be the management interface IP
+address of the controller or the VIP.
+
+.. _vnc-security:
VNC proxy security
~~~~~~~~~~~~~~~~~~
@@ -142,7 +195,7 @@ certificates:
The authority certificate used to sign ``server-cert.pem`` and sign the VNC
proxy server certificates.
-The certificates must have v3 basic constraints [3]_ present to indicate the
+The certificates must have v3 basic constraints [2]_ present to indicate the
permitted key use and purpose data.
We recommend using a dedicated certificate authority solely for the VNC
@@ -151,7 +204,7 @@ for the OpenStack deployment. This is because libvirt does not currently have
a mechanism to restrict what certificates can be presented by the proxy server.
For further details on certificate creation, consult the QEMU manual page
-documentation on VNC server certificate setup [2]_.
+documentation on VNC server certificate setup [1]_.
Configure libvirt to enable the VeNCrypt authentication scheme for the VNC
server. In :file:`/etc/libvirt/qemu.conf`, uncomment the following settings:
@@ -170,9 +223,9 @@ server. In :file:`/etc/libvirt/qemu.conf`, uncomment the following settings:
After editing :file:`qemu.conf`, the ``libvirtd`` service must be restarted:
-.. code:: shell
+.. code-block:: shell
- $ systemctl restart libvirtd.service
+ $ systemctl restart libvirtd.service
Changes will not apply to any existing running guests on the Compute node, so
this configuration should be done before launching any instances.
@@ -185,10 +238,10 @@ scheme, which does no checking. Therefore, it is necessary to enable the
``vencrypt`` authentication scheme by editing the :file:`nova.conf` file to
set.
-.. code::
+.. code-block:: ini
- [vnc]
- auth_schemes=vencrypt,none
+ [vnc]
+ auth_schemes=vencrypt,none
The :oslo.config:option:`vnc.auth_schemes` values should be listed in order
of preference. If enabling VeNCrypt on an existing deployment which already has
@@ -224,188 +277,275 @@ certificates to the noVNC proxy.
The certificate authority cert used to sign ``client-cert.pem`` and sign the
compute node VNC server certificates.
-The certificates must have v3 basic constraints [3]_ present to indicate the
+The certificates must have v3 basic constraints [2]_ present to indicate the
permitted key use and purpose data.
Once the certificates have been created, the noVNC console proxy service must
be told where to find them. This requires editing :file:`nova.conf` to set.
-.. code::
+.. code-block:: ini
- [vnc]
- vencrypt_client_key=/etc/pki/nova-novncproxy/client-key.pem
- vencrypt_client_cert=/etc/pki/nova-novncproxy/client-cert.pem
- vencrypt_ca_certs=/etc/pki/nova-novncproxy/ca-cert.pem
+ [vnc]
+ vencrypt_client_key=/etc/pki/nova-novncproxy/client-key.pem
+ vencrypt_client_cert=/etc/pki/nova-novncproxy/client-cert.pem
+ vencrypt_ca_certs=/etc/pki/nova-novncproxy/ca-cert.pem
-VNC configuration options
-~~~~~~~~~~~~~~~~~~~~~~~~~
-To customize the VNC console, use the following configuration options in your
-``nova.conf`` file:
+SPICE console
+-------------
-.. note::
+The VNC protocol is fairly limited, lacking support for multiple monitors,
+bi-directional audio, reliable cut-and-paste, video streaming and more. SPICE
+is a new protocol that aims to address the limitations in VNC and provide good
+remote desktop support.
- To support :ref:`live migration `,
- you cannot specify a specific IP address for ``server_listen``, because
- that IP address does not exist on the destination host.
-
-.. list-table:: **Description of VNC configuration options**
- :header-rows: 1
- :widths: 25 25
-
- * - Configuration option = Default value
- - Description
- * - **[DEFAULT]**
- -
- * - ``daemon = False``
- - (BoolOpt) Become a daemon (background process)
- * - ``key = None``
- - (StrOpt) SSL key file (if separate from cert)
- * - ``novncproxy_host = 0.0.0.0``
- - (StrOpt) Host on which to listen for incoming requests
- * - ``novncproxy_port = 6080``
- - (IntOpt) Port on which to listen for incoming requests
- * - ``record = False``
- - (BoolOpt) Record sessions to FILE.[session_number]
- * - ``source_is_ipv6 = False``
- - (BoolOpt) Source is ipv6
- * - ``ssl_only = False``
- - (BoolOpt) Disallow non-encrypted connections
- * - ``web = /usr/share/spice-html5``
- - (StrOpt) Run webserver on same port. Serve files from DIR.
- * - **[vmware]**
- -
- * - ``vnc_port = 5900``
- - (IntOpt) VNC starting port
- * - ``vnc_port_total = 10000``
- - vnc_port_total = 10000
- * - **[vnc]**
- -
- * - enabled = True
- - (BoolOpt) Enable VNC related features
- * - novncproxy_base_url = http://127.0.0.1:6080/vnc_auto.html
- - (StrOpt) Location of VNC console proxy, in the form
- "http://127.0.0.1:6080/vnc_auto.html"
- * - server_listen = 127.0.0.1
- - (StrOpt) IP address on which instance vncservers should listen
- * - server_proxyclient_address = 127.0.0.1
- - (StrOpt) The address to which proxy clients (like nova-xvpvncproxy)
- should connect
- * - xvpvncproxy_base_url = http://127.0.0.1:6081/console
- - (StrOpt) Location of nova xvp VNC console proxy, in the form
- "http://127.0.0.1:6081/console"
+SPICE support in OpenStack Compute shares a similar architecture to the VNC
+implementation. The OpenStack dashboard uses a SPICE-HTML5 widget in its
+console tab that communicates with the :program:`nova-spicehtml5proxy` service
+by using SPICE-over-websockets. The :program:`nova-spicehtml5proxy` service
+communicates directly with the hypervisor process by using SPICE.
-.. note::
+Configuration
+~~~~~~~~~~~~~
- - The ``server_proxyclient_address`` defaults to ``127.0.0.1``, which is
- the address of the compute host that Compute instructs proxies to use when
- connecting to instance servers.
+.. important::
- - For all-in-one XenServer domU deployments, set this to ``169.254.0.1.``
+ VNC must be explicitly disabled to get access to the SPICE console. Set the
+ :oslo.config:option:`vnc.enabled` option to ``False`` to disable the
+ VNC console.
- - For multi-host XenServer domU deployments, set to a ``dom0 management IP``
- on the same network as the proxies.
+To enable the SPICE console service, you must configure both the
+:program:`nova-spicehtml5proxy` service and the :program:`nova-compute`
+service. Most options are defined in the :oslo.config:group:`spice` group.
- - For multi-host libvirt deployments, set to a host management IP on the
- same network as the proxies.
+The :program:`nova-spicehtml5proxy` service accepts the following options.
-Typical deployment
-~~~~~~~~~~~~~~~~~~
+- :oslo.config:option:`daemon`
+- :oslo.config:option:`ssl_only`
+- :oslo.config:option:`source_is_ipv6`
+- :oslo.config:option:`cert`
+- :oslo.config:option:`key`
+- :oslo.config:option:`web`
+- :oslo.config:option:`console.ssl_ciphers`
+- :oslo.config:option:`console.ssl_minimum_version`
+- :oslo.config:option:`spice.html5proxy_host`
+- :oslo.config:option:`spice.html5proxy_port`
-A typical deployment has the following components:
+For example, to configure this via a ``nova-spicehtml5proxy.conf`` file:
-- A ``nova-consoleauth`` process. Typically runs on the controller host.
+.. code-block:: ini
-- One or more ``nova-novncproxy`` services. Supports browser-based noVNC
- clients. For simple deployments, this service typically runs on the same
- machine as ``nova-api`` because it operates as a proxy between the public
- network and the private compute host network.
+ [spice]
+ html5proxy_host = 0.0.0.0
+ html5proxy_port = 6082
-- One or more ``nova-xvpvncproxy`` services. Supports the special Java client
- discussed here. For simple deployments, this service typically runs on the
- same machine as ``nova-api`` because it acts as a proxy between the public
- network and the private compute host network.
+The :program:`nova-compute` service requires the following options to configure
+SPICE console support.
-- One or more compute hosts. These compute hosts must have correctly configured
- options, as follows.
+- :oslo.config:option:`spice.enabled`
+- :oslo.config:option:`spice.agent_enabled`
+- :oslo.config:option:`spice.html5proxy_base_url`
+- :oslo.config:option:`spice.server_listen`
+- :oslo.config:option:`spice.server_proxyclient_address`
-nova-novncproxy (noVNC)
-~~~~~~~~~~~~~~~~~~~~~~~
+For example, to configure this via a ``nova.conf`` file:
-You must install the noVNC package, which contains the ``nova-novncproxy``
-service. As root, run the following command:
+.. code-block:: ini
-.. code-block:: console
+ [spice]
+ agent_enabled = False
+ enabled = True
+ html5proxy_base_url = http://IP_ADDRESS:6082/spice_auto.html
+ server_listen = 127.0.0.1
+ server_proxyclient_address = 127.0.0.1
- # apt-get install nova-novncproxy
+Replace ``IP_ADDRESS`` with the IP address from which the proxy is accessible
+by the outside world. For example, this may be the management interface IP
+address of the controller or the VIP.
-.. note::
- It has `been reported`_ that versions of noVNC older than 0.6 do not work
- with the ``nova-novncproxy`` service.
+Serial
+------
- If using non-US key mappings, then you need at least noVNC 1.0.0 for `a fix
- `_.
+Serial consoles provide an alternative to graphical consoles like VNC or SPICE.
+They work a little differently from graphical consoles, so an example is
+beneficial. The example below uses these nodes:
-.. _been reported: https://bugs.launchpad.net/nova/+bug/1752896
+* controller node with IP ``192.168.50.100``
+* compute node 1 with IP ``192.168.50.104``
+* compute node 2 with IP ``192.168.50.105``
-The service starts automatically on installation.
+Here's the general flow of actions:
-To restart the service, run:
+.. figure:: figures/serial-console-flow.svg
+ :width: 100%
+ :alt: The serial console flow
-.. code-block:: console
+1. The user requests a serial console connection string for an instance
+ from the REST API.
+2. The :program:`nova-api` service asks the :program:`nova-compute` service,
+ which manages that instance, to fulfill that request.
+3. That connection string gets used by the user to connect to the
+ :program:`nova-serialproxy` service.
+4. The :program:`nova-serialproxy` service then proxies the console interaction
+ to the port of the compute node where the instance is running. That port
+ gets forwarded by the hypervisor (or ironic conductor, for ironic) to the
+ guest.
- # service nova-novncproxy restart
+Configuration
+~~~~~~~~~~~~~
-The configuration option parameter should point to your ``nova.conf`` file,
-which includes the message queue server address and credentials.
+To enable the serial console service, you must configure both the
+:program:`nova-serialproxy` service and the :program:`nova-compute` service.
+Most options are defined in the :oslo.config:group:`serial_console` group.
-By default, ``nova-novncproxy`` binds on ``0.0.0.0:6080``.
+The :program:`nova-serialproxy` service accepts the following options.
-To connect the service to your Compute deployment, add the following
-configuration options to your ``nova.conf`` file:
+- :oslo.config:option:`daemon`
+- :oslo.config:option:`ssl_only`
+- :oslo.config:option:`source_is_ipv6`
+- :oslo.config:option:`cert`
+- :oslo.config:option:`key`
+- :oslo.config:option:`web`
+- :oslo.config:option:`console.ssl_ciphers`
+- :oslo.config:option:`console.ssl_minimum_version`
+- :oslo.config:option:`serial_console.serialproxy_host`
+- :oslo.config:option:`serial_console.serialproxy_port`
-- ``server_listen=0.0.0.0``
+For example, to configure this via a ``nova-serialproxy.conf`` file:
- Specifies the address on which the VNC service should bind. Make sure it is
- assigned one of the compute node interfaces. This address is the one used by
- your domain file.
+.. code-block:: ini
- .. code-block:: console
+ [serial_console]
+ serialproxy_host = 0.0.0.0
+ serialproxy_port = 6083
-
+The :program:`nova-compute` service requires the following options to configure
+serial console support.
- .. note::
+- :oslo.config:option:`serial_console.enabled`
+- :oslo.config:option:`serial_console.base_url`
+- :oslo.config:option:`serial_console.proxyclient_address`
+- :oslo.config:option:`serial_console.port_range`
- To use live migration, use the 0.0.0.0 address.
+For example, to configure this via a ``nova.conf`` file:
-- ``server_proxyclient_address=127.0.0.1``
+.. code-block:: ini
- The address of the compute host that Compute instructs proxies to use when
- connecting to instance ``vncservers``.
+ [serial_console]
+ enabled = True
+ base_url = ws://IP_ADDRESS:6083/
+ proxyclient_address = 127.0.0.1
+ port_range = 10000:20000
+
+Replace ``IP_ADDRESS`` with the IP address from which the proxy is accessible
+by the outside world. For example, this may be the management interface IP
+address of the controller or the VIP.
+
+There are some things to keep in mind when configuring these options:
+
+* :oslo.config:option:`serial_console.serialproxy_host` is the address the
+ :program:`nova-serialproxy` service listens to for incoming connections.
+* :oslo.config:option:`serial_console.serialproxy_port` must be the same value
+ as the port in the URI of :oslo.config:option:`serial_console.base_url`.
+* The URL defined in :oslo.config:option:`serial_console.base_url` will form
+ part of the response the user will get when asking for a serial console
+ connection string. This means it needs to be a URL the user can connect to.
+* :oslo.config:option:`serial_console.proxyclient_address` will be used by the
+ :program:`nova-serialproxy` service to determine where to connect to for
+ proxying the console interaction.
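+
+For example, once both services are configured, a user can request the serial
+console connection string described in the flow above (the server name is
+illustrative):
+
+.. code-block:: console
+
+ $ openstack console url show --serial myserver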
-Frequently asked questions about VNC access to virtual machines
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-- **Q: What is the difference between ``nova-xvpvncproxy`` and
- ``nova-novncproxy``?**
+RDP
+---
- A: ``nova-xvpvncproxy``, which ships with OpenStack Compute, is a proxy that
- supports a simple Java client. nova-novncproxy uses noVNC to provide VNC
- support through a web browser.
+RDP is a graphical console primarily used with Hyper-V. Nova does not provide a
+console proxy service for RDP - instead, an external proxy service, such as the
+:program:`wsgate` application provided by `FreeRDP-WebConnect`__, should be
+used.
+
+__ https://github.com/FreeRDP/FreeRDP-WebConnect
+
+Configuration
+~~~~~~~~~~~~~
+
+To enable the RDP console service, you must configure both a console proxy
+service like :program:`wsgate` and the :program:`nova-compute` service. All
+options for the latter service are defined in the :oslo.config:group:`rdp`
+group.
+
+Information on configuring an RDP console proxy service, such as
+:program:`wsgate`, is not provided here. However, more information can be found
+at `cloudbase.it`__.
+
+The :program:`nova-compute` service requires the following options to configure
+RDP console support.
+
+- :oslo.config:option:`rdp.enabled`
+- :oslo.config:option:`rdp.html5_proxy_base_url`
+
+For example, to configure this via a ``nova.conf`` file:
+
+.. code-block:: ini
+
+ [rdp]
+ enabled = True
+ html5_proxy_base_url = https://IP_ADDRESS:6083/
+
+Replace ``IP_ADDRESS`` with the IP address from which the proxy is accessible
+by the outside world. For example, this may be the management interface IP
+address of the controller or the VIP.
+
+__ https://cloudbase.it/freerdp-html5-proxy-windows/
-- **Q: I want VNC support in the OpenStack dashboard. What services do I
- need?**
- A: You need ``nova-novncproxy``, ``nova-consoleauth``, and correctly
- configured compute hosts.
+MKS
+---
-- **Q: When I use ``nova get-vnc-console`` or click on the VNC tab of the
- OpenStack dashboard, it hangs. Why?**
+MKS is the protocol used for accessing the console of a virtual machine running
+on VMware vSphere. It is very similar to VNC. Due to the architecture of the
+VMware vSphere hypervisor, it is not necessary to run a console proxy service.
- A: Make sure you are running ``nova-consoleauth`` (in addition to
- ``nova-novncproxy``). The proxies rely on ``nova-consoleauth`` to validate
- tokens, and waits for a reply from them until a timeout is reached.
+Configuration
+~~~~~~~~~~~~~
+
+To enable the MKS console service, only the :program:`nova-compute` service
+must be configured. All options are defined in the :oslo.config:group:`mks`
+group.
+
+The :program:`nova-compute` service requires the following options to configure
+MKS console support.
+
+- :oslo.config:option:`mks.enabled`
+- :oslo.config:option:`mks.mksproxy_base_url`
+
+For example, to configure this via a ``nova.conf`` file:
+
+.. code-block:: ini
+
+ [mks]
+ enabled = True
+ mksproxy_base_url = https://127.0.0.1:6090/
+
+
+.. _about-nova-consoleauth:
+
+About ``nova-consoleauth``
+--------------------------
+
+The now-removed :program:`nova-consoleauth` service was previously used to
+provide a shared service to manage token authentication that the client proxies
+outlined above could leverage. Token authentication was moved to the database in
+18.0.0 (Rocky) and the service was removed in 20.0.0 (Train).
+
+
+Frequently Asked Questions
+--------------------------
+
+- **Q: I want VNC support in the OpenStack dashboard. What services do I
+ need?**
+
+ A: You need ``nova-novncproxy`` and correctly configured compute hosts.
- **Q: My VNC proxy worked fine during my all-in-one test, but now it doesn't
work on multi host. Why?**
@@ -421,13 +561,12 @@ Frequently asked questions about VNC access to virtual machines
Your ``nova-compute`` configuration file must set the following values:
- .. code-block:: console
+ .. code-block:: ini
[vnc]
# These flags help construct a connection data structure
server_proxyclient_address=192.168.1.2
novncproxy_base_url=http://172.24.1.1:6080/vnc_auto.html
- xvpvncproxy_base_url=http://172.24.1.1:6081/console
# This is the address where the underlying vncserver (not the proxy)
# will listen for connections.
@@ -435,11 +574,11 @@ Frequently asked questions about VNC access to virtual machines
.. note::
- ``novncproxy_base_url`` and ``xvpvncproxy_base_url`` use a public IP; this
- is the URL that is ultimately returned to clients, which generally do not
- have access to your private network. Your PROXYSERVER must be able to
- reach ``server_proxyclient_address``, because that is the address over
- which the VNC connection is proxied.
+ ``novncproxy_base_url`` uses a public IP; this is the URL that is
+ ultimately returned to clients, which generally do not have access to your
+ private network. Your PROXYSERVER must be able to reach
+ ``server_proxyclient_address``, because that is the address over which the
+ VNC connection is proxied.
- **Q: My noVNC does not work with recent versions of web browsers. Why?**
@@ -456,7 +595,7 @@ Frequently asked questions about VNC access to virtual machines
Modify the ``width`` and ``height`` options, as follows:
- .. code-block:: console
+ .. code-block:: ini
@@ -467,50 +606,9 @@ Frequently asked questions about VNC access to virtual machines
console connections, make sure that the value of ``novncproxy_base_url`` is
set explicitly where the ``nova-novncproxy`` service is running.
-Serial Console
---------------
-
-The *serial console* feature [1]_ in nova is an alternative for graphical
-consoles like *VNC*, *SPICE*, *RDP*. The example below uses these nodes:
-
-* controller node with IP ``192.168.50.100``
-* compute node 1 with IP ``192.168.50.104``
-* compute node 2 with IP ``192.168.50.105``
-
-Here's the general flow of actions:
-
-.. figure:: figures/serial-console-flow.svg
- :width: 100%
- :alt: The serial console flow
-
-1. The user requests a serial console connection string for an instance
- from the REST API.
-2. The `nova-api` service asks the `nova-compute` service, which manages
- that instance, to fulfill that request.
-3. That connection string gets used by the user to connect to the
- `nova-serialproxy` service.
-4. The `nova-serialproxy` service then proxies the console interaction
- to the port of the compute node where the instance is running. That
- port gets forwarded by the hypervisor into the KVM guest.
-
-The config options for those nodes, which are in the section
-``[serial_console]`` of your ``nova.conf``, are not intuitive at first.
-Keep these things in mind:
-
-* The ``serialproxy_host`` is the address the `nova-serialproxy` service
- listens to for incoming connections (see step 3).
-* The ``serialproxy_port`` value must be the very same as in the URI
- of ``base_url``.
-* The ``base_url`` on the compute node will be part of the response the user
- will get when asking for a serial console connection string (see step 1
- from above). This means it needs to be an URL the user can connect to.
-* The ``proxyclient_address`` on the compute node will be used by the
- `nova-serialproxy` service to determine where to connect to for
- proxying the console interaction.
References
----------
-.. [1] https://specs.openstack.org/openstack/nova-specs/specs/juno/implemented/serial-ports.html
-.. [2] https://qemu.weilnetz.de/doc/qemu-doc.html#vnc_005fsec_005fcertificate_005fverify
-.. [3] https://tools.ietf.org/html/rfc3280#section-4.2.1.10
+.. [1] https://qemu.weilnetz.de/doc/qemu-doc.html#vnc_005fsec_005fcertificate_005fverify
+.. [2] https://tools.ietf.org/html/rfc3280#section-4.2.1.10
diff --git a/doc/source/admin/resource-limits.rst b/doc/source/admin/resource-limits.rst
new file mode 100644
index 00000000000..c74ad31c17b
--- /dev/null
+++ b/doc/source/admin/resource-limits.rst
@@ -0,0 +1,312 @@
+===============
+Resource Limits
+===============
+
+Nova supports configuring limits on individual resources including CPU, memory,
+disk and network. These limits can be used to enforce basic Quality-of-Service
+(QoS) policies on such resources.
+
+.. note::
+
+ Hypervisor-enforced resource limits are distinct from API-enforced user and
+ project quotas. For information on the latter, refer to :doc:`quotas`.
+
+.. warning::
+
+ This feature is poorly tested and poorly maintained. It may no longer work
+ as expected. Where possible, consider using the QoS policies provided by
+ other services, such as
+ :cinder-doc:`Cinder <admin/blockstorage-basic-volume-qos.html>` and
+ :neutron-doc:`Neutron <admin/config-qos.html>`.
+
+
+Configuring resource limits
+---------------------------
+
+Resource quota enforcement support is specific to the virt driver in use on
+compute hosts.
+
+libvirt
+~~~~~~~
+
+The libvirt driver supports CPU, disk and VIF limits. Unfortunately all of
+these work quite differently, as discussed below.
+
+CPU limits
+^^^^^^^^^^
+
+Libvirt enforces CPU limits in terms of *shares* and *quotas*, configured
+via :nova:extra-spec:`quota:cpu_shares` and :nova:extra-spec:`quota:cpu_period`
+/ :nova:extra-spec:`quota:cpu_quota`, respectively. Both are implemented using
+the `cgroups v1 cpu controller`__.
+
+CPU shares are a proportional weighted share of total CPU resources relative to
+other instances. It does not limit CPU usage if CPUs are not busy. There is no
+unit and the value is purely relative to other instances, so an instance
+configured with a value of 2048 will get twice as much CPU time as a VM
+configured with the value 1024. For example, to configure a CPU share of 1024
+for a flavor:
+
+.. code-block:: console
+
+ $ openstack flavor set $FLAVOR --property quota:cpu_shares=1024
+
+The CPU quotas require both a period and quota. The CPU period specifies the
+enforcement interval in microseconds, while the CPU quota specifies the maximum
+allowed bandwidth in microseconds that each vCPU of the instance can
+consume. The CPU period must be in the range 1000 (1 ms) to 1,000,000 (1 s) or
+0 (disabled). The CPU quota must be in the range 1000 (1 ms) to 2^64 or 0
+(disabled). Where the CPU quota exceeds the CPU period, this means the guest
+vCPU process is able to consume multiple pCPUs worth of bandwidth. For example,
+to limit each guest vCPU to 1 pCPU worth of runtime per period:
+
+.. code-block:: console
+
+ $ openstack flavor set $FLAVOR \
+ --property quota:cpu_period=1000 \
+ --property quota:cpu_quota=1000
+
+To limit each guest vCPU to 2 pCPUs worth of runtime per period:
+
+.. code-block:: console
+
+ $ openstack flavor set $FLAVOR \
+ --property quota:cpu_period=1000 \
+ --property quota:cpu_quota=2000
+
+Finally, to limit each guest vCPU to 0.5 pCPUs worth of runtime per period:
+
+.. code-block:: console
+
+ $ openstack flavor set $FLAVOR \
+ --property quota:cpu_period=1000 \
+ --property quota:cpu_quota=500
+
+.. note::
+
+ Smaller periods will ensure a consistent latency response at the expense of
+ burst capacity.
+
+CPU shares and CPU quotas can work hand-in-hand. For example, if two instances
+were configured with :nova:extra-spec:`quota:cpu_shares`\ =1024 and
+:nova:extra-spec:`quota:cpu_period`\ =100000 (100 ms) for both, then configuring
+both with a :nova:extra-spec:`quota:cpu_quota`\ =75000 (75 ms) will result in
+them sharing a host CPU equally, with both getting exactly 50 ms of CPU time.
+If instead only one instance gets :nova:extra-spec:`quota:cpu_quota`\ =75000
+(75 ms) while the other gets :nova:extra-spec:`quota:cpu_quota`\ =25000 (25 ms),
+then the first will get 3/4 of the time per period.
+
+.. __: https://man7.org/linux/man-pages/man7/cgroups.7.html
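+
+As a concrete sketch of the first scenario above, both instances' flavors
+would carry the following extra specs (values taken from the example):
+
+.. code-block:: console
+
+ $ openstack flavor set $FLAVOR \
+ --property quota:cpu_shares=1024 \
+ --property quota:cpu_period=100000 \
+ --property quota:cpu_quota=75000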
+
+Memory Limits
+^^^^^^^^^^^^^
+
+The libvirt driver does not support memory limits.
+
+Disk I/O Limits
+^^^^^^^^^^^^^^^
+
+Libvirt enforces disk limits through maximum disk read, write and total bytes
+per second, using the :nova:extra-spec:`quota:disk_read_bytes_sec`,
+:nova:extra-spec:`quota:disk_write_bytes_sec` and
+:nova:extra-spec:`quota:disk_total_bytes_sec` extra specs, respectively. It can
+also enforce disk limits through maximum disk read, write and total I/O
+operations per second, using the :nova:extra-spec:`quota:disk_read_iops_sec`,
+:nova:extra-spec:`quota:disk_write_iops_sec` and
+:nova:extra-spec:`quota:disk_total_iops_sec` extra specs, respectively. For
+example, to set a maximum disk write of 10 MB/sec for a flavor:
+
+.. code-block:: console
+
+ $ openstack flavor set $FLAVOR \
+ --property quota:disk_write_bytes_sec=10485760
+
+Network bandwidth limits
+^^^^^^^^^^^^^^^^^^^^^^^^
+
+.. warning::
+
+ These limits are enforced via libvirt and will only work where the network
+ is connected to the instance using a tap interface. It will not work for
+ things like :doc:`SR-IOV VFs <pci-passthrough>`.
+ :neutron-doc:`Neutron's QoS policies <admin/config-qos.html>` should be
+ preferred wherever possible.
+
+Libvirt enforces network bandwidth limits through inbound and outbound average,
+using the :nova:extra-spec:`quota:vif_inbound_average` and
+:nova:extra-spec:`quota:vif_outbound_average` extra specs, respectively.
+In addition, optional *peak* values, which specify the maximum rate at which
+a bridge can send data (kB/s), and *burst* values, which specify the amount
+of bytes that can be burst at peak speed (kilobytes), can be specified for both
+inbound and outbound traffic, using the
+:nova:extra-spec:`quota:vif_inbound_peak` /
+:nova:extra-spec:`quota:vif_outbound_peak` and
+:nova:extra-spec:`quota:vif_inbound_burst` /
+:nova:extra-spec:`quota:vif_outbound_burst` extra specs, respectively.
+
+For example, to configure **outbound** traffic to an average of 262 Mbit/s
+(32768 kB/s), a peak of 524 Mbit/s, and a burst of 65536 kilobytes:
+
+.. code-block:: console
+
+ $ openstack flavor set $FLAVOR \
+ --property quota:vif_outbound_average=32768 \
+ --property quota:vif_outbound_peak=65536 \
+ --property quota:vif_outbound_burst=65536
+
+.. note::
+
+ The speed limit values in the above example are specified in
+ kilobytes/second, while the burst value is in kilobytes.
+
+VMware
+~~~~~~
+
+In contrast to libvirt, the VMware virt driver enforces resource limits using
+consistent terminology, specifically through relative allocation levels, hard
+upper limits and minimum reservations configured via, for example, the
+:nova:extra-spec:`quota:cpu_shares_level` /
+:nova:extra-spec:`quota:cpu_shares_share`, :nova:extra-spec:`quota:cpu_limit`,
+and :nova:extra-spec:`quota:cpu_reservation` extra specs, respectively.
+
+Allocation levels can be specified using one of ``high``, ``normal``, ``low``,
+or ``custom``. When ``custom`` is specified, the number of shares must be
+specified using e.g. :nova:extra-spec:`quota:cpu_shares_share`. There is no
+unit and the values are relative to other instances on the host. The upper
+limits and reservations, by comparison, are measured in resource-specific
+units, such as MHz for CPUs, and will ensure that the instance never uses more
+than, or gets less than, the specified amount of the resource.
+
+CPU limits
+^^^^^^^^^^
+
+CPU limits are configured via the :nova:extra-spec:`quota:cpu_shares_level` /
+:nova:extra-spec:`quota:cpu_shares_share`, :nova:extra-spec:`quota:cpu_limit`,
+and :nova:extra-spec:`quota:cpu_reservation` extra specs.
+
+For example, to configure a CPU allocation level of ``custom`` with 1024
+shares:
+
+.. code-block:: console
+
+ $ openstack flavor set $FLAVOR \
+ --property quota:cpu_shares_level=custom \
+ --property quota:cpu_shares_share=1024
+
+To configure a minimum CPU allocation of 1024 MHz and a maximum of 2048 MHz:
+
+.. code-block:: console
+
+ $ openstack flavor set $FLAVOR \
+ --property quota:cpu_reservation=1024 \
+ --property quota:cpu_limit=2048
+
+Memory limits
+^^^^^^^^^^^^^
+
+Memory limits are configured via the
+:nova:extra-spec:`quota:memory_shares_level` /
+:nova:extra-spec:`quota:memory_shares_share`,
+:nova:extra-spec:`quota:memory_limit`, and
+:nova:extra-spec:`quota:memory_reservation` extra specs.
+
+For example, to configure a memory allocation level of ``custom`` with 1024
+shares:
+
+.. code-block:: console
+
+ $ openstack flavor set $FLAVOR \
+ --property quota:memory_shares_level=custom \
+ --property quota:memory_shares_share=1024
+
+To configure a minimum memory allocation of 1024 MB and a maximum of 2048 MB:
+
+.. code-block:: console
+
+ $ openstack flavor set $FLAVOR \
+ --property quota:memory_reservation=1024 \
+ --property quota:memory_limit=2048
+
+Disk I/O limits
+^^^^^^^^^^^^^^^
+
+Disk I/O limits are configured via the
+:nova:extra-spec:`quota:disk_io_shares_level` /
+:nova:extra-spec:`quota:disk_io_shares_share`,
+:nova:extra-spec:`quota:disk_io_limit`, and
+:nova:extra-spec:`quota:disk_io_reservation` extra specs.
+
+For example, to configure a disk I/O allocation level of ``custom`` with 1024
+shares:
+
+.. code-block:: console
+
+ $ openstack flavor set $FLAVOR \
+ --property quota:disk_io_shares_level=custom \
+ --property quota:disk_io_shares_share=1024
+
+To configure a minimum disk I/O allocation of 1024 MB and a maximum of 2048 MB:
+
+.. code-block:: console
+
+ $ openstack flavor set $FLAVOR \
+ --property quota:disk_io_reservation=1024 \
+ --property quota:disk_io_limit=2048
+
+Network bandwidth limits
+^^^^^^^^^^^^^^^^^^^^^^^^
+
+Network bandwidth limits are configured via the
+:nova:extra-spec:`quota:vif_shares_level` /
+:nova:extra-spec:`quota:vif_shares_share`,
+:nova:extra-spec:`quota:vif_limit`, and
+:nova:extra-spec:`quota:vif_reservation` extra specs.
+
+For example, to configure a network bandwidth allocation level of ``custom``
+with 1024 shares:
+
+.. code-block:: console
+
+ $ openstack flavor set $FLAVOR \
+ --property quota:vif_shares_level=custom \
+ --property quota:vif_shares_share=1024
+
+To configure a minimum bandwidth allocation of 1024 Mbits/sec and a maximum of
+2048 Mbits/sec:
+
+.. code-block:: console
+
+ $ openstack flavor set $FLAVOR \
+ --property quota:vif_reservation=1024 \
+ --property quota:vif_limit=2048
+
+Hyper-V
+~~~~~~~
+
+CPU limits
+^^^^^^^^^^
+
+The Hyper-V driver does not support CPU limits.
+
+Memory limits
+^^^^^^^^^^^^^
+
+The Hyper-V driver does not support memory limits.
+
+Disk I/O limits
+^^^^^^^^^^^^^^^
+
+Hyper-V enforces disk limits through maximum total bytes and total I/O
+operations per second, using the :nova:extra-spec:`quota:disk_total_bytes_sec`
+and :nova:extra-spec:`quota:disk_total_iops_sec` extra specs, respectively. For
+example, to set a maximum disk read/write of 10 MB/sec for a flavor:
+
+.. code-block:: console
+
+ $ openstack flavor set $FLAVOR \
+ --property quota:disk_total_bytes_sec=10485760
+
+Network bandwidth limits
+^^^^^^^^^^^^^^^^^^^^^^^^
+
+The Hyper-V driver does not support network bandwidth limits.
diff --git a/doc/source/admin/root-wrap-reference.rst b/doc/source/admin/root-wrap-reference.rst
index b25b8200e8c..1a94d616c67 100644
--- a/doc/source/admin/root-wrap-reference.rst
+++ b/doc/source/admin/root-wrap-reference.rst
@@ -70,7 +70,7 @@ and is a Kilo release feature.
Including this workaround in your configuration file safeguards your
environment from issues that can impair root wrapper performance. Tool changes
that have impacted `Python Build Reasonableness (PBR)
-`__ for example, are a known
+`__ for example, are a known
issue that affects root wrapper performance.
To set up this workaround, configure the ``disable_rootwrap`` option in the
diff --git a/doc/source/admin/scheduling.rst b/doc/source/admin/scheduling.rst
new file mode 100644
index 00000000000..0b93792ac7a
--- /dev/null
+++ b/doc/source/admin/scheduling.rst
@@ -0,0 +1,1416 @@
+==================
+Compute schedulers
+==================
+
+Compute uses the ``nova-scheduler`` service to determine how to dispatch
+compute requests. For example, the ``nova-scheduler`` service determines on
+which host or node a VM should launch. You can configure the scheduler through
+a variety of options.
+
+In the default configuration, this scheduler considers hosts that meet all the
+following criteria:
+
+* Are in the requested :term:`Availability Zone` (``AvailabilityZoneFilter``).
+
+* Can service the request, meaning the ``nova-compute`` service handling the
+ target node is available and not disabled (``ComputeFilter``).
+
+* Satisfy the extra specs associated with the instance type
+ (``ComputeCapabilitiesFilter``).
+
+* Satisfy any architecture, hypervisor type, or virtual machine mode properties
+ specified on the instance's image properties (``ImagePropertiesFilter``).
+
+* Are on a different host than other instances of a group (if requested)
+ (``ServerGroupAntiAffinityFilter``).
+
+* Are in a set of group hosts (if requested) (``ServerGroupAffinityFilter``).
+
+The scheduler chooses a new host when an instance is migrated, resized,
+evacuated or unshelved after being shelve offloaded.
+
+When evacuating instances from a host, the scheduler service honors the target
+host defined by the administrator on the :command:`nova evacuate` command. If
+a target is not defined by the administrator, the scheduler determines the
+target host. For information about instance evacuation, see
+:ref:`Evacuate instances `.
+
+
+.. _compute-scheduler-filters:
+
+Prefilters
+----------
+
+As of the Rocky release, the scheduling process includes a prefilter step to
+increase the efficiency of subsequent stages. These *prefilters* are largely
+optional and serve to augment the request that is sent to placement to reduce
+the set of candidate compute hosts based on attributes that placement is able
+to answer for us ahead of time. In addition to the prefilters listed here, also
+see :ref:`tenant-isolation-with-placement` and
+:ref:`availability-zones-with-placement`.
+
+Compute Image Type Support
+~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+.. versionadded:: 20.0.0 (Train)
+
+Starting in the Train release, there is a prefilter available for
+excluding compute nodes that do not support the ``disk_format`` of the
+image used in a boot request. This behavior is enabled by setting
+:oslo.config:option:`scheduler.query_placement_for_image_type_support` to
+``True``. For example, the libvirt driver, when using ceph as an ephemeral
+backend, does not support ``qcow2`` images (without an expensive conversion
+step). In this case (and especially if you have a mix of ceph and
+non-ceph backed computes), enabling this feature will ensure that the
+scheduler does not send requests to boot a ``qcow2`` image to computes
+backed by ceph.
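+
+For example, this prefilter can be enabled with the following ``nova.conf``
+snippet on the scheduler host:
+
+.. code-block:: ini
+
+ [scheduler]
+ query_placement_for_image_type_support = True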
+
+Compute Disabled Status Support
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+.. versionadded:: 20.0.0 (Train)
+
+Starting in the Train release, there is a mandatory `pre-filter
+`_
+which will exclude disabled compute nodes, similar to (but not fully
+replacing) the `ComputeFilter`_. Compute node resource providers with the
+``COMPUTE_STATUS_DISABLED`` trait will be excluded as scheduling candidates.
+The trait is managed by the ``nova-compute`` service and should mirror the
+``disabled`` status on the related compute service record in the
+`os-services`_ API. For example, if a compute service's status is ``disabled``,
+the related compute node resource provider(s) for that service should have the
+``COMPUTE_STATUS_DISABLED`` trait. When the service status is ``enabled`` the
+``COMPUTE_STATUS_DISABLED`` trait shall be removed.
+
+If the compute service is down when the status is changed, the trait will be
+synchronized by the compute service when it is restarted. Similarly, if an
+error occurs when trying to add or remove the trait on a given resource
+provider, the trait will be synchronized when the ``update_available_resource``
+periodic task runs - which is controlled by the
+:oslo.config:option:`update_resources_interval` configuration option.
+
+.. _os-services: https://docs.openstack.org/api-ref/compute/#compute-services-os-services
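+
+For example, an operator toggles the ``disabled`` status, and hence the
+``COMPUTE_STATUS_DISABLED`` trait, via the compute service API or CLI (the
+host name is illustrative):
+
+.. code-block:: console
+
+ $ openstack compute service set --disable --disable-reason maintenance \
+ compute1 nova-compute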
+
+Isolate Aggregates
+~~~~~~~~~~~~~~~~~~
+
+.. versionadded:: 20.0.0 (Train)
+
+Starting in the Train release, there is an optional placement pre-request
+filter, described in :doc:`/reference/isolate-aggregates`.
+When enabled, the traits required in the server's flavor and image must be at
+least those required in an aggregate's metadata in order for the server to be
+eligible to boot on hosts in that aggregate.
+
+
+The Filter Scheduler
+--------------------
+
+.. versionchanged:: 23.0.0 (Wallaby)
+
+ Support for custom filters was removed. Only the filter scheduler is now
+ supported by nova.
+
+Nova's scheduler, known as the *filter scheduler*, supports filtering and
+weighting to make informed decisions on where a new instance should be created.
+
+When the scheduler receives a request for a resource, it first applies filters
+to determine which hosts are eligible for consideration when dispatching a
+resource. Filters are binary: either a host is accepted by the filter, or it is
+rejected. Hosts that are accepted by the filter are then processed by a
+different algorithm to decide which hosts to use for that request, described in
+the :ref:`weights` section.
+
+**Filtering**
+
+.. figure:: /_static/images/filtering-workflow-1.png
+
+The :oslo.config:option:`filter_scheduler.available_filters` config option
+provides the Compute service with the list of the filters that are available
+for use by the scheduler. The default setting specifies all of the filters that
+are included with the Compute service. This configuration option can be
+specified multiple times. For example, if you implemented your own custom
+filter in Python called ``myfilter.MyFilter`` and you wanted to use both the
+built-in filters and your custom filter, your :file:`nova.conf` file would
+contain:
+
+.. code-block:: ini
+
+ [filter_scheduler]
+ available_filters = nova.scheduler.filters.all_filters
+ available_filters = myfilter.MyFilter
+
+The :oslo.config:option:`filter_scheduler.enabled_filters` configuration option
+in ``nova.conf`` defines the list of filters that are applied by the
+``nova-scheduler`` service.
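+
+For example, to enable only the filters from the default criteria listed at
+the start of this document:
+
+.. code-block:: ini
+
+ [filter_scheduler]
+ enabled_filters = AvailabilityZoneFilter,ComputeFilter,ComputeCapabilitiesFilter,ImagePropertiesFilter,ServerGroupAntiAffinityFilter,ServerGroupAffinityFilter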
+
+
+Filters
+-------
+
+The following sections describe the available compute filters.
+
+Filters are configured using the following config options:
+
+- :oslo.config:option:`filter_scheduler.available_filters` - Defines filter
+ classes made available to the scheduler. This setting can be used multiple
+ times.
+- :oslo.config:option:`filter_scheduler.enabled_filters` - Of the available
+ filters, defines those that the scheduler uses by default.
+
+Each filter selects hosts in a different way and has different costs. The order
+of :oslo.config:option:`filter_scheduler.enabled_filters` affects scheduling
+performance. The general suggestion is to filter out invalid hosts as soon as
+possible to avoid unnecessary costs. We can sort
+:oslo.config:option:`filter_scheduler.enabled_filters`
+items by their costs in reverse order. For example, ``ComputeFilter`` is better
+before any resource calculating filters like ``NUMATopologyFilter``.
+
+In medium/large environments, having ``AvailabilityZoneFilter`` before any
+capability or resource-calculating filters can be useful.
+
+.. _AggregateImagePropertiesIsolation:
+
+``AggregateImagePropertiesIsolation``
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+.. versionchanged:: 12.0.0 (Liberty)
+
+ Prior to 12.0.0 Liberty, it was possible to specify and use arbitrary
+ metadata with this filter. Starting in Liberty, nova only parses
+ :glance-doc:`standard metadata `. If
+ you wish to use arbitrary metadata, consider using the
+ :ref:`AggregateInstanceExtraSpecsFilter` filter instead.
+
+Matches properties defined in an image's metadata against those of aggregates
+to determine host matches:
+
+* If a host belongs to an aggregate and the aggregate defines one or more
+ metadata items that match an image's properties, that host is a candidate to
+ boot the image's instance.
+
+* If a host does not belong to any aggregate, it can boot instances from all
+ images.
+
+For example, the following aggregate ``myWinAgg`` has the Windows operating
+system as metadata (named 'windows'):
+
+.. code-block:: console
+
+ $ openstack aggregate show myWinAgg
+ +-------------------+----------------------------+
+ | Field | Value |
+ +-------------------+----------------------------+
+ | availability_zone | zone1 |
+ | created_at | 2017-01-01T15:36:44.000000 |
+ | deleted | False |
+ | deleted_at | None |
+ | hosts | ['sf-devel'] |
+ | id | 1 |
+ | name | myWinAgg |
+ | properties | os_distro='windows' |
+ | updated_at | None |
+ +-------------------+----------------------------+
+
+In this example, because the following Win-2012 image has the ``windows``
+property, it boots on the ``sf-devel`` host (all other filters being equal):
+
+.. code-block:: console
+
+ $ openstack image show Win-2012
+ +------------------+------------------------------------------------------+
+ | Field | Value |
+ +------------------+------------------------------------------------------+
+ | checksum | ee1eca47dc88f4879d8a229cc70a07c6 |
+ | container_format | bare |
+ | created_at | 2016-12-13T09:30:30Z |
+ | disk_format | qcow2 |
+ | ... |
+ | name | Win-2012 |
+ | ... |
+ | properties | os_distro='windows' |
+ | ... |
+
+You can configure the ``AggregateImagePropertiesIsolation`` filter by using the
+following options in the ``nova.conf`` file:
+
+- :oslo.config:option:`filter_scheduler.aggregate_image_properties_isolation_namespace`
+- :oslo.config:option:`filter_scheduler.aggregate_image_properties_isolation_separator`
+
+.. note::
+
+ This filter has limitations as described in `bug 1677217
+ `_
+ which are addressed by the placement :doc:`/reference/isolate-aggregates`
+ request filter.
+
+Refer to :doc:`/admin/aggregates` for more information.
+
+
+.. _AggregateInstanceExtraSpecsFilter:
+
+``AggregateInstanceExtraSpecsFilter``
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Matches properties defined in extra specs for an instance type against
+admin-defined properties on a host aggregate. Works with specifications that
+are scoped with ``aggregate_instance_extra_specs``. Multiple values can be
+given, as a comma-separated list. For backward compatibility, also works with
+non-scoped specifications; this action is highly discouraged because it
+conflicts with the :ref:`ComputeCapabilitiesFilter` filter when you enable both
+filters.
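+
+For example, to match a flavor against aggregate hosts advertising SSD storage
+(the ``ssd`` key and ``myAgg`` aggregate name are illustrative):
+
+.. code-block:: console
+
+ $ openstack aggregate set --property ssd=true myAgg
+ $ openstack flavor set $FLAVOR \
+ --property aggregate_instance_extra_specs:ssd=true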
+
+Refer to :doc:`/admin/aggregates` for more information.
+
+
+.. _AggregateIoOpsFilter:
+
+``AggregateIoOpsFilter``
+~~~~~~~~~~~~~~~~~~~~~~~~
+
+Filters hosts by disk allocation with a per-aggregate ``max_io_ops_per_host``
+value. If the per-aggregate value is not found, the value falls back to the
+global setting defined by the
+:oslo.config:option:`filter_scheduler.max_io_ops_per_host` config option.
+If the host is in more than one aggregate and more than one value is found, the
+minimum value will be used.
+
+Refer to :doc:`/admin/aggregates` and :ref:`IoOpsFilter` for more information.
+
+
+.. _AggregateMultiTenancyIsolation:
+
+``AggregateMultiTenancyIsolation``
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Ensures hosts in tenant-isolated host aggregates will only be available to a
+specified set of tenants. If a host is in an aggregate that has the
+``filter_tenant_id`` metadata key, the host can only build instances from the
+tenant or comma-separated list of tenants specified there. A host can be in
+multiple aggregates. If a host does not belong to an aggregate with the
+metadata key, the host can build instances from all tenants. This does not
+restrict the tenant from creating servers on hosts outside the tenant-isolated
+aggregate.
+
+For example, consider there are two available hosts for scheduling, ``HostA``
+and ``HostB``. ``HostB`` is in an aggregate isolated to tenant ``X``. A server
+create request from tenant ``X`` will result in either ``HostA`` *or* ``HostB``
+as candidates during scheduling. A server create request from another tenant
+``Y`` will result in only ``HostA`` being a scheduling candidate since
+``HostA`` is not part of the tenant-isolated aggregate.
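+
+For example, a sketch of isolating an aggregate to a single tenant (the
+project ID and aggregate name are illustrative):
+
+.. code-block:: console
+
+   $ openstack aggregate set \
+     --property filter_tenant_id=9691591f913949818a514f95286a6b90 agg1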
+
+.. note::
+
+ There is a `known limitation
+ `_ with the number of tenants
+ that can be isolated per aggregate using this filter. This limitation does
+ not exist, however, for the :ref:`tenant-isolation-with-placement`
+ filtering capability added in the 18.0.0 Rocky release.
+
+
+.. _AggregateNumInstancesFilter:
+
+``AggregateNumInstancesFilter``
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Filters hosts in an aggregate by the number of instances with a per-aggregate
+``max_instances_per_host`` value. If the per-aggregate value is not found, the
+value falls back to the global setting defined by the
+:oslo.config:option:`filter_scheduler.max_instances_per_host` config option.
+If the host is in more than one aggregate and thus more than one value is
+found, the minimum value will be used.
+
+Refer to :doc:`/admin/aggregates` and :ref:`NumInstancesFilter` for more
+information.
+
+
+.. _AggregateTypeAffinityFilter:
+
+``AggregateTypeAffinityFilter``
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Passes hosts in an aggregate if the name of the instance's flavor matches the
+``instance_type`` key set in the aggregate's metadata, or if the
+``instance_type`` key is not set.
+
+The value of the ``instance_type`` metadata entry is a string that may contain
+either a single ``instance_type`` name or a comma-separated list of
+``instance_type`` names, such as ``m1.nano`` or ``m1.nano,m1.small``.
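+
+For example, a sketch that pins an aggregate to two flavors (the aggregate
+name is illustrative):
+
+.. code-block:: console
+
+   $ openstack aggregate set --property instance_type='m1.nano,m1.small' myAgg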
+
+.. note::
+
+ Instance types are a historical name for flavors.
+
+Refer to :doc:`/admin/aggregates` for more information.
+
+
+``AllHostsFilter``
+~~~~~~~~~~~~~~~~~~
+
+This is a no-op filter. It does not eliminate any of the available hosts.
+
+
+.. _AvailabilityZoneFilter:
+
+``AvailabilityZoneFilter``
+~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Filters hosts by availability zone. It passes hosts matching the availability
+zone specified in the instance properties. Use a comma to specify multiple
+zones; the filter then passes hosts that match any of the specified zones.
+
+You must enable this filter for the scheduler to respect availability zones in
+requests.
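+
+For example, a sketch of a request pinned to a single zone (the image, flavor
+and zone names are illustrative):
+
+.. code-block:: console
+
+   $ openstack server create --availability-zone zone1 \
+     --image cirros --flavor m1.tiny server-1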
+
+Refer to :doc:`/admin/availability-zones` for more information.
+
+.. _ComputeCapabilitiesFilter:
+
+``ComputeCapabilitiesFilter``
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Filters hosts by matching properties defined in flavor extra specs against compute
+capabilities. If an extra specs key contains a colon (``:``), anything before
+the colon is treated as a namespace and anything after the colon is treated as
+the key to be matched. If a namespace is present and is not ``capabilities``,
+the filter ignores the namespace. For example,
+``capabilities:cpu_info:features`` is a valid scope format.
+For backward compatibility, the filter also treats the extra specs key as the
+key to be matched if no namespace is present; this is strongly discouraged
+because it conflicts with the :ref:`AggregateInstanceExtraSpecsFilter` filter
+when both filters are enabled.
+
+The extra specifications can have an operator at the beginning of the value
+string of a key/value pair. If there is no operator specified, then a
+default operator of ``s==`` is used. Valid operators are:
+
+* ``=`` (equal to or greater than as a number; same as vcpus case)
+* ``==`` (equal to as a number)
+* ``!=`` (not equal to as a number)
+* ``>=`` (greater than or equal to as a number)
+* ``<=`` (less than or equal to as a number)
+* ``s==`` (equal to as a string)
+* ``s!=`` (not equal to as a string)
+* ``s>=`` (greater than or equal to as a string)
+* ``s>`` (greater than as a string)
+* ``s<=`` (less than or equal to as a string)
+* ``s<`` (less than as a string)
+* ``<in>`` (substring)
+* ``<all-in>`` (all elements contained in collection)
+* ``<or>`` (find one of these)
+
+Examples are: ``>= 5``, ``s== 2.1.0``, ``<in> gcc``, ``<all-in> aes mmx``, and
+``<or> fpu <or> gpu``.
+
+Some of the attributes that can be used as keys, along with example values,
+include:
+
+* ``free_ram_mb`` (compared with a number, values like ``>= 4096``)
+* ``free_disk_mb`` (compared with a number, values like ``>= 10240``)
+* ``host`` (compared with a string, values like ``<in> compute``, ``s== compute_01``)
+* ``hypervisor_type`` (compared with a string, values like ``s== QEMU``, ``s== powervm``)
+* ``hypervisor_version`` (compared with a number, values like ``>= 1005003``, ``== 2000000``)
+* ``num_instances`` (compared with a number, values like ``<= 10``)
+* ``num_io_ops`` (compared with a number, values like ``<= 5``)
+* ``vcpus_total`` (compared with a number, values like ``= 48``, ``>=24``)
+* ``vcpus_used`` (compared with a number, values like ``= 0``, ``<= 10``)
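+
+For example, a hedged sketch of a flavor that requires a QEMU host with at
+least 4 GiB of free RAM (the flavor name is illustrative):
+
+.. code-block:: console
+
+   $ openstack flavor set \
+     --property capabilities:hypervisor_type='s== QEMU' \
+     --property capabilities:free_ram_mb='>= 4096' m1.large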
+
+Some virt drivers support reporting CPU traits to the Placement service. With
+that feature available, you should consider using traits in flavors instead of
+``ComputeCapabilitiesFilter`` because traits provide consistent naming for CPU
+features in some virt drivers and querying traits is efficient. For more
+details, refer to :doc:`/user/support-matrix`,
+:ref:`Required traits `,
+:ref:`Forbidden traits ` and
+`Report CPU features to the Placement service `_.
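+
+For example, a sketch of trait-based scheduling via flavor extra specs (the
+trait and flavor name are illustrative):
+
+.. code-block:: console
+
+   $ openstack flavor set --property trait:HW_CPU_X86_AVX2=required m1.large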
+
+Also refer to `Compute capabilities as traits`_.
+
+
+.. _ComputeFilter:
+
+``ComputeFilter``
+~~~~~~~~~~~~~~~~~
+
+Passes all hosts that are operational and enabled.
+
+In general, you should always enable this filter.
+
+
+``DifferentHostFilter``
+~~~~~~~~~~~~~~~~~~~~~~~
+
+Schedules the instance on a different host from a set of instances. To take
+advantage of this filter, the requester must pass a scheduler hint, using
+``different_host`` as the key and a list of instance UUIDs as the value. This
+filter is the opposite of the ``SameHostFilter``.
+
+For example, when using the :command:`openstack server create` command, use the
+``--hint`` flag:
+
+.. code-block:: console
+
+ $ openstack server create \
+ --image cedef40a-ed67-4d10-800e-17455edce175 --flavor 1 \
+ --hint different_host=a0cf03a5-d921-4877-bb5c-86d26cf818e1 \
+ --hint different_host=8c19174f-4220-44f0-824a-cd1eeef10287 \
+ server-1
+
+With the API, use the ``os:scheduler_hints`` key. For example:
+
+.. code-block:: json
+
+ {
+ "server": {
+ "name": "server-1",
+ "imageRef": "cedef40a-ed67-4d10-800e-17455edce175",
+ "flavorRef": "1"
+ },
+ "os:scheduler_hints": {
+ "different_host": [
+ "a0cf03a5-d921-4877-bb5c-86d26cf818e1",
+ "8c19174f-4220-44f0-824a-cd1eeef10287"
+ ]
+ }
+ }
+
+
+.. _ImagePropertiesFilter:
+
+``ImagePropertiesFilter``
+~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Filters hosts based on properties defined on the instance's image. It passes
+hosts that can support the specified image properties contained in the
+instance. Properties include the architecture, hypervisor type, hypervisor
+version, and virtual machine mode.
+
+For example, an instance might require a host that runs an ARM-based processor,
+and QEMU as the hypervisor. You can decorate an image with these properties by
+using:
+
+.. code-block:: console
+
+ $ openstack image set --architecture arm --property img_hv_type=qemu \
+ img-uuid
+
+The image properties that the filter checks for are:
+
+``hw_architecture``
+ Describes the machine architecture required by the image. Examples are
+ ``i686``, ``x86_64``, ``arm``, and ``ppc64``.
+
+ .. versionchanged:: 12.0.0 (Liberty)
+
+ This was previously called ``architecture``.
+
+``img_hv_type``
+ Describes the hypervisor required by the image. Examples are ``qemu``
+ and ``hyperv``.
+
+ .. note::
+
+ ``qemu`` is used for both QEMU and KVM hypervisor types.
+
+ .. versionchanged:: 12.0.0 (Liberty)
+
+ This was previously called ``hypervisor_type``.
+
+``img_hv_requested_version``
+ Describes the hypervisor version required by the image. The property is
+ supported for HyperV hypervisor type only. It can be used to enable support for
+ multiple hypervisor versions, and to prevent instances with newer HyperV tools
+ from being provisioned on an older version of a hypervisor. If available, the
+ property value is compared to the hypervisor version of the compute host.
+
+ To filter the hosts by the hypervisor version, add the
+ ``img_hv_requested_version`` property on the image as metadata and pass an
+ operator and a required hypervisor version as its value:
+
+ .. code-block:: console
+
+     $ openstack image set --property img_hv_type=hyperv --property \
+       img_hv_requested_version=">=6000" img-uuid
+
+ .. versionchanged:: 12.0.0 (Liberty)
+
+ This was previously called ``hypervisor_version_requires``.
+
+``hw_vm_mode``
+  Describes the hypervisor application binary interface (ABI) required by the
+ image. Examples are ``xen`` for Xen 3.0 paravirtual ABI, ``hvm`` for native
+ ABI, and ``exe`` for container virt executable ABI.
+
+ .. versionchanged:: 12.0.0 (Liberty)
+
+ This was previously called ``vm_mode``.
+
+
+``IsolatedHostsFilter``
+~~~~~~~~~~~~~~~~~~~~~~~
+
+Allows the admin to define a special (isolated) set of images and a special
+(isolated) set of hosts, such that the isolated images can only run on the
+isolated hosts, and the isolated hosts can only run isolated images. The flag
+``restrict_isolated_hosts_to_isolated_images`` can be used to force isolated
+hosts to only run isolated images.
+
+The logic within the filter depends on the
+``restrict_isolated_hosts_to_isolated_images`` config option, which defaults
+to True. When True, a volume-backed instance will not be put on an isolated
+host. When False, a volume-backed instance can go on any host, isolated or
+not.
+
+The admin must specify the isolated set of images and hosts using the
+:oslo.config:option:`filter_scheduler.isolated_hosts` and
+:oslo.config:option:`filter_scheduler.isolated_images` config options.
+For example:
+
+.. code-block:: ini
+
+ [filter_scheduler]
+ isolated_hosts = server1, server2
+ isolated_images = 342b492c-128f-4a42-8d3a-c5088cf27d13, ebd267a6-ca86-4d6c-9a0e-bd132d6b7d09
+
+You can also specify that isolated hosts only be used for specific isolated
+images by using the
+:oslo.config:option:`filter_scheduler.restrict_isolated_hosts_to_isolated_images`
+config option.
+
+
+.. _IoOpsFilter:
+
+``IoOpsFilter``
+~~~~~~~~~~~~~~~
+
+Filters hosts by the number of concurrent I/O operations on them. Hosts with
+too many concurrent I/O operations will be filtered out. The
+:oslo.config:option:`filter_scheduler.max_io_ops_per_host` option specifies the
+maximum number of I/O intensive instances allowed to run on a host.
+A host will be ignored by the scheduler if more than
+:oslo.config:option:`filter_scheduler.max_io_ops_per_host` instances in build,
+resize, snapshot, migrate, rescue or unshelve task states are running on it.
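+
+For example, a sketch of tightening the global limit in ``nova.conf`` (the
+value is illustrative):
+
+.. code-block:: ini
+
+   [filter_scheduler]
+   max_io_ops_per_host = 8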
+
+
+``JsonFilter``
+~~~~~~~~~~~~~~~
+
+.. warning::
+
+ This filter is not enabled by default and not comprehensively
+ tested, and thus could fail to work as expected in non-obvious ways.
+ Furthermore, the filter variables are based on attributes of the
+ `HostState`_ class which could change from release to release so usage
+ of this filter is generally not recommended. Consider using other filters
+ such as the :ref:`ImagePropertiesFilter` or
+ :ref:`traits-based scheduling `.
+
+Allows a user to construct a custom filter by passing a
+scheduler hint in JSON format. The following operators are supported:
+
+* ``=``
+* ``<``
+* ``>``
+* ``in``
+* ``<=``
+* ``>=``
+* ``not``
+* ``or``
+* ``and``
+
+Unlike most other filters that rely on information provided via scheduler
+hints, this filter filters on attributes in the `HostState`_ class such as the
+following variables:
+
+* ``$free_ram_mb``
+* ``$free_disk_mb``
+* ``$hypervisor_hostname``
+* ``$total_usable_ram_mb``
+* ``$vcpus_total``
+* ``$vcpus_used``
+
+Using the :command:`openstack server create` command, use the ``--hint`` flag:
+
+.. code-block:: console
+
+ $ openstack server create --image 827d564a-e636-4fc4-a376-d36f7ebe1747 \
+ --flavor 1 --hint query='[">=","$free_ram_mb",1024]' server1
+
+With the API, use the ``os:scheduler_hints`` key:
+
+.. code-block:: json
+
+ {
+ "server": {
+ "name": "server-1",
+ "imageRef": "cedef40a-ed67-4d10-800e-17455edce175",
+ "flavorRef": "1"
+ },
+ "os:scheduler_hints": {
+ "query": "[\">=\",\"$free_ram_mb\",1024]"
+ }
+ }
+
+.. _HostState: https://opendev.org/openstack/nova/src/branch/master/nova/scheduler/host_manager.py
+
+
+``MetricsFilter``
+~~~~~~~~~~~~~~~~~
+
+Use this filter in combination with the ``MetricsWeigher`` weigher. It filters
+out hosts that do not report the metrics specified in
+:oslo.config:option:`metrics.weight_setting`, thus ensuring the metrics
+weigher will not fail due to these hosts.
+
+
+.. _NUMATopologyFilter:
+
+``NUMATopologyFilter``
+~~~~~~~~~~~~~~~~~~~~~~
+
+Filters hosts based on the NUMA topology that was specified for the instance
+through the use of flavor ``extra_specs`` in combination with the image
+properties, as described in detail in :doc:`/admin/cpu-topologies`. The filter
+will try to match the exact NUMA cells of the instance to those of the host. It
+will consider the standard over-subscription limits for each host NUMA cell,
+and provide limits to the compute host accordingly.
+
+This filter is essential if using instances with features that rely on NUMA,
+such as instance NUMA topologies or CPU pinning.
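+
+For example, a sketch of a flavor requesting a two-node NUMA topology (the
+flavor name is illustrative):
+
+.. code-block:: console
+
+   $ openstack flavor set --property hw:numa_nodes=2 m1.large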
+
+.. note::
+
+   If an instance has no NUMA topology defined, it will be considered for any
+   host. If an instance has a NUMA topology defined, it will be considered
+   only for NUMA-capable hosts.
+
+
+.. _NumInstancesFilter:
+
+``NumInstancesFilter``
+~~~~~~~~~~~~~~~~~~~~~~
+
+Filters hosts based on the number of instances running on them. Hosts that have
+more instances running than specified by the
+:oslo.config:option:`filter_scheduler.max_instances_per_host` config option are
+filtered out.
+
+
+.. _PciPassthroughFilter:
+
+``PciPassthroughFilter``
+~~~~~~~~~~~~~~~~~~~~~~~~
+
+The filter schedules instances on a host if the host has devices that meet the
+device requests in the ``extra_specs`` attribute for the flavor.
+
+This filter is essential if using instances with PCI device requests or where
+SR-IOV-based networking is in use on hosts.
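+
+For example, a hedged sketch requesting two PCI devices from a pre-configured
+``a1`` alias (the alias and flavor name are illustrative):
+
+.. code-block:: console
+
+   $ openstack flavor set --property pci_passthrough:alias='a1:2' m1.large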
+
+
+``SameHostFilter``
+~~~~~~~~~~~~~~~~~~
+
+Schedules an instance on the same host as all other instances in a set of
+instances. To take advantage of this filter, the requester must pass a
+scheduler hint, using ``same_host`` as the key and a list of instance UUIDs as
+the value. This filter is the opposite of the ``DifferentHostFilter``.
+
+For example, when using the :command:`openstack server create` command, use the
+``--hint`` flag:
+
+.. code-block:: console
+
+ $ openstack server create \
+ --image cedef40a-ed67-4d10-800e-17455edce175 --flavor 1 \
+ --hint same_host=a0cf03a5-d921-4877-bb5c-86d26cf818e1 \
+ --hint same_host=8c19174f-4220-44f0-824a-cd1eeef10287 \
+ server-1
+
+With the API, use the ``os:scheduler_hints`` key:
+
+.. code-block:: json
+
+ {
+ "server": {
+ "name": "server-1",
+ "imageRef": "cedef40a-ed67-4d10-800e-17455edce175",
+ "flavorRef": "1"
+ },
+ "os:scheduler_hints": {
+ "same_host": [
+ "a0cf03a5-d921-4877-bb5c-86d26cf818e1",
+ "8c19174f-4220-44f0-824a-cd1eeef10287"
+ ]
+ }
+ }
+
+
+.. _ServerGroupAffinityFilter:
+
+``ServerGroupAffinityFilter``
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Restricts instances belonging to a server group to the same host(s). To take
+advantage of this filter, the requester must create a server group with an
+``affinity`` policy, and pass a scheduler hint, using ``group`` as the key and
+the server group UUID as the value.
+
+For example, when using the :command:`openstack server create` command, use the
+``--hint`` flag:
+
+.. code-block:: console
+
+ $ openstack server group create --policy affinity group-1
+ $ openstack server create --image IMAGE_ID --flavor 1 \
+ --hint group=SERVER_GROUP_UUID server-1
+
+
+.. _ServerGroupAntiAffinityFilter:
+
+``ServerGroupAntiAffinityFilter``
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Restricts instances belonging to a server group to separate hosts.
+To take advantage of this filter, the requester must create a
+server group with an ``anti-affinity`` policy, and pass a scheduler hint, using
+``group`` as the key and the server group UUID as the value.
+
+For example, when using the :command:`openstack server create` command, use the
+``--hint`` flag:
+
+.. code-block:: console
+
+ $ openstack server group create --policy anti-affinity group-1
+ $ openstack server create --image IMAGE_ID --flavor 1 \
+ --hint group=SERVER_GROUP_UUID server-1
+
+
+``SimpleCIDRAffinityFilter``
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+.. todo::
+
+ Does this filter still work with neutron?
+
+Schedules the instance based on the host IP subnet range. To take advantage of
+this filter, the requester must specify a range of valid IP addresses in CIDR
+format, by passing two scheduler hints:
+
+``build_near_host_ip``
+ The first IP address in the subnet (for example, ``192.168.1.1``)
+
+``cidr``
+ The CIDR that corresponds to the subnet (for example, ``/24``)
+
+When using the :command:`openstack server create` command, use the ``--hint``
+flag. For example, to specify the IP subnet ``192.168.1.1/24``:
+
+.. code-block:: console
+
+ $ openstack server create \
+ --image cedef40a-ed67-4d10-800e-17455edce175 --flavor 1 \
+ --hint build_near_host_ip=192.168.1.1 --hint cidr=/24 \
+ server-1
+
+With the API, use the ``os:scheduler_hints`` key:
+
+.. code-block:: json
+
+ {
+ "server": {
+ "name": "server-1",
+ "imageRef": "cedef40a-ed67-4d10-800e-17455edce175",
+ "flavorRef": "1"
+ },
+ "os:scheduler_hints": {
+ "build_near_host_ip": "192.168.1.1",
+           "cidr": "/24"
+ }
+ }
+
+
+.. _weights:
+
+Weights
+-------
+
+.. figure:: /_static/images/nova-weighting-hosts.png
+
+When resourcing instances, the filter scheduler filters and weighs each host
+in the list of acceptable hosts. Each time the scheduler selects a host, it
+virtually consumes resources on it, and subsequent selections are adjusted
+accordingly. This is useful when a user requests a large number of identical
+instances, because a weight is computed for each requested instance.
+
+In order to prioritize one weigher over another, all weighers have to define a
+multiplier that is applied before computing the weight for a node. All weights
+are normalized beforehand so that the multiplier can be applied easily.
+Therefore, the final weight for the object will be::
+
+ weight = w1_multiplier * norm(w1) + w2_multiplier * norm(w2) + ...
+
+Hosts are weighted based on the following config options:
+
+- :oslo.config:option:`filter_scheduler.host_subset_size`
+- :oslo.config:option:`filter_scheduler.weight_classes`
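+
+For example, these are the defaults, shown explicitly:
+
+.. code-block:: ini
+
+   [filter_scheduler]
+   host_subset_size = 1
+   weight_classes = nova.scheduler.weights.all_weighers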
+
+
+``RAMWeigher``
+~~~~~~~~~~~~~~
+
+Compute weight based on available RAM on the compute node.
+Sort with the largest weight winning. If the multiplier,
+:oslo.config:option:`filter_scheduler.ram_weight_multiplier`, is negative, the
+host with least RAM available will win (useful for stacking hosts, instead
+of spreading).
+
+Starting with the Stein release, if a per-aggregate value with the key
+``ram_weight_multiplier`` is found, this value is used as the RAM weight
+multiplier. Otherwise, it falls back to
+:oslo.config:option:`filter_scheduler.ram_weight_multiplier`.
+If more than one value is found for a host in aggregate metadata, the minimum
+value will be used.
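+
+For example, a sketch of configuring the scheduler to stack instances by RAM
+rather than spread them:
+
+.. code-block:: ini
+
+   [filter_scheduler]
+   ram_weight_multiplier = -1.0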
+
+
+``CPUWeigher``
+~~~~~~~~~~~~~~
+
+Compute weight based on available vCPUs on the compute node.
+Sort with the largest weight winning. If the multiplier,
+:oslo.config:option:`filter_scheduler.cpu_weight_multiplier`, is negative, the
+host with least CPUs available will win (useful for stacking hosts, instead
+of spreading).
+
+Starting with the Stein release, if a per-aggregate value with the key
+``cpu_weight_multiplier`` is found, this value is used as the CPU weight
+multiplier. Otherwise, it falls back to
+:oslo.config:option:`filter_scheduler.cpu_weight_multiplier`. If
+more than one value is found for a host in aggregate metadata, the minimum
+value will be used.
+
+
+``DiskWeigher``
+~~~~~~~~~~~~~~~
+
+Hosts are weighted and sorted by free disk space with the
+largest weight winning. If the multiplier is negative, the host with less disk
+space available will win (useful for stacking hosts, instead of spreading).
+
+Starting with the Stein release, if a per-aggregate value with the key
+``disk_weight_multiplier`` is found, this value is used as the disk weight
+multiplier. Otherwise, it falls back to
+:oslo.config:option:`filter_scheduler.disk_weight_multiplier`. If
+more than one value is found for a host in aggregate metadata, the minimum value
+will be used.
+
+
+``MetricsWeigher``
+~~~~~~~~~~~~~~~~~~
+
+This weigher computes the weight based on various metrics of the compute node
+host. The metrics to be weighed and their weighting ratios are specified using
+the :oslo.config:option:`metrics.weight_setting` config option. For example:
+
+.. code-block:: ini
+
+ [metrics]
+ weight_setting = name1=1.0, name2=-1.0
+
+You can specify whether the metrics are required, along with the weight to
+apply to hosts for which they are unavailable, using the
+:oslo.config:option:`metrics.required` and
+:oslo.config:option:`metrics.weight_of_unavailable` config options,
+respectively.
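+
+For example, a sketch of both options (the values are illustrative):
+
+.. code-block:: ini
+
+   [metrics]
+   required = false
+   weight_of_unavailable = -10000.0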
+
+Starting with the Stein release, if a per-aggregate value with the key
+``metrics_weight_multiplier`` is found, this value is used as the
+metrics weight multiplier. Otherwise, it falls back to
+:oslo.config:option:`metrics.weight_multiplier`. If more than
+one value is found for a host in aggregate metadata, the minimum value will
+be used.
+
+
+``IoOpsWeigher``
+~~~~~~~~~~~~~~~~
+
+This weigher computes the weight based on the compute node host's workload.
+The default behavior is to prefer lightly loaded compute hosts. If the
+multiplier is positive, the weigher instead prefers heavily loaded compute
+hosts, the opposite of the default.
+
+Starting with the Stein release, if a per-aggregate value with the key
+``io_ops_weight_multiplier`` is found, this value is used as the I/O ops
+weight multiplier. Otherwise, it falls back to
+:oslo.config:option:`filter_scheduler.io_ops_weight_multiplier`.
+If more than one value is found for a host in aggregate metadata, the minimum
+value will be used.
+
+``PCIWeigher``
+~~~~~~~~~~~~~~
+
+Compute a weighting based on the number of PCI devices on the
+host and the number of PCI devices requested by the instance. For example,
+given three hosts - one with a single PCI device, one with many PCI devices,
+and one with no PCI devices - nova should prioritise these differently based
+on the demands of the instance. If the instance requests a single PCI device,
+then the first of the hosts should be preferred. Similarly, if the instance
+requests multiple PCI devices, then the second of these hosts would be
+preferred. Finally, if the instance does not request a PCI device, then the
+last of these hosts should be preferred.
+
+For this to be of any value, at least one of the :ref:`PciPassthroughFilter` or
+:ref:`NUMATopologyFilter` filters must be enabled.
+
+Starting with the Stein release, if a per-aggregate value with the key
+``pci_weight_multiplier`` is found, this value is used as the PCI weight
+multiplier. Otherwise, it falls back to
+:oslo.config:option:`filter_scheduler.pci_weight_multiplier`.
+If more than one value is found for a host in aggregate metadata, the
+minimum value will be used.
+
+.. important::
+
+   Only positive values are allowed for the multiplier of this weigher, as a
+   negative value would force non-PCI instances away from non-PCI hosts, thus
+   causing future scheduling issues.
+
+``ServerGroupSoftAffinityWeigher``
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+This weigher computes the weight based on the number of instances from the
+same server group already running on a host. The host with the largest weight
+is preferred for the new instance. Only a positive multiplier value is allowed
+for the calculation.
+
+Starting with the Stein release, if a per-aggregate value with the key
+``soft_affinity_weight_multiplier`` is found, this value is used as the soft
+affinity weight multiplier. Otherwise, it falls back to
+:oslo.config:option:`filter_scheduler.soft_affinity_weight_multiplier`.
+If more than one value is found for a host in aggregate metadata, the
+minimum value will be used.
+
+``ServerGroupSoftAntiAffinityWeigher``
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+This weigher computes the weight as the negative of the number of instances
+from the same server group already running on a host. The host with the
+largest weight is preferred for the new instance. Only a positive multiplier
+value is allowed for the calculation.
+
+Starting with the Stein release, if a per-aggregate value with the key
+``soft_anti_affinity_weight_multiplier`` is found, this value is used as the
+soft anti-affinity weight multiplier. Otherwise, it falls back to
+:oslo.config:option:`filter_scheduler.soft_anti_affinity_weight_multiplier`.
+If more than one value is found for a host in aggregate metadata, the minimum
+value will be used.
+
+``BuildFailureWeigher``
+~~~~~~~~~~~~~~~~~~~~~~~
+
+Weighs hosts by the number of recent failed boot attempts. It considers the
+build failure counter and can negatively weigh hosts with recent failures.
+This avoids taking computes fully out of rotation.
+
+Starting with the Stein release, if a per-aggregate value with the key
+``build_failure_weight_multiplier`` is found, this value is used as the
+build failure weight multiplier. Otherwise, it falls back to
+:oslo.config:option:`filter_scheduler.build_failure_weight_multiplier`. If
+more than one value is found for a host in aggregate metadata, the minimum
+value will be used.
+
+.. _cross-cell-weigher:
+
+``CrossCellWeigher``
+~~~~~~~~~~~~~~~~~~~~
+
+.. versionadded:: 21.0.0 (Ussuri)
+
+Weighs hosts based on which cell they are in. "Local" cells are preferred when
+moving an instance. Use configuration option
+:oslo.config:option:`filter_scheduler.cross_cell_move_weight_multiplier` to
+control the weight. If a per-aggregate value with the key
+``cross_cell_move_weight_multiplier`` is found, this value is used as the
+cross-cell move weight multiplier. Otherwise, it falls back to
+:oslo.config:option:`filter_scheduler.cross_cell_move_weight_multiplier`. If
+more than one value is found for a host in aggregate metadata, the minimum
+value will be used.
+
+
+Utilization-aware scheduling
+----------------------------
+
+.. warning::
+
+ This feature is poorly tested and may not work as expected. It may be
+ removed in a future release. Use at your own risk.
+
+It is possible to schedule instances using advanced scheduling decisions. These
+decisions are made based on enhanced usage statistics encompassing data like
+memory cache utilization, memory bandwidth utilization, or network bandwidth
+utilization. This is disabled by default. The administrator can configure how
+the metrics are weighted in the configuration file by using the
+:oslo.config:option:`metrics.weight_setting` config option. For example, to
+configure ``metric1`` with ``ratio1`` and ``metric2`` with ``ratio2``:
+
+.. code-block:: ini
+
+ [metrics]
+   weight_setting = metric1=ratio1, metric2=ratio2
+
+
+Allocation ratios
+-----------------
+
+Allocation ratios allow for the overcommit of host resources.
+The following configuration options exist to control allocation ratios
+per compute node to support this overcommit of resources:
+
+* :oslo.config:option:`cpu_allocation_ratio` allows overriding the ``VCPU``
+ inventory allocation ratio for a compute node
+* :oslo.config:option:`ram_allocation_ratio` allows overriding the ``MEMORY_MB``
+ inventory allocation ratio for a compute node
+* :oslo.config:option:`disk_allocation_ratio` allows overriding the ``DISK_GB``
+ inventory allocation ratio for a compute node
+
+Prior to the 19.0.0 Stein release, if left unset, the ``cpu_allocation_ratio``
+defaults to 16.0, the ``ram_allocation_ratio`` defaults to 1.5, and the
+``disk_allocation_ratio`` defaults to 1.0.
+
+Starting with the 19.0.0 Stein release, the following configuration options
+control the initial allocation ratio values for a compute node:
+
+* :oslo.config:option:`initial_cpu_allocation_ratio` the initial VCPU
+ inventory allocation ratio for a new compute node record, defaults to 16.0
+* :oslo.config:option:`initial_ram_allocation_ratio` the initial MEMORY_MB
+ inventory allocation ratio for a new compute node record, defaults to 1.5
+* :oslo.config:option:`initial_disk_allocation_ratio` the initial DISK_GB
+ inventory allocation ratio for a new compute node record, defaults to 1.0
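+
+For example, a sketch of overriding the initial ratios in ``nova.conf`` (the
+values are illustrative):
+
+.. code-block:: ini
+
+   [DEFAULT]
+   initial_cpu_allocation_ratio = 8.0
+   initial_ram_allocation_ratio = 1.0
+   initial_disk_allocation_ratio = 1.0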
+
+Scheduling considerations
+~~~~~~~~~~~~~~~~~~~~~~~~~
+
+The allocation ratio configuration is used both during reporting of compute
+node `resource provider inventory`_ to the placement service and during
+scheduling.
+
+.. _resource provider inventory: https://docs.openstack.org/api-ref/placement/?expanded=#resource-provider-inventories
+
+Usage scenarios
+~~~~~~~~~~~~~~~
+
+Since allocation ratios can be set via nova configuration, host aggregate
+metadata and the placement API, it can be confusing to know which should be
+used. This really depends on your scenario. A few common scenarios are detailed
+here.
+
+1. When the deployer wants to **always** set an override value for a resource
+ on a compute node, the deployer should ensure that the
+ :oslo.config:option:`DEFAULT.cpu_allocation_ratio`,
+ :oslo.config:option:`DEFAULT.ram_allocation_ratio` and
+ :oslo.config:option:`DEFAULT.disk_allocation_ratio` configuration options
+ are set to a non-None value.
+ This will make the ``nova-compute`` service overwrite any externally-set
+ allocation ratio values set via the placement REST API.
+
+2. When the deployer wants to set an **initial** value for a compute node
+ allocation ratio but wants to allow an admin to adjust this afterwards
+ without making any configuration file changes, the deployer should set the
+ :oslo.config:option:`DEFAULT.initial_cpu_allocation_ratio`,
+ :oslo.config:option:`DEFAULT.initial_ram_allocation_ratio` and
+ :oslo.config:option:`DEFAULT.initial_disk_allocation_ratio` configuration
+ options and then manage the allocation ratios using the placement REST API
+ (or `osc-placement`_ command line interface).
+ For example:
+
+ .. code-block:: console
+
+ $ openstack resource provider inventory set \
+ --resource VCPU:allocation_ratio=1.0 \
+ --amend 815a5634-86fb-4e1e-8824-8a631fee3e06
+
+3. When the deployer wants to **always** use the placement API to set
+ allocation ratios, then the deployer should ensure that the
+ :oslo.config:option:`DEFAULT.cpu_allocation_ratio`,
+ :oslo.config:option:`DEFAULT.ram_allocation_ratio` and
+   :oslo.config:option:`DEFAULT.disk_allocation_ratio` configuration options
+   are set to ``None`` and then manage the allocation ratios using the
+   placement REST API (or `osc-placement`_ command line interface).
+
+ This scenario is the workaround for
+ `bug 1804125 `_.
+
+.. versionchanged:: 19.0.0 (Stein)
+
+ The :oslo.config:option:`DEFAULT.initial_cpu_allocation_ratio`,
+ :oslo.config:option:`DEFAULT.initial_ram_allocation_ratio` and
+ :oslo.config:option:`DEFAULT.initial_disk_allocation_ratio` configuration
+ options were introduced in Stein. Prior to this release, setting any of
+ :oslo.config:option:`DEFAULT.cpu_allocation_ratio`,
+ :oslo.config:option:`DEFAULT.ram_allocation_ratio` or
+ :oslo.config:option:`DEFAULT.disk_allocation_ratio` to a non-null value
+   would ensure the user-configured value was always overridden.
+
+.. _osc-placement: https://docs.openstack.org/osc-placement/latest/index.html
+
+.. _hypervisor-specific-considerations:
+
+Hypervisor-specific considerations
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Nova provides three configuration options that can be used to set aside some
+number of resources that will not be consumed by an instance, whether these
+resources are overcommitted or not:
+
+- :oslo.config:option:`reserved_host_cpus`
+- :oslo.config:option:`reserved_host_memory_mb`
+- :oslo.config:option:`reserved_host_disk_mb`
+
+Some virt drivers may benefit from the use of these options to account for
+hypervisor-specific overhead.
+
+HyperV
+ Hyper-V creates a VM memory file on the local disk when an instance starts.
+ The size of this file corresponds to the amount of RAM allocated to the
+ instance.
+
+ You should configure the
+ :oslo.config:option:`reserved_host_disk_mb` config option to
+ account for this overhead, based on the amount of memory available
+ to instances.
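+
+  For example, a rough sketch in ``nova.conf`` (the value is illustrative and
+  depends on the RAM available to instances on the host):
+
+  .. code-block:: ini
+
+     [DEFAULT]
+     reserved_host_disk_mb = 81920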
+
+
+Cells considerations
+--------------------
+
+By default, cells are enabled for scheduling new instances, but they can be
+disabled (new instances will not be scheduled to a disabled cell). This may be
+useful during cell maintenance, failures, or other interventions. Note that
+creating pre-disabled cells and enabling or disabling existing cells must be
+followed by a restart or SIGHUP of the nova-scheduler service for the changes
+to take effect.
+
+Command-line interface
+~~~~~~~~~~~~~~~~~~~~~~
+
+The :command:`nova-manage` command-line client supports the cell-disable
+related commands. To enable or disable a cell, use
+:command:`nova-manage cell_v2 update_cell` and to create pre-disabled cells,
+use :command:`nova-manage cell_v2 create_cell`. See the
+:ref:`man-page-cells-v2` man page for details on command usage.
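+
+For example, a sketch of disabling and later re-enabling an existing cell
+(the ``$CELL_UUID`` placeholder is illustrative):
+
+.. code-block:: console
+
+   $ nova-manage cell_v2 update_cell --cell_uuid $CELL_UUID --disable
+   $ nova-manage cell_v2 update_cell --cell_uuid $CELL_UUID --enable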
+
+
+.. _compute-capabilities-as-traits:
+
+Compute capabilities as traits
+------------------------------
+
+.. versionadded:: 19.0.0 (Stein)
+
+The ``nova-compute`` service will report certain ``COMPUTE_*`` traits based on
+its compute driver capabilities to the placement service. The traits will be
+associated with the resource provider for that compute service. These traits
+can be used during scheduling by configuring flavors with
+:ref:`Required traits ` or
+:ref:`Forbidden traits `. For example, if you
+have a host aggregate with a set of compute nodes that support multi-attach
+volumes, you can restrict a flavor to that aggregate by adding the
+``trait:COMPUTE_VOLUME_MULTI_ATTACH=required`` extra spec to the flavor and
+then restrict the flavor to the aggregate
+:ref:`as normal `.
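+
+For example, a sketch of the extra spec in question (the flavor name is
+illustrative):
+
+.. code-block:: console
+
+   $ openstack flavor set \
+     --property trait:COMPUTE_VOLUME_MULTI_ATTACH=required multiattach-flavor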
+
+Here is an example of a libvirt compute node resource provider that is
+exposing some CPU features as traits, driver capabilities as traits, and a
+custom trait denoted by the ``CUSTOM_`` prefix:
+
+.. code-block:: console
+
+ $ openstack --os-placement-api-version 1.6 resource provider trait list \
+ > d9b3dbc4-50e2-42dd-be98-522f6edaab3f --sort-column name
+ +---------------------------------------+
+ | name |
+ +---------------------------------------+
+ | COMPUTE_DEVICE_TAGGING |
+ | COMPUTE_NET_ATTACH_INTERFACE |
+ | COMPUTE_NET_ATTACH_INTERFACE_WITH_TAG |
+ | COMPUTE_TRUSTED_CERTS |
+ | COMPUTE_VOLUME_ATTACH_WITH_TAG |
+ | COMPUTE_VOLUME_EXTEND |
+ | COMPUTE_VOLUME_MULTI_ATTACH |
+ | CUSTOM_IMAGE_TYPE_RBD |
+ | HW_CPU_X86_MMX |
+ | HW_CPU_X86_SSE |
+ | HW_CPU_X86_SSE2 |
+ | HW_CPU_X86_SVM |
+ +---------------------------------------+
+
+**Rules**
+
+There are some rules associated with capability-defined traits.
+
+1. The compute service "owns" these traits and will add/remove them when the
+ ``nova-compute`` service starts and when the ``update_available_resource``
+   periodic task runs, with the run interval controlled by the
+   :oslo.config:option:`update_resources_interval` config option.
+
+2. The compute service will not remove any custom traits set on the resource
+ provider externally, such as the ``CUSTOM_IMAGE_TYPE_RBD`` trait in the
+ example above.
+
+3. If compute-owned traits are removed from the resource provider externally,
+   for example by running ``openstack resource provider trait delete
+   <rp_uuid>``, the compute service will add its traits again on restart or
+   SIGHUP.
+
+4. If a compute trait is set on the resource provider externally which is not
+ supported by the driver, for example by adding the ``COMPUTE_VOLUME_EXTEND``
+ trait when the driver does not support that capability, the compute service
+ will automatically remove the unsupported trait on restart or SIGHUP.
+
+5. Compute capability traits are standard traits defined in the `os-traits`_
+ library.
+
+.. _os-traits: https://opendev.org/openstack/os-traits/src/branch/master/os_traits/compute
+
+:ref:`Further information on capabilities and traits
+` can be found in the
+:doc:`Technical Reference Deep Dives section `.
+
+
+.. _custom-scheduler-filters:
+
+Writing Your Own Filter
+-----------------------
+
+To create **your own filter**, you must inherit from |BaseHostFilter| and
+implement one method: ``host_passes``. This method should return ``True`` if a
+host passes the filter and ``False`` otherwise. It takes two parameters:
+
+* the ``HostState`` object, which gives access to attributes of the host
+* the ``RequestSpec`` object, which describes the user request, including the
+  flavor, the image and the scheduler hints
+
+For further details about each of those objects and their corresponding
+attributes, refer to the codebase (at least by looking at the other filters
+code) or ask for help in the ``#openstack-nova`` IRC channel.
+
+In addition, if your custom filter uses non-standard extra specs, you must
+register validators for these extra specs. Examples of validators can be found
+in the ``nova.api.validation.extra_specs`` module. These should be registered
+via the ``nova.api.extra_spec_validator`` `entrypoint`__.
+
+The module containing your custom filter(s) must be packaged and available in
+the same environment(s) as the nova controllers, or specifically the
+:program:`nova-scheduler` and :program:`nova-api` services.
+As an example, consider the following sample package, which is the `minimal
+structure`__ for a standard, setuptools-based Python package:
+
+.. code-block:: none
+
+ acmefilter/
+ acmefilter/
+ __init__.py
+ validators.py
+ setup.py
+
+Where ``__init__.py`` contains:
+
+.. code-block:: python
+
+ from oslo_log import log as logging
+ from nova.scheduler import filters
+
+ LOG = logging.getLogger(__name__)
+
+ class AcmeFilter(filters.BaseHostFilter):
+
+ def host_passes(self, host_state, spec_obj):
+ extra_spec = spec_obj.flavor.extra_specs.get('acme:foo')
+ LOG.info("Extra spec value was '%s'", extra_spec)
+
+ # do meaningful stuff here...
+
+ return True
+
+``validators.py`` contains:
+
+.. code-block:: python
+
+ from nova.api.validation.extra_specs import base
+
+ def register():
+ validators = [
+ base.ExtraSpecValidator(
+ name='acme:foo',
+ description='My custom extra spec.'
+               description='My custom extra spec.',
+ 'type': str,
+ 'enum': [
+ 'bar',
+ 'baz',
+ ],
+ },
+ ),
+ ]
+
+ return validators
+
+``setup.py`` contains:
+
+.. code-block:: python
+
+ from setuptools import setup
+
+ setup(
+ name='acmefilter',
+ version='0.1',
+ description='My custom filter',
+ packages=[
+ 'acmefilter'
+ ],
+ entry_points={
+ 'nova.api.extra_spec_validators': [
+ 'acme = acmefilter.validators',
+ ],
+ },
+ )
+
+To enable this, you would set the following in :file:`nova.conf`:
+
+.. code-block:: ini
+
+ [filter_scheduler]
+ available_filters = nova.scheduler.filters.all_filters
+ available_filters = acmefilter.AcmeFilter
+ enabled_filters = ComputeFilter,AcmeFilter
+
+.. note::
+
+ You **must** add custom filters to the list of available filters using the
+ :oslo.config:option:`filter_scheduler.available_filters` config option in
+ addition to enabling them via the
+ :oslo.config:option:`filter_scheduler.enabled_filters` config option. The
+ default ``nova.scheduler.filters.all_filters`` value for the former only
+ includes the filters shipped with nova.
+
+With these settings, all of the standard nova filters and the custom
+``AcmeFilter`` filter are available to the scheduler, but just the
+``ComputeFilter`` and ``AcmeFilter`` will be used on each request.
+
+__ https://packaging.python.org/specifications/entry-points/
+__ https://python-packaging.readthedocs.io/en/latest/minimal.html
+
+Writing your own weigher
+------------------------
+
+To create your own weigher, you must inherit from |BaseHostWeigher|. A weigher
+can implement both the ``weight_multiplier`` and ``_weight_object`` methods or
+just the ``weight_objects`` method. Override the ``weight_objects`` method only
+if you need access to all objects in order to calculate the weights; it should
+return a list of weights rather than modify the weight of each object directly,
+since the final weights are normalized and computed by
+``weight.BaseWeightHandler``.
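+
+As a rough sketch (the class name is illustrative, and the packaging follows
+the same pattern as the filter example above), a minimal weigher that prefers
+hosts with more free RAM could look like this:
+
+.. code-block:: python
+
+   from nova.scheduler import weights
+
+   class AcmeRAMWeigher(weights.BaseHostWeigher):
+
+       def weight_multiplier(self, host_state):
+           # a positive multiplier favors hosts with higher raw weights;
+           # a negative one inverts the preference
+           return 1.0
+
+       def _weight_object(self, host_state, weight_properties):
+           # raw weight; final weights are normalized and combined by
+           # weight.BaseWeightHandler
+           return float(host_state.free_ram_mb)
+
+The weigher would then be enabled by adding its class to the
+:oslo.config:option:`filter_scheduler.weight_classes` config option.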
+
+
+.. |BaseHostFilter| replace:: :class:`BaseHostFilter <nova.scheduler.filters.BaseHostFilter>`
+.. |BaseHostWeigher| replace:: :class:`BaseHostWeigher <nova.scheduler.weights.BaseHostWeigher>`
diff --git a/doc/source/admin/secure-boot.rst b/doc/source/admin/secure-boot.rst
new file mode 100644
index 00000000000..3e2ccb084b0
--- /dev/null
+++ b/doc/source/admin/secure-boot.rst
@@ -0,0 +1,136 @@
+===========
+Secure Boot
+===========
+
+.. versionadded:: 14.0.0 (Newton)
+
+.. versionchanged:: 23.0.0 (Wallaby)
+
+ Added support for Secure Boot to the libvirt driver.
+
+Nova supports configuring `UEFI Secure Boot`__ for guests. Secure Boot aims to
+ensure no unsigned kernel code runs on a machine.
+
+.. __: https://en.wikipedia.org/wiki/Secure_boot
+
+
+Enabling Secure Boot
+--------------------
+
+Currently the configuration of UEFI guest bootloaders is only supported when
+using the libvirt compute driver with a :oslo.config:option:`libvirt.virt_type`
+of ``kvm`` or ``qemu`` or when using the Hyper-V compute driver with certain
+machine types. In both cases, it requires that the guests also be configured
+with a :doc:`UEFI bootloader <uefi>`.
+
+With these requirements satisfied, you can verify UEFI Secure Boot support by
+inspecting the traits on the compute node's resource provider:
+
+.. code-block:: bash
+
+ $ COMPUTE_UUID=$(openstack resource provider list --name $HOST -f value -c uuid)
+ $ openstack resource provider trait list $COMPUTE_UUID | grep COMPUTE_SECURITY_UEFI_SECURE_BOOT
+ | COMPUTE_SECURITY_UEFI_SECURE_BOOT |
+
+
+Configuring a flavor or image
+-----------------------------
+
+Configuring UEFI Secure Boot for guests varies depending on the compute driver
+in use. In all cases, a :doc:`UEFI guest bootloader <uefi>` must be configured
+for the guest but there are also additional requirements depending on the
+compute driver in use.
+
+.. rubric:: Libvirt
+
+As the name would suggest, UEFI Secure Boot requires that a UEFI bootloader be
+configured for guests. When this is done, UEFI Secure Boot support can be
+configured using the :nova:extra-spec:`os:secure_boot` extra spec or equivalent
+image metadata property. For example, to configure an image that meets both of
+these requirements:
+
+.. code-block:: bash
+
+ $ openstack image set \
+ --property hw_firmware_type=uefi \
+ --property os_secure_boot=required \
+ $IMAGE
+
+.. note::
+
+ On x86_64 hosts, enabling secure boot also requires configuring use of the
+ Q35 machine type. This can be configured on a per-guest basis using the
+ ``hw_machine_type`` image metadata property or automatically for all guests
+ created on a host using the :oslo.config:option:`libvirt.hw_machine_type`
+ config option.
+
+It is also possible to explicitly request that secure boot be disabled. This is
+the default behavior, so this request is typically useful when an admin wishes
+to explicitly prevent a user requesting secure boot by uploading their own
+image with relevant image properties. For example, to disable secure boot via
+the flavor:
+
+.. code-block:: bash
+
+ $ openstack flavor set --property os:secure_boot=disabled $FLAVOR
+
+Finally, it is possible to request that secure boot be enabled if the host
+supports it. This is only possible via the image metadata property. When this
+is requested, secure boot will only be enabled if the host supports this
+feature and the other constraints, namely that a UEFI guest bootloader is
+configured, are met. For example:
+
+.. code-block:: bash
+
+ $ openstack image set --property os_secure_boot=optional $IMAGE
+
+.. note::
+
+ If both the image metadata property and flavor extra spec are provided,
+ they must match. If they do not, an error will be raised.
+
+.. rubric:: Hyper-V
+
+Like libvirt, configuring a guest for UEFI Secure Boot support also requires
+that it be configured with a UEFI bootloader: As noted in :doc:`uefi`, it is
+not possible to do this explicitly in Hyper-V. Rather, you should configure the
+guest to use the *Generation 2* machine type. In addition to this, the Hyper-V
+compute driver also requires that the OS type be configured.
+
+When both of these constraints are met, you can configure UEFI Secure Boot
+support using the :nova:extra-spec:`os:secure_boot` extra spec or equivalent
+image metadata property. For example, to configure an image that meets all the
+above requirements:
+
+.. code-block:: bash
+
+ $ openstack image set \
+ --property hw_machine_type=hyperv-gen2 \
+ --property os_type=windows \
+ --property os_secure_boot=required \
+ $IMAGE
+
+As with the libvirt driver, it is also possible to request that secure boot be
+disabled. This is the default behavior, so this is typically useful when an
+admin wishes to explicitly prevent a user requesting secure boot. For example,
+to disable secure boot via the flavor:
+
+.. code-block:: bash
+
+   $ openstack flavor set --property os:secure_boot=disabled $FLAVOR
+
+However, unlike the libvirt driver, the Hyper-V driver does not respect the
+``optional`` value for the image metadata property. If this is configured, it
+will be silently ignored.
+
+
+References
+----------
+
+* `Allow Secure Boot (SB) for QEMU- and KVM-based guests (spec)`__
+* `Securing Secure Boot with System Management Mode`__
+* `Generation 2 virtual machine security settings for Hyper-V`__
+
+.. __: https://specs.openstack.org/openstack/nova-specs/specs/wallaby/approved/allow-secure-boot-for-qemu-kvm-guests.html
+.. __: http://events17.linuxfoundation.org/sites/events/files/slides/kvmforum15-smm.pdf
+.. __: https://docs.microsoft.com/en-us/windows-server/virtualization/hyper-v/learn-more/generation-2-virtual-machine-security-settings-for-hyper-v
diff --git a/doc/source/admin/secure-live-migration-with-qemu-native-tls.rst b/doc/source/admin/secure-live-migration-with-qemu-native-tls.rst
new file mode 100644
index 00000000000..0e6206d0b1f
--- /dev/null
+++ b/doc/source/admin/secure-live-migration-with-qemu-native-tls.rst
@@ -0,0 +1,198 @@
+==========================================
+Secure live migration with QEMU-native TLS
+==========================================
+
+Context
+~~~~~~~
+
+The encryption offered by nova's
+:oslo.config:option:`libvirt.live_migration_tunnelled` does not secure
+all the different migration streams of a nova instance, namely: guest
+RAM, device state, and disks (via NBD) when using non-shared storage.
+Further, the "tunnelling via libvirtd" has inherent limitations: (a) it
+cannot handle live migration of disks in a non-shared storage setup
+(a.k.a. "block migration"); and (b) has a huge performance overhead and
+latency, because it burns more CPU and memory bandwidth due to the increased
+number of data copies on both source and destination hosts.
+
+To solve this existing limitation, QEMU and libvirt have gained (refer
+:ref:`below ` for version details) support for "native
+TLS", i.e. TLS built into QEMU. This will secure all data transports,
+including disks that are not on shared storage, without incurring the
+limitations of the "tunnelled via libvirtd" transport.
+
+To take advantage of the "native TLS" support in QEMU and libvirt, nova
+introduced a new configuration attribute,
+:oslo.config:option:`libvirt.live_migration_with_native_tls`.
+
+
+.. _`Prerequisites`:
+
+Prerequisites
+~~~~~~~~~~~~~
+
+(1) Version requirement: This feature needs at least libvirt 4.4.0 and
+ QEMU 2.11.
+
+(2) A pre-configured TLS environment—i.e. CA, server, and client
+ certificates, their file permissions, et al—must be "correctly"
+ configured (typically by an installer tool) on all relevant compute
+ nodes. To simplify your PKI (Public Key Infrastructure) setup, use
+ deployment tools that take care of handling all the certificate
+ lifecycle management. For example, refer to the "`TLS everywhere
+ `__"
+ guide from the TripleO project.
+
+(3) Password-less SSH setup for all relevant compute nodes.
+
+(4) On all relevant compute nodes, ensure the TLS-related config
+ attributes in ``/etc/libvirt/qemu.conf`` are in place::
+
+ default_tls_x509_cert_dir = "/etc/pki/qemu"
+ default_tls_x509_verify = 1
+
+    If it is not already configured, modify ``/etc/sysconfig/libvirtd``
+    on both nodes (``ComputeNode1`` and ``ComputeNode2``) to listen for
+    TCP/IP connections::
+
+ LIBVIRTD_ARGS="--listen"
+
+ Then, restart the libvirt daemon (also on both nodes)::
+
+ $ systemctl restart libvirtd
+
+ Refer to the "`Related information`_" section on a note about the
+ other TLS-related configuration attributes in
+ ``/etc/libvirt/qemu.conf``.
+
+
+Validating your TLS environment on compute nodes
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Assuming you have two compute hosts (``ComputeNode1`` and
+``ComputeNode2``), run the :command:`virt-pki-validate` tool (it comes
+with the ``libvirt-client`` package on your Linux distribution) on both
+nodes to ensure all the necessary PKI files are configured::
+
+ [ComputeNode1]$ virt-pki-validate
+ Found /usr/bin/certtool
+ Found CA certificate /etc/pki/CA/cacert.pem for TLS Migration Test
+ Found client certificate /etc/pki/libvirt/clientcert.pem for ComputeNode1
+ Found client private key /etc/pki/libvirt/private/clientkey.pem
+ Found server certificate /etc/pki/libvirt/servercert.pem for ComputeNode1
+ Found server private key /etc/pki/libvirt/private/serverkey.pem
+ Make sure /etc/sysconfig/libvirtd is setup to listen to
+ TCP/IP connections and restart the libvirtd service
+
+ [ComputeNode2]$ virt-pki-validate
+ Found /usr/bin/certtool
+ Found CA certificate /etc/pki/CA/cacert.pem for TLS Migration Test
+ Found client certificate /etc/pki/libvirt/clientcert.pem for ComputeNode2
+ Found client private key /etc/pki/libvirt/private/clientkey.pem
+ Found server certificate /etc/pki/libvirt/servercert.pem for ComputeNode2
+ Found server private key /etc/pki/libvirt/private/serverkey.pem
+ Make sure /etc/sysconfig/libvirtd is setup to listen to
+ TCP/IP connections and restart the libvirtd service
+
+
+Other TLS environment related checks on compute nodes
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+**IMPORTANT**: Ensure that the permissions of the certificate files and
+keys in the ``/etc/pki/qemu/`` directory on both source *and*
+destination compute nodes are ``0640``, with ``root:qemu`` as the
+user/group. For example, on a Fedora-based system::
+
+ $ ls -lasrtZ /etc/pki/qemu
+ total 32
+ 0 drwxr-xr-x. 10 root root system_u:object_r:cert_t:s0 110 Dec 10 10:39 ..
+ 4 -rw-r-----. 1 root qemu unconfined_u:object_r:cert_t:s0 1464 Dec 10 11:08 ca-cert.pem
+ 4 -rw-r-----. 1 root qemu unconfined_u:object_r:cert_t:s0 1558 Dec 10 11:08 server-cert.pem
+ 4 -rw-r-----. 1 root qemu unconfined_u:object_r:cert_t:s0 1619 Dec 10 11:09 client-cert.pem
+ 8 -rw-r-----. 1 root qemu unconfined_u:object_r:cert_t:s0 8180 Dec 10 11:09 client-key.pem
+ 8 -rw-r-----. 1 root qemu unconfined_u:object_r:cert_t:s0 8177 Dec 11 05:35 server-key.pem
+ 0 drwxr-xr-x. 2 root root unconfined_u:object_r:cert_t:s0 146 Dec 11 06:01 .
+
+
+Performing the migration
+~~~~~~~~~~~~~~~~~~~~~~~~
+
+(1) On all relevant compute nodes, enable the
+ :oslo.config:option:`libvirt.live_migration_with_native_tls`
+ configuration attribute and set the
+    :oslo.config:option:`libvirt.live_migration_scheme`
+    configuration attribute to ``tls``::
+
+ [libvirt]
+ live_migration_with_native_tls = true
+ live_migration_scheme = tls
+
+ .. note::
+ Setting both
+ :oslo.config:option:`libvirt.live_migration_with_native_tls` and
+ :oslo.config:option:`libvirt.live_migration_tunnelled` at the
+ same time is invalid (and disallowed).
+
+ .. note::
+ Not setting
+ :oslo.config:option:`libvirt.live_migration_scheme` to ``tls``
+ will result in libvirt using the unencrypted TCP connection
+ without displaying any error or a warning in the logs.
+
+ And restart the ``nova-compute`` service::
+
+ $ systemctl restart openstack-nova-compute
+
+(2) Now that all TLS-related configuration is in place, migrate guests
+ (with or without shared storage) from ``ComputeNode1`` to
+ ``ComputeNode2``. Refer to the :doc:`live-migration-usage` document
+ on details about live migration.
+
+
+.. _`Related information`:
+
+Related information
+~~~~~~~~~~~~~~~~~~~
+
+- If you have the relevant libvirt and QEMU versions (mentioned in the
+ "`Prerequisites`_" section earlier), then using the
+ :oslo.config:option:`libvirt.live_migration_with_native_tls` is
+ strongly recommended over the more limited
+ :oslo.config:option:`libvirt.live_migration_tunnelled` option, which
+ is intended to be deprecated in future.
+
+
+- There are in total *nine* TLS-related config options in
+ ``/etc/libvirt/qemu.conf``::
+
+ default_tls_x509_cert_dir
+ default_tls_x509_verify
+ nbd_tls
+ nbd_tls_x509_cert_dir
+ migrate_tls_x509_cert_dir
+
+ vnc_tls_x509_cert_dir
+ spice_tls_x509_cert_dir
+ vxhs_tls_x509_cert_dir
+ chardev_tls_x509_cert_dir
+
+ If you set both ``default_tls_x509_cert_dir`` and
+ ``default_tls_x509_verify`` parameters for all certificates, there is
+ no need to specify any of the other ``*_tls*`` config options.
+
+  The intention (of libvirt) is that you can just use the
+  ``default_tls_x509_*`` config attributes so that you don't need to set
+  any other ``*_tls*`` parameters, *unless* you need different
+  certificates for some services. The rationale for that is that some
+  services (e.g. migration / NBD) are only exposed to internal
+  infrastructure, while some services (VNC, SPICE) might be exposed
+  publicly, so they might need different certificates. For OpenStack this
+  does not matter, though; we will stick with the defaults.
+
+- If they are not already open, ensure you open up these TCP ports on
+  your firewall on all relevant compute nodes: ``16514`` (on which the
+  authenticated and encrypted TCP/IP socket will be listening) and
+  ``49152-49215`` (for regular migration). Otherwise you will get the
+  error ``internal error: unable to execute QEMU command
+  'drive-mirror': Failed to connect socket: No route to host``.
diff --git a/doc/source/admin/security-groups.rst b/doc/source/admin/security-groups.rst
index 9a55ef2800e..4419111fe75 100644
--- a/doc/source/admin/security-groups.rst
+++ b/doc/source/admin/security-groups.rst
@@ -12,31 +12,17 @@ that has no other defined security group. Unless you change the default, this
security group denies all incoming traffic and allows only outgoing traffic to
your instance.
-You can use the ``allow_same_net_traffic`` option in the
-``/etc/nova/nova.conf`` file to globally control whether the rules apply to
-hosts which share a network. There are two possible values:
-
-``True`` (default)
- Hosts on the same subnet are not filtered and are allowed to pass all types
- of traffic between them. On a flat network, this allows all instances from
- all projects unfiltered communication. With VLAN networking, this allows
- access between instances within the same project. You can also simulate this
- setting by configuring the default security group to allow all traffic from
- the subnet.
-
-``False``
- Security groups are enforced for all connections.
-
-Additionally, the number of maximum rules per security group is controlled by
-the ``security_group_rules`` and the number of allowed security groups per
-project is controlled by the ``security_groups`` quota (see
-:ref:`manage-quotas`).
+Security groups (and their quota) are managed by :neutron-doc:`Neutron, the
+networking service `.
-List and view current security groups
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+Working with security groups
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~
From the command-line you can get a list of security groups for the project,
-using the :command:`openstack` and :command:`nova` commands:
+using the :command:`openstack` command.
+
+List and view current security groups
+-------------------------------------
#. Ensure your system variables are set for the user and project for which you
are checking security group rules. For example:
@@ -83,7 +69,7 @@ using the :command:`openstack` and :command:`nova` commands:
allowed from all IPs.
Create a security group
-~~~~~~~~~~~~~~~~~~~~~~~
+-----------------------
When adding a new security group, you should pick a descriptive but brief name.
This name shows up in brief descriptions of the instances that use it where the
@@ -203,7 +189,7 @@ or "secgrp1".
+--------------------------------------+-------------+-----------+-----------------+-----------------------+
Delete a security group
-~~~~~~~~~~~~~~~~~~~~~~~
+-----------------------
#. Ensure your system variables are set for the user and project for which you
are deleting a security group.
@@ -221,7 +207,7 @@ Delete a security group
$ openstack security group delete global_http
Create security group rules for a cluster of instances
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+------------------------------------------------------
Source Groups are a special, dynamic way of defining the CIDR of allowed
sources. The user specifies a Source Group (Security Group name), and all the
diff --git a/doc/source/admin/security.rst b/doc/source/admin/security.rst
index 515e91bed1d..5743023e739 100644
--- a/doc/source/admin/security.rst
+++ b/doc/source/admin/security.rst
@@ -38,3 +38,22 @@ encryption in the ``metadata_agent.ini`` file.
.. code-block:: ini
nova_client_priv_key = PATH_TO_KEY
+
+
+Securing live migration streams with QEMU-native TLS
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+It is strongly recommended to secure all the different live migration
+streams of a nova instance, i.e. guest RAM, device state, and disks
+(via NBD) when using non-shared storage. For further details on how to
+set this up, refer to the
+:doc:`secure-live-migration-with-qemu-native-tls` document.
+
+
+Mitigation for MDS (Microarchitectural Data Sampling) security flaws
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+It is strongly recommended to patch all compute nodes and nova instances
+against the processor-related security flaws, such as MDS (and other
+previous vulnerabilities). For details on applying mitigation for the
+MDS flaws, refer to :ref:`mitigation-for-Intel-MDS-security-flaws`.
diff --git a/doc/source/admin/services.rst b/doc/source/admin/services.rst
index 74ee25273f1..a3f59e1fdfb 100644
--- a/doc/source/admin/services.rst
+++ b/doc/source/admin/services.rst
@@ -10,22 +10,13 @@ enable the ``nova-compute`` service.
.. code-block:: console
$ openstack compute service list
- +----+--------------+------------+----------+---------+-------+--------------+
- | ID | Binary | Host | Zone | Status | State | Updated At |
- +----+--------------+------------+----------+---------+-------+--------------+
- | 4 | nova- | controller | internal | enabled | up | 2016-12-20T0 |
- | | consoleauth | | | | | 0:44:48.0000 |
- | | | | | | | 00 |
- | 5 | nova- | controller | internal | enabled | up | 2016-12-20T0 |
- | | scheduler | | | | | 0:44:48.0000 |
- | | | | | | | 00 |
- | 6 | nova- | controller | internal | enabled | up | 2016-12-20T0 |
- | | conductor | | | | | 0:44:54.0000 |
- | | | | | | | 00 |
- | 9 | nova-compute | compute | nova | enabled | up | 2016-10-21T0 |
- | | | | | | | 2:35:03.0000 |
- | | | | | | | 00 |
- +----+--------------+------------+----------+---------+-------+--------------+
+ +----+----------------+------------+----------+---------+-------+----------------------------+
+ | ID | Binary | Host | Zone | Status | State | Updated At |
+ +----+----------------+------------+----------+---------+-------+----------------------------+
+ | 4 | nova-scheduler | controller | internal | enabled | up | 2016-12-20T00:44:48.000000 |
+ | 5 | nova-conductor | controller | internal | enabled | up | 2016-12-20T00:44:54.000000 |
+ | 8 | nova-compute | compute | nova | enabled | up | 2016-10-21T02:35:03.000000 |
+ +----+----------------+------------+----------+---------+-------+----------------------------+
#. Disable a nova service:
@@ -43,22 +34,13 @@ enable the ``nova-compute`` service.
.. code-block:: console
$ openstack compute service list
- +----+--------------+------------+----------+---------+-------+--------------+
- | ID | Binary | Host | Zone | Status | State | Updated At |
- +----+--------------+------------+----------+---------+-------+--------------+
- | 4 | nova- | controller | internal | enabled | up | 2016-12-20T0 |
- | | consoleauth | | | | | 0:44:48.0000 |
- | | | | | | | 00 |
- | 5 | nova- | controller | internal | enabled | up | 2016-12-20T0 |
- | | scheduler | | | | | 0:44:48.0000 |
- | | | | | | | 00 |
- | 6 | nova- | controller | internal | enabled | up | 2016-12-20T0 |
- | | conductor | | | | | 0:44:54.0000 |
- | | | | | | | 00 |
- | 9 | nova-compute | compute | nova | disabled| up | 2016-10-21T0 |
- | | | | | | | 2:35:03.0000 |
- | | | | | | | 00 |
- +----+--------------+------------+----------+---------+-------+--------------+
+ +----+----------------+------------+----------+---------+-------+----------------------------+
+ | ID | Binary | Host | Zone | Status | State | Updated At |
+ +----+----------------+------------+----------+---------+-------+----------------------------+
+   |  4 | nova-scheduler | controller | internal | enabled | up    | 2016-12-20T00:44:48.000000 |
+   |  5 | nova-conductor | controller | internal | enabled | up    | 2016-12-20T00:44:54.000000 |
+   |  8 | nova-compute   | compute    | nova     | disabled| up    | 2016-10-21T02:35:03.000000 |
+ +----+----------------+------------+----------+---------+-------+----------------------------+
#. Enable the service:
diff --git a/doc/source/admin/sev.rst b/doc/source/admin/sev.rst
new file mode 100644
index 00000000000..62588070afe
--- /dev/null
+++ b/doc/source/admin/sev.rst
@@ -0,0 +1,279 @@
+.. _amd-sev:
+
+AMD SEV (Secure Encrypted Virtualization)
+=========================================
+
+.. versionadded:: 20.0.0 (Train)
+
+`Secure Encrypted Virtualization (SEV)`__ is a technology from AMD which
+enables the memory for a VM to be encrypted with a key unique to the VM.
+SEV is particularly applicable to cloud computing since it can reduce the
+amount of trust VMs need to place in the hypervisor and administrator of
+their host system.
+
+.. __: https://developer.amd.com/sev/
+
+
+.. _deploying-sev-capable-infrastructure:
+
+Enabling SEV
+------------
+
+First the operator will need to ensure the following prerequisites are met:
+
+- Currently SEV is only supported when using the libvirt compute driver with a
+ :oslo.config:option:`libvirt.virt_type` of ``kvm`` or ``qemu``.
+
+- At least one of the Nova compute hosts must be AMD hardware capable
+ of supporting SEV. It is entirely possible for the compute plane to
+ be a mix of hardware which can and cannot support SEV, although as
+ per the section on `Permanent limitations`_ below, the maximum
+ number of simultaneously running guests with SEV will be limited by
+ the quantity and quality of SEV-capable hardware available.
+
+In order for users to be able to use SEV, the operator will need to
+perform the following steps:
+
+- Ensure that sufficient memory is reserved on the SEV compute hosts
+ for host-level services to function correctly at all times. This is
+ particularly important when hosting SEV-enabled guests, since they
+ pin pages in RAM, preventing any memory overcommit which may be in
+ normal operation on other compute hosts.
+
+ It is `recommended`__ to achieve this by configuring an ``rlimit`` at
+ the ``/machine.slice`` top-level ``cgroup`` on the host, with all VMs
+ placed inside that. (For extreme detail, see `this discussion on the
+ spec`__.)
+
+ __ http://specs.openstack.org/openstack/nova-specs/specs/train/approved/amd-sev-libvirt-support.html#memory-reservation-solutions
+ __ https://review.opendev.org/#/c/641994/2/specs/train/approved/amd-sev-libvirt-support.rst@167
+
+ An alternative approach is to configure the
+ :oslo.config:option:`reserved_host_memory_mb` option in the
+ ``[DEFAULT]`` section of :file:`nova.conf`, based on the expected
+ maximum number of SEV guests simultaneously running on the host, and
+ the details provided in `an earlier version of the AMD SEV spec`__
+ regarding memory region sizes, which cover how to calculate it
+ correctly.
+
+ __ https://specs.openstack.org/openstack/nova-specs/specs/stein/approved/amd-sev-libvirt-support.html#proposed-change
+
+ See `the Memory Locking and Accounting section of the AMD SEV spec`__
+ and `previous discussion for further details`__.
+
+ __ http://specs.openstack.org/openstack/nova-specs/specs/train/approved/amd-sev-libvirt-support.html#memory-locking-and-accounting
+ __ https://review.opendev.org/#/c/641994/2/specs/train/approved/amd-sev-libvirt-support.rst@167
+
+- A cloud administrator will need to define one or more SEV-enabled
+ flavors :ref:`as described below `, unless it
+ is sufficient for users to define SEV-enabled images.
+
+Additionally the cloud operator should consider the following optional
+steps:
+
+.. _num_memory_encrypted_guests:
+
+- Configure the :oslo.config:option:`libvirt.num_memory_encrypted_guests`
+ option in :file:`nova.conf` to represent the number of guests an SEV
+ compute node can host concurrently with memory encrypted at the
+ hardware level. For example:
+
+ .. code-block:: ini
+
+ [libvirt]
+ num_memory_encrypted_guests = 15
+
+ This option exists because on AMD SEV-capable hardware, the memory
+ controller has a fixed number of slots for holding encryption keys,
+ one per guest. For example, at the time of writing, earlier
+ generations of hardware only have 15 slots, thereby limiting the
+ number of SEV guests which can be run concurrently to 15. Nova
+ needs to track how many slots are available and used in order to
+ avoid attempting to exceed that limit in the hardware.
+
+ At the time of writing (September 2019), work is in progress to
+ allow QEMU and libvirt to expose the number of slots available on
+ SEV hardware; however until this is finished and released, it will
+ not be possible for Nova to programmatically detect the correct
+ value.
+
+ So this configuration option serves as a stop-gap, allowing the
+ cloud operator the option of providing this value manually. It may
+ later be demoted to a fallback value for cases where the limit
+ cannot be detected programmatically, or even removed altogether when
+ Nova's minimum QEMU version guarantees that it can always be
+ detected.
+
+ .. note::
+
+ When deciding whether to use the default of ``None`` or manually
+ impose a limit, operators should carefully weigh the benefits
+ vs. the risk. The benefits of using the default are a) immediate
+ convenience since nothing needs to be done now, and b) convenience
+ later when upgrading compute hosts to future versions of Nova,
+ since again nothing will need to be done for the correct limit to
+ be automatically imposed. However the risk is that until
+ auto-detection is implemented, users may be able to attempt to
+ launch guests with encrypted memory on hosts which have already
+ reached the maximum number of guests simultaneously running with
+ encrypted memory. This risk may be mitigated by other limitations
+ which operators can impose, for example if the smallest RAM
+ footprint of any flavor imposes a maximum number of simultaneously
+ running guests which is less than or equal to the SEV limit.
+
+- Configure :oslo.config:option:`ram_allocation_ratio` on all SEV-capable
+ compute hosts to ``1.0``. Use of SEV requires locking guest memory, meaning
+ it is not possible to overcommit host memory.
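+
+  For example:
+
+  .. code-block:: ini
+
+     [DEFAULT]
+     ram_allocation_ratio = 1.0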
+
+ Alternatively, you can explicitly configure small pages for instances using
+ the :nova:extra-spec:`hw:mem_page_size` flavor extra spec and equivalent
+ image metadata property. For more information, see :doc:`huge-pages`.
+
+- Configure :oslo.config:option:`libvirt.hw_machine_type` on all
+ SEV-capable compute hosts to include ``x86_64=q35``, so that all
+ x86_64 images use the ``q35`` machine type by default. (Currently
+ Nova defaults to the ``pc`` machine type for the ``x86_64``
+ architecture, although `it is expected that this will change in the
+ future`__.)
+
+ Changing the default from ``pc`` to ``q35`` makes the creation and
+ configuration of images by users more convenient by removing the
+ need for the ``hw_machine_type`` property to be set to ``q35`` on
+ every image for which SEV booting is desired.
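+
+  For example:
+
+  .. code-block:: ini
+
+     [libvirt]
+     hw_machine_type = x86_64=q35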
+
+ .. caution::
+
+ Consider carefully whether to set this option. It is
+ particularly important since a limitation of the implementation
+ prevents the user from receiving an error message with a helpful
+ explanation if they try to boot an SEV guest when neither this
+ configuration option nor the image property are set to select
+ a ``q35`` machine type.
+
+ On the other hand, setting it to ``q35`` may have other
+ undesirable side-effects on other images which were expecting to
+ be booted with ``pc``, so it is suggested to set it on a single
+ compute node or aggregate, and perform careful testing of typical
+ images before rolling out the setting to all SEV-capable compute
+ hosts.
+
+ __ https://bugs.launchpad.net/nova/+bug/1780138
+
+
+.. _extra-specs-memory-encryption:
+
+Configuring a flavor or image
+-----------------------------
+
+Once an operator has covered the above steps, users can launch SEV
+instances either by requesting a flavor for which the operator set the
+:nova:extra-spec:`hw:mem_encryption` extra spec to ``True``, or by using an
+image with the ``hw_mem_encryption`` property set to ``True``. For example, to
+enable SEV for a flavor:
+
+.. code-block:: console
+
+ $ openstack flavor set FLAVOR-NAME \
+ --property hw:mem_encryption=true
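+
+Similarly, to enable SEV via an image property (``IMAGE-NAME`` is a
+placeholder):
+
+.. code-block:: console
+
+   $ openstack image set IMAGE-NAME \
+     --property hw_mem_encryption=true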
+
+These do not inherently cause a preference for SEV-capable hardware,
+but for now SEV is the only way of fulfilling the requirement for
+memory encryption. However in the future, support for other
+hardware-level guest memory encryption technology such as Intel MKTME
+may be added. If a guest specifically needs to be booted using SEV
+rather than any other memory encryption technology, it is possible to
+ensure this by setting the :nova:extra-spec:`trait{group}:HW_CPU_X86_AMD_SEV`
+extra spec or equivalent image metadata property to ``required``.
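+
+For example, a sketch of requiring SEV hardware for a flavor via the
+trait extra spec:
+
+.. code-block:: console
+
+   $ openstack flavor set FLAVOR-NAME \
+     --property trait:HW_CPU_X86_AMD_SEV=required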
+
+In all cases, SEV instances can only be booted from images which have
+the ``hw_firmware_type`` property set to ``uefi``, and only when the
+machine type is set to ``q35``. This can be set per image by setting
+the image property ``hw_machine_type=q35``, or per compute node by
+the operator via :oslo.config:option:`libvirt.hw_machine_type` as
+explained above.
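+
+For example, to prepare an image for SEV boot on a per-image basis:
+
+.. code-block:: console
+
+   $ openstack image set IMAGE-NAME \
+     --property hw_firmware_type=uefi \
+     --property hw_machine_type=q35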
+
+
+Limitations
+-----------
+
+Impermanent limitations
+~~~~~~~~~~~~~~~~~~~~~~~
+
+The following limitations may be removed in the future as the
+hardware, firmware, and various layers of software receive new
+features:
+
+- SEV-encrypted VMs cannot yet be live-migrated or suspended,
+ therefore they will need to be fully shut down before migrating off
+ an SEV host, e.g. if maintenance is required on the host.
+
+- SEV-encrypted VMs cannot contain directly accessible host devices
+ (PCI passthrough). So for example mdev vGPU support will not
+ currently work. However technologies based on `vhost-user`__ should
+ work fine.
+
+ __ https://wiki.qemu.org/Features/VirtioVhostUser
+
+- The boot disk of SEV-encrypted VMs can only be ``virtio``.
+ (``virtio-blk`` is typically the default for libvirt disks on x86,
+ but can also be explicitly set e.g. via the image property
+ ``hw_disk_bus=virtio``). Valid alternatives for the disk
+ include using ``hw_disk_bus=scsi`` with
+  ``hw_scsi_model=virtio-scsi``, or ``hw_disk_bus=sata``.
+
+- QEMU and libvirt cannot yet expose the number of slots available for
+ encrypted guests in the memory controller on SEV hardware. Until
+ this is implemented, it is not possible for Nova to programmatically
+ detect the correct value. As a short-term workaround, operators can
+ optionally manually specify the upper limit of SEV guests for each
+ compute host, via the new
+ :oslo.config:option:`libvirt.num_memory_encrypted_guests`
+ configuration option :ref:`described above
+ `.
+
+Permanent limitations
+~~~~~~~~~~~~~~~~~~~~~
+
+The following limitations are expected long-term:
+
+- The number of SEV guests allowed to run concurrently will always be
+ limited. `On the first generation of EPYC machines it will be
+ limited to 15 guests`__; however this limit becomes much higher with
+ the second generation (Rome).
+
+ __ https://www.redhat.com/archives/libvir-list/2019-January/msg00652.html
+
+- The operating system running in an encrypted virtual machine must
+ contain SEV support.
+
+Non-limitations
+~~~~~~~~~~~~~~~
+
+For the sake of eliminating any doubt, the following actions are *not*
+expected to be limited when SEV encryption is used:
+
+- Cold migration or shelve, since they power off the VM before the
+ operation at which point there is no encrypted memory (although this
+ could change since there is work underway to add support for `PMEM
+ `_)
+
+- Snapshot, since it only snapshots the disk
+
+- ``nova evacuate`` (despite the name, more akin to resurrection than
+ evacuation), since this is only initiated when the VM is no longer
+ running
+
+- Attaching any volumes, as long as they do not require attaching via
+ an IDE bus
+
+- Use of spice / VNC / serial / RDP consoles
+
+- :doc:`VM guest virtual NUMA `
+
+
+References
+----------
+
+- `libvirt driver launching AMD SEV-encrypted instances (spec)`__
+
+.. __: http://specs.openstack.org/openstack/nova-specs/specs/train/approved/amd-sev-libvirt-support.html
diff --git a/doc/source/admin/ssh-configuration.rst b/doc/source/admin/ssh-configuration.rst
index f7e054fdc69..5adff142924 100644
--- a/doc/source/admin/ssh-configuration.rst
+++ b/doc/source/admin/ssh-configuration.rst
@@ -6,7 +6,7 @@ Configure SSH between compute nodes
.. todo::
- Consider merging this into a larger "live-migration" document or to the
+ Consider merging this into a larger "migration" document or to the
installation guide
If you are resizing or migrating an instance between hypervisors, you might
@@ -14,6 +14,12 @@ encounter an SSH (Permission denied) error. Ensure that each node is configured
with SSH key authentication so that the Compute service can use SSH to move
disks to other nodes.
+.. note::
+
+   It is not necessary for all the compute nodes to share the same key pair.
+   However, for ease of configuration, this document uses a single key pair
+   for communication between compute nodes.
+
To share a key pair between compute nodes, complete the following steps:
#. On the first node, obtain a key pair (public key and private key). Use the
@@ -28,14 +34,15 @@ To share a key pair between compute nodes, complete the following steps:
# usermod -s /bin/bash nova
- Switch to the nova account.
+ Ensure you can switch to the nova account:
.. code-block:: console
- # su nova
+ # su - nova
#. As root, create the folder that is needed by SSH and place the private key
- that you obtained in step 1 into this folder:
+   that you obtained in step 1 into this folder, and add the public key to
+   the ``authorized_keys`` file:
.. code-block:: console
@@ -43,29 +50,20 @@ To share a key pair between compute nodes, complete the following steps:
       cp <private key> /var/lib/nova/.ssh/id_rsa
echo 'StrictHostKeyChecking no' >> /var/lib/nova/.ssh/config
chmod 600 /var/lib/nova/.ssh/id_rsa /var/lib/nova/.ssh/authorized_keys
+       echo <pub key> >> /var/lib/nova/.ssh/authorized_keys
-#. Repeat steps 2-4 on each node.
-
- .. note::
-
- The nodes must share the same key pair, so do not generate a new key pair
- for any subsequent nodes.
-
-#. From the first node, where you created the SSH key, run:
+#. Copy the whole folder created in step 4 to the rest of the nodes:
.. code-block:: console
- ssh-copy-id -i nova@remote-host
-
- This command installs your public key in a remote machine's
- ``authorized_keys`` folder.
+ # scp -r /var/lib/nova/.ssh remote-host:/var/lib/nova/
#. Ensure that the nova user can now log in to each node without using a
password:
.. code-block:: console
- # su nova
+ # su - nova
$ ssh *computeNodeAddress*
$ exit
diff --git a/doc/source/admin/support-compute.rst b/doc/source/admin/support-compute.rst
index 04b8eed98d4..8522e51d795 100644
--- a/doc/source/admin/support-compute.rst
+++ b/doc/source/admin/support-compute.rst
@@ -9,8 +9,18 @@ a compute node to the instances that run on that node. Another common problem
is trying to run 32-bit images on a 64-bit compute node. This section shows
you how to troubleshoot Compute.
+.. todo:: Move the sections below into sub-pages for readability.
+
+.. toctree::
+ :maxdepth: 1
+
+ troubleshooting/orphaned-allocations.rst
+ troubleshooting/rebuild-placement-db.rst
+ troubleshooting/affinity-policy-violated.rst
+
+
Compute service logging
-~~~~~~~~~~~~~~~~~~~~~~~
+-----------------------
Compute stores a log file for each service in ``/var/log/nova``. For example,
``nova-compute.log`` is the log for the ``nova-compute`` service. You can set
@@ -31,8 +41,9 @@ settings. In ``nova.conf``, include the ``logfile`` option to enable logging.
Alternatively you can set ``use_syslog = 1`` so that the nova daemon logs to
syslog.
+
Guru Meditation reports
-~~~~~~~~~~~~~~~~~~~~~~~
+-----------------------
A Guru Meditation report is sent by the Compute service upon receipt of the
``SIGUSR2`` signal (``SIGUSR1`` before Mitaka). This report is a
@@ -66,10 +77,11 @@ The report has the following sections:
For more information, see :doc:`/reference/gmr`.
+
.. _compute-common-errors-and-fixes:
Common errors and fixes for Compute
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+-----------------------------------
The `ask.openstack.org <https://ask.openstack.org>`_ site offers a place to ask
and answer questions, and you can also mark questions as frequently asked
@@ -77,16 +89,17 @@ questions. This section describes some errors people have posted previously.
Bugs are constantly being fixed, so online resources are a great way to get the
most up-to-date errors and fixes.
+
Credential errors, 401, and 403 forbidden errors
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+------------------------------------------------
Problem
--------
+~~~~~~~
Missing credentials cause a ``403 forbidden`` error.
Solution
---------
+~~~~~~~~
To resolve this issue, use one of these methods:
@@ -107,11 +120,41 @@ services. When your CA information is available, create your ZIP file.
Also, check your HTTP proxy settings to see whether they cause problems with
``novarc`` creation.
+
+Live migration permission issues
+--------------------------------
+
+Problem
+~~~~~~~
+
+When live migrating an instance, you may see errors like the following:
+
+.. code-block:: shell
+
+ libvirtError: operation failed: Failed to connect to remote libvirt URI
+ qemu+ssh://stack@cld6b16/system: Cannot recv data: Host key verification
+ failed.: Connection reset by peer
+
+Solution
+~~~~~~~~
+
+Ensure you have completed all the steps outlined in
+:doc:`/admin/ssh-configuration`. In particular, it's important to note
+that the ``libvirt`` process runs as ``root`` even though it may be connecting
+as a different user (``stack`` in the above example). You can ensure everything
+is correctly configured by attempting to connect to the remote host via the
+``root`` user. Using the above example once again:
+
+.. code-block:: shell
+
+ $ su - -c 'ssh stack@cld6b16'
+
+
Instance errors
-~~~~~~~~~~~~~~~
+---------------
Problem
--------
+~~~~~~~
Sometimes a particular instance shows ``pending`` or you cannot SSH to it.
Sometimes the image itself is the problem. For example, when you use flat
@@ -119,7 +162,7 @@ manager networking, you do not have a DHCP server and certain images do not
support interface injection; you cannot connect to them.
Solution
---------
+~~~~~~~~
To fix instance errors use an image that does support this method, such as
Ubuntu, which obtains an IP address correctly with FlatManager network
@@ -153,11 +196,12 @@ if this command returns an error:
# virsh create libvirt.xml
+
Empty log output for Linux instances
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+------------------------------------
Problem
--------
+~~~~~~~
You can view the log output of running instances from either the
:guilabel:`Log` tab of the dashboard or the output of :command:`nova
@@ -169,7 +213,7 @@ instance via a serial console while the instance itself is not configured to
send output to the console.
Solution
---------
+~~~~~~~~
To rectify this, append the following parameters to kernel arguments specified
in the instance's boot loader:
@@ -181,16 +225,17 @@ in the instance's boot loader:
Upon rebooting, the instance will be configured to send output to the Compute
service.
+
Reset the state of an instance
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+------------------------------
Problem
--------
+~~~~~~~
Instances can remain in an intermediate state, such as ``deleting``.
Solution
---------
+~~~~~~~~
You can use the :command:`nova reset-state` command to manually reset the state
of an instance to an error state. You can then delete the instance. For
@@ -208,17 +253,18 @@ active state instead of an error state. For example:
$ nova reset-state --active c6bbbf26-b40a-47e7-8d5c-eb17bf65c485
+
Injection problems
-~~~~~~~~~~~~~~~~~~
+------------------
Problem
--------
+~~~~~~~
Instances may boot slowly, or do not boot. File injection can cause this
problem.
Solution
---------
+~~~~~~~~
To disable injection in libvirt, set the following in ``nova.conf``:
@@ -229,45 +275,22 @@ To disable injection in libvirt, set the following in ``nova.conf``:
.. note::
- If you have not enabled the configuration drive and you want to make
- user-specified files available from the metadata server for to improve
- performance and avoid boot failure if injection fails, you must disable
- injection.
+   If you have not enabled the config drive and you want to make
+   user-specified files available from the metadata server, then to improve
+   performance and avoid boot failure if injection fails, you must disable
+   injection.
-Disable live snapshotting
-~~~~~~~~~~~~~~~~~~~~~~~~~
-
-Problem
--------
-
-Administrators using libvirt version ``1.2.2`` may experience problems with
-live snapshot creation. Occasionally, libvirt version ``1.2.2`` fails to create
-live snapshots under the load of creating concurrent snapshot.
-
-Solution
---------
-
-To effectively disable the libvirt live snapshotting, until the problem is
-resolved, configure the ``disable_libvirt_livesnapshot`` option. You can turn
-off the live snapshotting mechanism by setting up its value to ``True`` in the
-``[workarounds]`` section of the ``nova.conf`` file:
-
-.. code-block:: ini
-
- [workarounds]
- disable_libvirt_livesnapshot = True
Cannot find suitable emulator for x86_64
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+----------------------------------------
Problem
--------
+~~~~~~~
When you attempt to create a VM, the error shows the VM is in the ``BUILD``
then ``ERROR`` state.
Solution
---------
+~~~~~~~~
On the KVM host, run :command:`cat /proc/cpuinfo`. Make sure the ``vmx`` or
``svm`` flags are set.
@@ -276,16 +299,17 @@ Follow the instructions in the :ref:`enable-kvm`
section in the Nova Configuration Reference to enable hardware
virtualization support in your BIOS.
+
Failed to attach volume after detaching
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+---------------------------------------
Problem
--------
+~~~~~~~
Failed to attach a volume after detaching the same volume.
Solution
---------
+~~~~~~~~
You must change the device name on the :command:`nova-attach` command. The VM
might not clean up after a :command:`nova-detach` command runs. This example
@@ -311,11 +335,12 @@ You might also have this problem after attaching and detaching the same volume
from the same VM with the same mount point multiple times. In this case,
restart the KVM host.
+
Failed to attach volume, systool is not installed
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+-------------------------------------------------
Problem
--------
+~~~~~~~
This warning and error occurs if you do not have the required ``sysfsutils``
package installed on the compute node:
@@ -332,7 +357,7 @@ package installed on the compute node:
Failed to attach volume 13d5c633-903a-4764-a5a0-3336945b1db1 at /dev/vdk.
Solution
---------
+~~~~~~~~
Install the ``sysfsutils`` package on the compute node. For example:
@@ -340,11 +365,12 @@ Install the ``sysfsutils`` package on the compute node. For example:
# apt-get install sysfsutils
+
Failed to connect volume in FC SAN
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+----------------------------------
Problem
--------
+~~~~~~~
The compute node failed to connect to a volume in a Fibre Channel (FC) SAN
configuration. The WWN may not be zoned correctly in your FC SAN that links the
@@ -364,16 +390,17 @@ compute host to the storage array:
operation.(HTTP 500)(Request-ID: req-71e5132b-21aa-46ee-b3cc-19b5b4ab2f00)
Solution
---------
+~~~~~~~~
The network administrator must configure the FC SAN fabric by correctly zoning
the WWN (port names) from your compute node HBAs.
+
Multipath call failed exit
-~~~~~~~~~~~~~~~~~~~~~~~~~~
+--------------------------
Problem
--------
+~~~~~~~
Multipath call failed exit. This warning occurs in the Compute log if you do
not have the optional ``multipath-tools`` package installed on the compute
@@ -389,7 +416,7 @@ your message are unique to your system.
Multipath call failed exit (96)
Solution
---------
+~~~~~~~~
Install the ``multipath-tools`` package on the compute node. For example:
@@ -397,11 +424,12 @@ Install the ``multipath-tools`` package on the compute node. For example:
# apt-get install multipath-tools
+
Failed to Attach Volume, Missing sg_scan
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+----------------------------------------
Problem
--------
+~~~~~~~
Failed to attach volume to an instance, ``sg_scan`` file not found. This error
occurs when the sg3-utils package is not installed on the compute node. The
@@ -416,7 +444,7 @@ IDs in your message are unique to your system:
Stdout: '/usr/local/bin/nova-rootwrap: Executable not found: /usr/bin/sg_scan'
Solution
---------
+~~~~~~~~
Install the ``sg3-utils`` package on the compute node. For example:
@@ -424,11 +452,12 @@ Install the ``sg3-utils`` package on the compute node. For example:
# apt-get install sg3-utils
+
Requested microversions are ignored
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+-----------------------------------
Problem
--------
+~~~~~~~
When making a request with a microversion beyond 2.1, for example:
@@ -443,9 +472,73 @@ thought it is allowed with the `2.15 microversion`_.
.. _2.15 microversion: https://docs.openstack.org/nova/latest/reference/api-microversion-history.html#id13
Solution
---------
+~~~~~~~~
Ensure the ``compute`` endpoint in the identity service catalog is pointing
at ``/v2.1`` instead of ``/v2``. The former route supports microversions,
while the latter route is considered the legacy v2.0 compatibility-mode
route which renders all requests as if they were made on the legacy v2.0 API.
+
+
+.. _user_token_timeout:
+
+User token times out during long-running operations
+---------------------------------------------------
+
+Problem
+~~~~~~~
+
+Long-running operations such as live migration or snapshot can sometimes
+overrun the expiry of the user token. In such cases, post-operation steps
+such as cleaning up after a live migration can fail when the nova-compute
+service needs to clean up resources in other services, such as in the
+block-storage (cinder) or networking (neutron) services.
+
+For example:
+
+.. code-block:: console
+
+ 2018-12-17 13:47:29.591 16987 WARNING nova.virt.libvirt.migration [req-7bc758de-b2e4-461b-a971-f79be6cd4703 313d1247d7b845da9c731eec53e50a26 2f693c782fa748c2baece8db95b4ba5b - default default] [instance: ead8ecc3-f473-4672-a67b-c44534c6042d] Live migration not completed after 2400 sec
+ 2018-12-17 13:47:30.097 16987 WARNING nova.virt.libvirt.driver [req-7bc758de-b2e4-461b-a971-f79be6cd4703 313d1247d7b845da9c731eec53e50a26 2f693c782fa748c2baece8db95b4ba5b - default default] [instance: ead8ecc3-f473-4672-a67b-c44534c6042d] Migration operation was cancelled
+ 2018-12-17 13:47:30.299 16987 ERROR nova.virt.libvirt.driver [req-7bc758de-b2e4-461b-a971-f79be6cd4703 313d1247d7b845da9c731eec53e50a26 2f693c782fa748c2baece8db95b4ba5b - default default] [instance: ead8ecc3-f473-4672-a67b-c44534c6042d] Live Migration failure: operation aborted: migration job: canceled by client: libvirtError: operation aborted: migration job: canceled by client
+ 2018-12-17 13:47:30.685 16987 INFO nova.compute.manager [req-7bc758de-b2e4-461b-a971-f79be6cd4703 313d1247d7b845da9c731eec53e50a26 2f693c782fa748c2baece8db95b4ba5b - default default] [instance: ead8ecc3-f473-4672-a67b-c44534c6042d] Swapping old allocation on 3e32d595-bd1f-4136-a7f4-c6703d2fbe18 held by migration 17bec61d-544d-47e0-a1c1-37f9d7385286 for instance
+ 2018-12-17 13:47:32.450 16987 ERROR nova.volume.cinder [req-7bc758de-b2e4-461b-a971-f79be6cd4703 313d1247d7b845da9c731eec53e50a26 2f693c782fa748c2baece8db95b4ba5b - default default] Delete attachment failed for attachment 58997d5b-24f0-4073-819e-97916fb1ee19. Error: The request you have made requires authentication. (HTTP 401) Code: 401: Unauthorized: The request you have made requires authentication. (HTTP 401)
+
+Solution
+~~~~~~~~
+
+Configure nova to use service user tokens to supplement the regular user token
+used to initiate the operation. The identity service (keystone) will then
+authenticate a request using the service user token if the user token has
+already expired.
+
+To use this feature, create a service user in the identity service, similar
+to how you would create the ``nova`` service user.
+
+Then configure the :oslo.config:group:`service_user` section of the nova
+configuration file, for example:
+
+.. code-block:: ini
+
+ [service_user]
+ send_service_user_token = True
+ auth_type = password
+ project_domain_name = Default
+ project_name = service
+ user_domain_name = Default
+ password = secretservice
+ username = nova
+ auth_url = https://104.130.216.102/identity
+ ...
+
+And configure the other identity options as necessary for the service user,
+much like you would configure nova to work with the image service (glance)
+or the networking service (neutron).
+
+.. note::
+
+   Please note that the role of the :oslo.config:group:`service_user` you
+   configure needs to be a superset of
+   :oslo.config:option:`keystone_authtoken.service_token_roles` (this
+   option is configured in cinder, glance and neutron).
diff --git a/doc/source/admin/system-admin.rst b/doc/source/admin/system-admin.rst
deleted file mode 100644
index 9dee99296dd..00000000000
--- a/doc/source/admin/system-admin.rst
+++ /dev/null
@@ -1,89 +0,0 @@
-.. _compute-trusted-pools.rst:
-
-=====================
-System administration
-=====================
-
-.. toctree::
- :maxdepth: 2
-
- manage-users.rst
- manage-volumes.rst
- flavors.rst
- default-ports.rst
- admin-password-injection.rst
- manage-the-cloud.rst
- manage-logs.rst
- root-wrap-reference.rst
- configuring-migrations.rst
- live-migration-usage.rst
- remote-console-access.rst
- service-groups.rst
- node-down.rst
- adv-config.rst
-
-To effectively administer compute, you must understand how the different
-installed nodes interact with each other. Compute can be installed in many
-different ways using multiple servers, but generally multiple compute nodes
-control the virtual servers and a cloud controller node contains the remaining
-Compute services.
-
-The Compute cloud works using a series of daemon processes named ``nova-*``
-that exist persistently on the host machine. These binaries can all run on the
-same machine or be spread out on multiple boxes in a large deployment. The
-responsibilities of services and drivers are:
-
-**Services**
-
-``nova-api``
- Receives XML requests and sends them to the rest of the system. A WSGI app
- routes and authenticates requests. Supports the OpenStack Compute APIs. A
- ``nova.conf`` configuration file is created when Compute is installed.
-
-``nova-compute``
- Manages virtual machines. Loads a Service object, and exposes the public
- methods on ComputeManager through a Remote Procedure Call (RPC).
-
-``nova-conductor``
- Provides database-access support for compute nodes (thereby reducing security
- risks).
-
-``nova-consoleauth``
- Manages console authentication.
-
- .. deprecated:: 18.0.0
-
- ``nova-consoleauth`` is deprecated since 18.0.0 (Rocky) and will be removed
- in an upcoming release.
-
-``nova-objectstore``
- A simple file-based storage system for images that replicates most of the S3
- API. It can be replaced with OpenStack Image service and either a simple
- image manager or OpenStack Object Storage as the virtual machine image
- storage facility. It must exist on the same node as ``nova-compute``.
-
-``nova-network``
- Manages floating and fixed IPs, DHCP, bridging and VLANs. Loads a Service
- object which exposes the public methods on one of the subclasses of
- NetworkManager. Different networking strategies are available by changing the
- ``network_manager`` configuration option to ``FlatManager``,
- ``FlatDHCPManager``, or ``VLANManager`` (defaults to ``VLANManager`` if
- nothing is specified).
-
- .. deprecated:: 14.0.0
-
- ``nova-network`` was deprecated in the OpenStack Newton release.
-
-``nova-scheduler``
- Dispatches requests for new virtual machines to the correct node.
-
-``nova-novncproxy``
- Provides a VNC proxy for browsers, allowing VNC consoles to access virtual
- machines.
-
-.. note::
-
- Some services have drivers that change how the service implements its core
- functionality. For example, the ``nova-compute`` service supports drivers
- that let you choose which hypervisor type it can use. ``nova-network`` and
- ``nova-scheduler`` also have drivers.
diff --git a/doc/source/admin/troubleshooting/affinity-policy-violated.rst b/doc/source/admin/troubleshooting/affinity-policy-violated.rst
new file mode 100644
index 00000000000..a7a563491e2
--- /dev/null
+++ b/doc/source/admin/troubleshooting/affinity-policy-violated.rst
@@ -0,0 +1,78 @@
+Affinity policy violated with parallel requests
+===============================================
+
+Problem
+-------
+
+Parallel server create requests for affinity or anti-affinity land on the same
+host and servers go to the ``ACTIVE`` state even though the affinity or
+anti-affinity policy was violated.
+
+Solution
+--------
+
+There are two ways to avoid anti-/affinity policy violations among multiple
+server create requests.
+
+Create multiple servers as a single request
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Use the `multi-create API`_ with the ``min_count`` parameter set or the
+`multi-create CLI`_ with the ``--min`` option set to the desired number of
+servers.
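+
+For example, a sketch using the CLI (``SERVER_GROUP_UUID`` is a placeholder
+for the UUID of an existing server group):
+
+.. code-block:: console
+
+   $ openstack server create --image IMAGE --flavor FLAVOR --min 3 \
+       --hint group=SERVER_GROUP_UUID SERVER-NAME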
+
+This works because when the batch of requests is visible to ``nova-scheduler``
+at the same time as a group, it will be able to choose compute hosts that
+satisfy the anti-/affinity constraint and will send them to the same hosts or
+different hosts accordingly.
+
+.. _multi-create API: https://docs.openstack.org/api-ref/compute/#create-multiple-servers
+.. _multi-create CLI: https://docs.openstack.org/python-openstackclient/latest/cli/command-objects/server.html#server-create
+
+Adjust Nova configuration settings
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+When requests are made separately and the scheduler cannot consider the batch
+of requests at the same time as a group, anti-/affinity races are handled by
+what is called the "late affinity check" in ``nova-compute``. Once a server
+lands on a compute host, if the request involves a server group,
+``nova-compute`` contacts the API database (via ``nova-conductor``) to retrieve
+the server group and then it checks whether the affinity policy has been
+violated. If the policy has been violated, ``nova-compute`` initiates a
+reschedule of the server create request. Note that this means the deployment
+must have :oslo.config:option:`scheduler.max_attempts` set to greater than
+``1`` (the default is ``3``) to handle such races.
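+
+For example, to allow up to five scheduling attempts (a sketch; any value
+greater than ``1`` handles the race):
+
+.. code-block:: ini
+
+   [scheduler]
+   max_attempts = 5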
+
+An ideal configuration for multiple cells will minimize `upcalls`_ from the
+cells to the API database. This is how devstack, for example, is configured in
+the CI gate. The cell conductors do not set
+:oslo.config:option:`api_database.connection` and ``nova-compute`` sets
+:oslo.config:option:`workarounds.disable_group_policy_check_upcall` to
+``True``.
+
+However, if a deployment needs to handle racing affinity requests, it needs to
+configure cell conductors to have access to the API database, for example:
+
+.. code-block:: ini
+
+ [api_database]
+ connection = mysql+pymysql://root:a@127.0.0.1/nova_api?charset=utf8
+
+The deployment also needs to configure the ``nova-compute`` services not to
+disable the group policy check upcall, either by leaving
+:oslo.config:option:`workarounds.disable_group_policy_check_upcall` unset
+(the default) or by setting it to ``False``, for example:
+
+.. code-block:: ini
+
+ [workarounds]
+ disable_group_policy_check_upcall = False
+
+With these settings, anti-/affinity policy should not be violated even when
+parallel server create requests are racing.
+
+Future work is needed to add anti-/affinity support to the placement service in
+order to eliminate the need for the late affinity check in ``nova-compute``.
+
+.. _upcalls: https://docs.openstack.org/nova/latest/user/cellsv2-layout.html#operations-requiring-upcalls
+
diff --git a/doc/source/admin/troubleshooting/orphaned-allocations.rst b/doc/source/admin/troubleshooting/orphaned-allocations.rst
new file mode 100644
index 00000000000..ca49aa4aab4
--- /dev/null
+++ b/doc/source/admin/troubleshooting/orphaned-allocations.rst
@@ -0,0 +1,201 @@
+Orphaned resource allocations
+=============================
+
+Problem
+-------
+
+There are orphaned resource allocations in the placement service which can
+cause resource providers to:
+
+* Appear to the scheduler to be more utilized than they really are
+* Prevent deletion of compute services
+
+One scenario in which this could happen is when a compute service host has
+problems, so the administrator forces it down and evacuates servers from it.
+Note that in this case "evacuates" refers to the server ``evacuate`` action,
+not live migrating all servers from the running compute service. Assume the
+compute host is down and fenced.
+
+In this case, the servers have allocations tracked in placement against both
+the down source compute node and their current destination compute host. For
+example, here is a server *vm1* which has been evacuated from node *devstack1*
+to node *devstack2*:
+
+.. code-block:: console
+
+ $ openstack --os-compute-api-version 2.53 compute service list --service nova-compute
+ +--------------------------------------+--------------+-----------+------+---------+-------+----------------------------+
+ | ID | Binary | Host | Zone | Status | State | Updated At |
+ +--------------------------------------+--------------+-----------+------+---------+-------+----------------------------+
+ | e3c18c2d-9488-4863-b728-f3f292ec5da8 | nova-compute | devstack1 | nova | enabled | down | 2019-10-25T20:13:51.000000 |
+ | 50a20add-cc49-46bd-af96-9bb4e9247398 | nova-compute | devstack2 | nova | enabled | up | 2019-10-25T20:13:52.000000 |
+ | b92afb2e-cd00-4074-803e-fff9aa379c2f | nova-compute | devstack3 | nova | enabled | up | 2019-10-25T20:13:53.000000 |
+ +--------------------------------------+--------------+-----------+------+---------+-------+----------------------------+
+ $ vm1=$(openstack server show vm1 -f value -c id)
+ $ openstack server show $vm1 -f value -c OS-EXT-SRV-ATTR:host
+ devstack2
+
+The server now has allocations against both *devstack1* and *devstack2*
+resource providers in the placement service:
+
+.. code-block:: console
+
+ $ devstack1=$(openstack resource provider list --name devstack1 -f value -c uuid)
+ $ devstack2=$(openstack resource provider list --name devstack2 -f value -c uuid)
+ $ openstack resource provider show --allocations $devstack1
+ +-------------+-----------------------------------------------------------------------------------------------------------+
+ | Field | Value |
+ +-------------+-----------------------------------------------------------------------------------------------------------+
+ | uuid | 9546fce4-9fb5-4b35-b277-72ff125ad787 |
+ | name | devstack1 |
+ | generation | 6 |
+ | allocations | {u'a1e6e0b2-9028-4166-b79b-c177ff70fbb7': {u'resources': {u'VCPU': 1, u'MEMORY_MB': 512, u'DISK_GB': 1}}} |
+ +-------------+-----------------------------------------------------------------------------------------------------------+
+ $ openstack resource provider show --allocations $devstack2
+ +-------------+-----------------------------------------------------------------------------------------------------------+
+ | Field | Value |
+ +-------------+-----------------------------------------------------------------------------------------------------------+
+ | uuid | 52d0182d-d466-4210-8f0d-29466bb54feb |
+ | name | devstack2 |
+ | generation | 3 |
+ | allocations | {u'a1e6e0b2-9028-4166-b79b-c177ff70fbb7': {u'resources': {u'VCPU': 1, u'MEMORY_MB': 512, u'DISK_GB': 1}}} |
+ +-------------+-----------------------------------------------------------------------------------------------------------+
+ $ openstack --os-placement-api-version 1.12 resource provider allocation show $vm1
+ +--------------------------------------+------------+------------------------------------------------+----------------------------------+----------------------------------+
+ | resource_provider | generation | resources | project_id | user_id |
+ +--------------------------------------+------------+------------------------------------------------+----------------------------------+----------------------------------+
+ | 9546fce4-9fb5-4b35-b277-72ff125ad787 | 6 | {u'VCPU': 1, u'MEMORY_MB': 512, u'DISK_GB': 1} | 2f3bffc5db2b47deb40808a4ed2d7c7a | 2206168427c54d92ae2b2572bb0da9af |
+ | 52d0182d-d466-4210-8f0d-29466bb54feb | 3 | {u'VCPU': 1, u'MEMORY_MB': 512, u'DISK_GB': 1} | 2f3bffc5db2b47deb40808a4ed2d7c7a | 2206168427c54d92ae2b2572bb0da9af |
+ +--------------------------------------+------------+------------------------------------------------+----------------------------------+----------------------------------+
+
+One way to find all servers that were evacuated from *devstack1* is:
+
+.. code-block:: console
+
+ $ nova migration-list --source-compute devstack1 --migration-type evacuation
+ +----+--------------------------------------+-------------+-----------+----------------+--------------+-------------+--------+--------------------------------------+------------+------------+----------------------------+----------------------------+------------+
+ | Id | UUID | Source Node | Dest Node | Source Compute | Dest Compute | Dest Host | Status | Instance UUID | Old Flavor | New Flavor | Created At | Updated At | Type |
+ +----+--------------------------------------+-------------+-----------+----------------+--------------+-------------+--------+--------------------------------------+------------+------------+----------------------------+----------------------------+------------+
+ | 1 | 8a823ba3-e2e9-4f17-bac5-88ceea496b99 | devstack1 | devstack2 | devstack1 | devstack2 | 192.168.0.1 | done | a1e6e0b2-9028-4166-b79b-c177ff70fbb7 | None | None | 2019-10-25T17:46:35.000000 | 2019-10-25T17:46:37.000000 | evacuation |
+ +----+--------------------------------------+-------------+-----------+----------------+--------------+-------------+--------+--------------------------------------+------------+------------+----------------------------+----------------------------+------------+
+
+Trying to delete the resource provider for *devstack1* will fail while there
+are allocations against it:
+
+.. code-block:: console
+
+ $ openstack resource provider delete $devstack1
+ Unable to delete resource provider 9546fce4-9fb5-4b35-b277-72ff125ad787: Resource provider has allocations. (HTTP 409)
+
+Solution
+--------
+
+Using the example resources above, remove the allocation for server *vm1* from
+the *devstack1* resource provider. If you have `osc-placement
+`_ 1.8.0 or newer, you can use the
+:command:`openstack resource provider allocation unset` command to remove the
+allocations for consumer *vm1* from resource provider *devstack1*:
+
+.. code-block:: console
+
+ $ openstack --os-placement-api-version 1.12 resource provider allocation \
+ unset --provider $devstack1 $vm1
+ +--------------------------------------+------------+------------------------------------------------+----------------------------------+----------------------------------+
+ | resource_provider | generation | resources | project_id | user_id |
+ +--------------------------------------+------------+------------------------------------------------+----------------------------------+----------------------------------+
+ | 52d0182d-d466-4210-8f0d-29466bb54feb | 4 | {u'VCPU': 1, u'MEMORY_MB': 512, u'DISK_GB': 1} | 2f3bffc5db2b47deb40808a4ed2d7c7a | 2206168427c54d92ae2b2572bb0da9af |
+ +--------------------------------------+------------+------------------------------------------------+----------------------------------+----------------------------------+
+
+If you have *osc-placement* 1.7.x or older, the ``unset`` command is not
+available and you must instead overwrite the allocations. Note that we do not
+use :command:`openstack resource provider allocation delete` here because that
+will remove the allocations for the server from all resource providers,
+including *devstack2* where it is now running; instead, we use
+:command:`openstack resource provider allocation set` to overwrite the
+allocations and only retain the *devstack2* provider allocations. If you do
+remove all allocations for a given server, you can heal them later. See `Using
+heal_allocations`_ for details.
+
+.. code-block:: console
+
+ $ openstack --os-placement-api-version 1.12 resource provider allocation set $vm1 \
+ --project-id 2f3bffc5db2b47deb40808a4ed2d7c7a \
+ --user-id 2206168427c54d92ae2b2572bb0da9af \
+ --allocation rp=52d0182d-d466-4210-8f0d-29466bb54feb,VCPU=1 \
+ --allocation rp=52d0182d-d466-4210-8f0d-29466bb54feb,MEMORY_MB=512 \
+ --allocation rp=52d0182d-d466-4210-8f0d-29466bb54feb,DISK_GB=1
+ +--------------------------------------+------------+------------------------------------------------+----------------------------------+----------------------------------+
+ | resource_provider | generation | resources | project_id | user_id |
+ +--------------------------------------+------------+------------------------------------------------+----------------------------------+----------------------------------+
+ | 52d0182d-d466-4210-8f0d-29466bb54feb | 4 | {u'VCPU': 1, u'MEMORY_MB': 512, u'DISK_GB': 1} | 2f3bffc5db2b47deb40808a4ed2d7c7a | 2206168427c54d92ae2b2572bb0da9af |
+ +--------------------------------------+------------+------------------------------------------------+----------------------------------+----------------------------------+
+
+Once the *devstack1* resource provider allocations have been removed using
+either of the approaches above, the *devstack1* resource provider can be
+deleted:
+
+.. code-block:: console
+
+ $ openstack resource provider delete $devstack1
+
+And the related compute service if desired:
+
+.. code-block:: console
+
+ $ openstack --os-compute-api-version 2.53 compute service delete e3c18c2d-9488-4863-b728-f3f292ec5da8
+
+For more details on the resource provider commands used in this guide, refer
+to the `osc-placement plugin documentation`_.
+
+.. _osc-placement plugin documentation: https://docs.openstack.org/osc-placement/latest/
+
+Using heal_allocations
+~~~~~~~~~~~~~~~~~~~~~~
+
+If you have a particularly troubling allocation consumer and just want to
+delete its allocations from all providers, you can use the
+:command:`openstack resource provider allocation delete` command and then
+heal the allocations for the consumer using the
+:ref:`heal_allocations command `. For example:
+
+.. code-block:: console
+
+ $ openstack resource provider allocation delete $vm1
+ $ nova-manage placement heal_allocations --verbose --instance $vm1
+ Looking for instances in cell: 04879596-d893-401c-b2a6-3d3aa096089d(cell1)
+ Found 1 candidate instances.
+ Successfully created allocations for instance a1e6e0b2-9028-4166-b79b-c177ff70fbb7.
+ Processed 1 instances.
+ $ openstack resource provider allocation show $vm1
+ +--------------------------------------+------------+------------------------------------------------+
+ | resource_provider | generation | resources |
+ +--------------------------------------+------------+------------------------------------------------+
+ | 52d0182d-d466-4210-8f0d-29466bb54feb | 5 | {u'VCPU': 1, u'MEMORY_MB': 512, u'DISK_GB': 1} |
+ +--------------------------------------+------------+------------------------------------------------+
+
+Note that deleting allocations and then relying on ``heal_allocations`` may not
+always be the best solution since healing allocations does not account for some
+things:
+
+* `Migration-based allocations`_ would be lost if manually deleted during a
+ resize. These are allocations tracked by the migration resource record
+ on the source compute service during a migration.
+* Healing allocations only partially supports nested allocations. Nested
+  allocations due to Neutron ports having QoS policies are supported since
+  the 20.0.0 (Train) release, but nested allocations due to vGPU or Cyborg
+  device profile requests in the flavor are not. Similarly, if you use
+  ``provider.yaml`` files on compute hosts to define additional resources
+  and those resources are defined on child resource providers, instances
+  using such resources are not supported.
+
+If you do use the ``heal_allocations`` command to clean up allocations for a
+specific trouble instance, it is recommended to take note of what the
+allocations were before you remove them in case you need to reset them manually
+later. Use the :command:`openstack resource provider allocation show` command
+to get allocations for a consumer before deleting them, e.g.:
+
+.. code-block:: console
+
+ $ openstack --os-placement-api-version 1.12 resource provider allocation show $vm1
+
+.. _Migration-based allocations: https://specs.openstack.org/openstack/nova-specs/specs/queens/implemented/migration-allocations.html
diff --git a/doc/source/admin/troubleshooting/rebuild-placement-db.rst b/doc/source/admin/troubleshooting/rebuild-placement-db.rst
new file mode 100644
index 00000000000..cf877fe9aa4
--- /dev/null
+++ b/doc/source/admin/troubleshooting/rebuild-placement-db.rst
@@ -0,0 +1,56 @@
+Rebuild placement DB
+====================
+
+Problem
+-------
+
+You have somehow changed a nova cell database and the ``compute_nodes`` table
+entries are now reporting different uuids to the placement service, but
+placement already has ``resource_providers`` table entries with the same
+names as those computes, so the resource providers in placement and the
+compute nodes in the nova database are not synchronized. This can happen,
+for example, as a result of restoring the nova cell database from a backup
+where the compute hosts have not changed but they are using different uuids.
+
+Nova reports compute node inventory to placement using the
+``hypervisor_hostname`` and uuid from the ``compute_nodes`` table; these map
+to the name and uuid of entries in the placement ``resource_providers``
+table, which has a unique constraint on the name (the hostname in this case)
+and on the uuid. Trying to create a new resource provider with a new uuid
+but the same name as an existing provider results in a 409 error from
+placement, such as in `bug 1817833`_.
+
+.. _bug 1817833: https://bugs.launchpad.net/nova/+bug/1817833
+
+Solution
+--------
+
+.. warning:: This is likely a last resort when *all* computes and resource
+ providers are not synchronized and it is simpler to just rebuild
+ the placement database from the current state of nova. This may,
+ however, not work when using placement for more advanced features
+ such as :neutron-doc:`ports with minimum bandwidth guarantees `
+ or `accelerators `_.
+ Obviously testing first in a pre-production environment is ideal.
+
+These are the steps at a high level:
+
+#. Make a backup of the existing placement database in case these steps fail
+ and you need to start over.
+
+#. Recreate the placement database and run the schema migrations to
+ initialize the placement database.
+
+#. Either restart the ``nova-compute`` services or wait for the
+   :oslo.config:option:`update_resources_interval` to pass, so that they
+   report resource providers and their inventory to placement.
+
+#. Run the :ref:`nova-manage placement heal_allocations `
+ command to report allocations to placement for the existing instances in
+ nova.
+
+#. Run the :ref:`nova-manage placement sync_aggregates `
+ command to synchronize nova host aggregates to placement resource provider
+ aggregates.
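+
+For example, a rough sketch of the first two steps, assuming a MySQL
+deployment with the default ``placement`` database name (commands and
+database names will vary by installation):
+
+.. code-block:: console
+
+   # mysqldump placement > placement-backup.sql
+   # mysql -e "DROP DATABASE placement; CREATE DATABASE placement;"
+   # placement-manage db sync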
+
+Once complete, test your deployment as usual, e.g. running Tempest integration
+and/or Rally tests, creating, migrating and deleting a server, etc.
diff --git a/doc/source/admin/uefi.rst b/doc/source/admin/uefi.rst
new file mode 100644
index 00000000000..8c10f205066
--- /dev/null
+++ b/doc/source/admin/uefi.rst
@@ -0,0 +1,69 @@
+====
+UEFI
+====
+
+.. versionadded:: 17.0.0 (Queens)
+
+Nova supports configuring a `UEFI bootloader`__ for guests. This brings about
+important advantages over legacy BIOS bootloaders and allows for features such
+as :doc:`secure-boot`.
+
+.. __: https://en.wikipedia.org/wiki/Unified_Extensible_Firmware_Interface
+
+
+Enabling UEFI
+-------------
+
+Currently the configuration of UEFI guest bootloaders is only supported when
+using the libvirt compute driver with a :oslo.config:option:`libvirt.virt_type`
+of ``kvm`` or ``qemu``, or when using the Hyper-V compute driver with certain
+machine types. When using the libvirt compute driver with AArch64-based guests,
+UEFI is automatically enabled as AArch64 does not support BIOS.
+
+.. todo::
+
+ Update this once compute drivers start reporting a trait indicating UEFI
+ bootloader support.
+
+
+Configuring a flavor or image
+-----------------------------
+
+Configuring a UEFI bootloader varies depending on the compute driver in use.
+
+.. rubric:: Libvirt
+
+UEFI support is enabled by default on AArch64-based guests. For other guest
+architectures, you can request UEFI support with libvirt by setting the
+``hw_firmware_type`` image property to ``uefi``. For example:
+
+.. code-block:: bash
+
+ $ openstack image set --property hw_firmware_type=uefi $IMAGE
+
+.. rubric:: Hyper-V
+
+It is not possible to explicitly request UEFI support with Hyper-V. Rather, it
+is enabled implicitly when using `Generation 2`__ guests. You can request a
+Generation 2 guest by setting the ``hw_machine_type`` image metadata property
+to ``hyperv-gen2``. For example:
+
+.. code-block:: bash
+
+ $ openstack image set --property hw_machine_type=hyperv-gen2 $IMAGE
+
+.. __: https://docs.microsoft.com/en-us/windows-server/virtualization/hyper-v/plan/should-i-create-a-generation-1-or-2-virtual-machine-in-hyper-v
+
+
+References
+----------
+
+* `Hyper-V UEFI Secure Boot (spec)`__
+* `Open Virtual Machine Firmware (OVMF) Status Report`__
+* `Anatomy of a boot, a QEMU perspective`__
+* `Should I create a generation 1 or 2 virtual machine in Hyper-V?`__
+
+.. __: https://specs.openstack.org/openstack/nova-specs/specs/ocata/implemented/hyper-v-uefi-secureboot.html
+.. __: http://www.linux-kvm.org/downloads/lersek/ovmf-whitepaper-c770f8c.txt
+.. __: https://www.qemu.org/2020/07/03/anatomy-of-a-boot/
+.. __: https://docs.microsoft.com/en-us/windows-server/virtualization/hyper-v/plan/should-i-create-a-generation-1-or-2-virtual-machine-in-hyper-v
diff --git a/doc/source/admin/upgrades.rst b/doc/source/admin/upgrades.rst
new file mode 100644
index 00000000000..75ac5a4ca56
--- /dev/null
+++ b/doc/source/admin/upgrades.rst
@@ -0,0 +1,341 @@
+========
+Upgrades
+========
+
+Nova aims to provide upgrades with minimal downtime.
+
+Firstly, the data plane. There should be no VM downtime when you upgrade
+Nova; this has been the case since the early days.
+
+Secondly, we want no downtime during upgrades of the Nova control plane.
+This document describes how we can achieve that.
+
+Once we have introduced the key concepts relating to upgrades, we will
+describe the process needed for a no-downtime upgrade of nova.
+
+
+.. _minimal_downtime_upgrade:
+
+Minimal Downtime Upgrade Process
+--------------------------------
+
+Plan your upgrade
+~~~~~~~~~~~~~~~~~
+
+* Read and ensure you understand the release notes for the next release.
+
+* You should ensure all required steps from the previous upgrade have been
+ completed, such as data migrations.
+
+* Make a backup of your database. Nova does not support downgrading of the
+  database. Hence, in case of upgrade failure, restoring the database from
+  backup is the only option.
+
+* During the upgrade, be aware that there will be additional load on
+  nova-conductor. You may find you need to add extra nova-conductor workers
+  to deal with the additional upgrade-related load.
+
+Rolling upgrade process
+~~~~~~~~~~~~~~~~~~~~~~~
+
+To reduce downtime, the compute services can be upgraded in a rolling fashion.
+This means upgrading a few services at a time. This results in a condition where
+both old (N) and new (N+1) nova-compute services co-exist for a certain time
+period. Note that there is no upgrade of the hypervisor here; this is just
+upgrading the nova services. If reduced downtime is not a concern (or lower
+complexity is desired), all services may be taken down and restarted at the
+same time.
+
+.. important::
+
+ Nova does not currently support the coexistence of N and N+2 or greater
+ :program:`nova-compute` or :program:`nova-conductor` services in the same
+   deployment. The ``nova-conductor`` service will fail to start when a
+ ``nova-compute`` service that is older than the previous release (N-2 or
+ greater) is detected. Similarly, in a :doc:`deployment with multiple cells
+ `, neither the super conductor service nor any
+ per-cell conductor service will start if any other conductor service in the
+ deployment is older than the previous release.
+
+#. Before maintenance window:
+
+ * Start the process with the controller node. Install the code for the next
+ version of Nova, either in a venv or a separate control plane node,
+ including all the python dependencies.
+
+ * Using the newly installed nova code, run the DB sync. First run
+ ``nova-manage api_db sync``, then ``nova-manage db sync``. ``nova-manage
+ db sync`` should be run for all cell databases, including ``cell0``. If
+ necessary, the ``--config-file`` argument can be used to point to the
+ correct ``nova.conf`` file for the given cell.
+
+ These schema change operations should have minimal or no effect on
+ performance, and should not cause any operations to fail.
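+
+     For example, a minimal sketch for a deployment with one extra cell whose
+     database connection is set in a hypothetical
+     ``/etc/nova/nova-cell1.conf``:
+
+     .. code-block:: console
+
+        # nova-manage api_db sync
+        # nova-manage db sync
+        # nova-manage db sync --config-file /etc/nova/nova-cell1.conf
+
+     By default, ``nova-manage db sync`` will also attempt to sync the
+     ``cell0`` database unless ``--local_cell`` is specified.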
+
+ * At this point, new columns and tables may exist in the database. These
+ DB schema changes are done in a way that both the N and N+1 release can
+ perform operations against the same schema.
+
+#. During maintenance window:
+
+ * Several nova services rely on the external placement service being at the
+ latest level. Therefore, you must upgrade placement before any nova
+ services. See the
+ :placement-doc:`placement upgrade notes ` for
+ more details on upgrading the placement service.
+
+ * For maximum safety (no failed API operations), gracefully shutdown all
+ the services (i.e. SIG_TERM) except nova-compute.
+
+ * Before restarting services with new code, perform the release-specific
+ readiness check with ``nova-status upgrade check``. See the
+ :ref:`nova-status upgrade check ` for more details
+ on status check.
+
+   * Start all services on the new code, with
+     ``[upgrade_levels]compute=auto`` in nova.conf. It is safest to
+     start nova-conductor first and nova-api last. Note that you may
+     use a static alias name instead of ``auto``, such as
+     ``[upgrade_levels]compute=<release_name>``. Also note that this step is
+     only required if compute services are not upgraded in lock-step
+     with the control services.
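+
+     That is, in ``nova.conf``:
+
+     .. code-block:: ini
+
+        [upgrade_levels]
+        compute = auto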
+
+   * If desired, gracefully shutdown nova-compute (i.e. SIG_TERM)
+     services in small batches, then start the new version of the code
+     with: ``[upgrade_levels]compute=auto``. If this batch-based approach
+     is used, only a few compute nodes will have any delayed API
+     actions at a time, and this helps ensure there is enough capacity
+     online to service any boot requests that happen during this time.
+
+#. After maintenance window:
+
+   * Once all services are running the new code, double check in the DB that
+     there are no old orphaned service records, using ``nova service-list``.
+
+ * Now that all services are upgraded, we need to send the SIG_HUP signal, so all
+ the services clear any cached service version data. When a new service
+ starts, it automatically detects which version of the compute RPC protocol
+ to use, and it can decide if it is safe to do any online data migrations.
+     Note, if you used a static value for the upgrade level, such as
+     ``[upgrade_levels]compute=<release_name>``, you must update nova.conf to
+     remove that configuration value and do a full service restart.
+
+   * Now that all the services are upgraded and signaled, the system is able
+     to use the latest version of the RPC protocol and can access all of the
+     features in the new release.
+
+ * Once all the services are running the latest version of the code, and all
+ the services are aware they all have been upgraded, it is safe to
+ transform the data in the database into its new format. While some of this
+ work happens on demand when the system reads a database row that needs
+ updating, we must get all the data transformed into the current version
+ before the next upgrade. Additionally, some data may not be transformed
+ automatically so performing the data migration is necessary to avoid
+ performance degradation due to compatibility routines.
+
+ * This process can put significant extra write load on the
+     database. Complete all online data migrations using:
+     ``nova-manage db online_data_migrations --max-count <number>``. Note
+ that you can use the ``--max-count`` argument to reduce the load this
+ operation will place on the database, which allows you to run a
+ small chunk of the migrations until all of the work is done. The chunk size
+ you should use depends on your infrastructure and how much additional load
+ you can impose on the database. To reduce load, perform smaller batches
+ with delays between chunks. To reduce time to completion, run larger batches.
+ Each time it is run, the command will show a summary of completed and remaining
+ records. If using the ``--max-count`` option, the command should be rerun
+ while it returns exit status 1 (which indicates that some migrations took
+ effect, and more work may remain to be done), even if some migrations
+ produce errors. If all possible migrations have completed and some are
+ still producing errors, exit status 2 will be returned. In this case, the
+ cause of the errors should be investigated and resolved. Migrations should be
+ considered successfully completed only when the command returns exit status 0.
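+
+     For example, a minimal shell sketch of that loop, assuming a batch size
+     of 50 (tune the size and the pause for your environment):
+
+     .. code-block:: bash
+
+        # Run batches while the command reports progress (exit status 1).
+        rc=1
+        while [ "$rc" -eq 1 ]; do
+            nova-manage db online_data_migrations --max-count 50
+            rc=$?
+            sleep 5   # brief pause to limit the extra load on the database
+        done
+        # rc is now 0 (all migrations complete) or 2 (some keep failing).
+        if [ "$rc" -ne 0 ]; then
+            echo "Some migrations are failing; investigate before proceeding." >&2
+        fi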
+
+ * At this point, you must also ensure you update the configuration, to stop
+ using any deprecated features or options, and perform any required work
+ to transition to alternative features. All deprecated options are
+ supported for at least one cycle, but should be removed before your next
+ upgrade is performed.
+
+
+Current Database Upgrade Types
+------------------------------
+
+Currently Nova has two types of database upgrades that are in use.
+
+- Schema Migrations
+- Data Migrations
+
+Nova does not support database downgrades.
+
+.. _schema-migrations:
+
+Schema Migrations
+~~~~~~~~~~~~~~~~~
+
+Schema migrations are defined in ``nova/db/main/migrations/versions`` and
+``nova/db/api/migrations/versions``. They are the routines that transform our
+database structure, which should be additive and able to be applied to a
+running system before service code has been upgraded.
+
+For information on developing your own schema migrations as part of a feature
+or bugfix, refer to :doc:`/reference/database-migrations`.
+
+.. note::
+
+ The API database migrations should be assumed to run before the
+ migrations for the main/cell databases. This is because the former
+ contains information about how to find and connect to the latter.
+ Some management commands that operate on multiple cells will attempt
+ to list and iterate over cell mapping records, which require a
+ functioning API database schema.
+
+.. _data-migrations:
+
+Data Migrations
+~~~~~~~~~~~~~~~
+
+Online data migrations occur in two places:
+
+#. Inline migrations that occur as part of normal run-time
+ activity as data is read in the old format and written in the
+ new format
+
+#. Background online migrations that are performed using
+ ``nova-manage`` to complete transformations that will not occur
+ incidentally due to normal runtime activity.
+
+An example of an online data migration is the flavor migration done as part
+of Nova object version 1.18. This included a transient migration of flavor
+storage from one database location to another.
+
+For information on developing your own data migrations as part of a feature
+or bugfix, refer to :doc:`/reference/database-migrations`.
+
+Migration policy
+~~~~~~~~~~~~~~~~
+
+The following guidelines for schema and data migrations are followed in order
+to ease upgrades:
+
+* Additive schema migrations - In general, almost all schema migrations should
+ be additive. Put simply, they should only create elements like columns,
+ indices, and tables.
+
+* Subtractive schema migrations - To remove an element like a column or table
+ during the N release cycle:
+
+ #. The element must be deprecated and retained for backward compatibility.
+ (This allows for graceful upgrade from N to N+1.)
+
+ #. Data migration, by the objects layer, must completely migrate data from
+ the old version of the schema to the new version.
+
+ #. The column can then be removed with a migration at the start of N+2.
+
+* All schema migrations should be idempotent. For example, a migration
+ should check if an element exists in the schema before attempting to add
+ it. This logic comes for free in the autogenerated workflow of
+ the online migrations.
+
+* Constraints - When adding a foreign or unique key constraint, the schema
+ migration code needs to handle possible problems with data before applying
+ the constraint. (Example: A unique constraint must clean up duplicate
+ records before applying said constraint.)
+
+* Data migrations - As mentioned above, data migrations will be done in an
+ online fashion by custom code in the object layer that handles moving data
+ between the old and new portions of the schema. In addition, for each type
+ of data migration performed, there should exist a nova-manage option for an
+ operator to manually request that rows be migrated.
+
+
+Concepts
+--------
+
+Here are the key concepts you need to know before reading the section on the
+upgrade process:
+
+RPC version pinning
+ Through careful RPC versioning, newer nodes are able to talk to older
+ nova-compute nodes. When upgrading control plane nodes, we can pin them
+ at an older version of the compute RPC API, until all the compute nodes
+ are able to be upgraded.
+ https://wiki.openstack.org/wiki/RpcMajorVersionUpdates
+
+  .. note::
+
+     The procedure for rolling upgrades with multiple cells (v2) is not
+     yet determined.
+
+Online Configuration Reload
+  During the upgrade, we pin new services at the older RPC version. When all
+  services are updated to use newer code, we need to unpin them so we are
+  able to use any new functionality.
+  To avoid having to restart the service, ideally we need a way to update the
+  currently running process to use the latest configuration, using the
+  current SIGHUP signal handling or otherwise.
+
+Graceful service shutdown
+  Many nova services are python processes listening for messages on an
+  AMQP queue, including nova-compute. When the process receives a SIGTERM,
+  it stops getting new work from its queue, completes any outstanding work,
+  and then terminates. During this process, messages can be left on the
+  queue for when the python process starts back up.
+  This gives us a way to shut down a service using older code, and start
+  up a service using newer code with minimal impact. If it's a service that
+  can have multiple workers, like nova-conductor, you can usually add the
+  new workers before the graceful shutdown of the old workers. In the case
+  of singleton services, like nova-compute, some actions could be delayed
+  during the restart, but ideally no actions should fail due to the restart.
+
+ .. note::
+
+ While this is true for the RabbitMQ RPC backend, we need to confirm
+ what happens for other RPC backends.
+
+API load balancer draining
+ When upgrading API nodes, you can make your load balancer only send new
+ connections to the newer API nodes, allowing for a seamless update of your
+ API nodes.
+
+Expand/Contract DB Migrations
+  Modern databases are able to make many schema changes while you are still
+  writing to the database. Taking this a step further, we can make all DB
+  changes by first adding the new structures, expanding. Then you can slowly
+  move all the data into a new location and format. Once that is complete,
+  you can drop bits of the schema that are no longer needed,
+  i.e. contract. This happens multiple cycles after we have stopped
+  using a particular piece of schema, and can happen in a schema
+  migration without affecting runtime code.
+
+Online Data Migrations using objects
+ Since Kilo, we have moved all data migration into the DB objects code.
+ When trying to migrate data in the database from the old format to the
+ new format, this is done in the object code when reading or saving things
+ that are in the old format. For records that are not updated, you need to
+ run a background process to convert those records into the newer format.
+ This process must be completed before you contract the database schema.
+
+DB prune deleted rows
+  Currently resources are soft deleted in the main database, so users are able
+  to track instances in the DB that are created and destroyed in production.
+  However, most people have a data retention policy of, say, 30 or 90 days,
+  after which they will want to delete those entries. Not deleting
+  those entries affects DB performance as indices grow very large and data
+  migrations take longer as there is more data to migrate.
+
+nova-conductor object backports
+ RPC pinning ensures new services can talk to the older service's method
+ signatures. But many of the parameters are objects that may well be too
+ new for the old service to understand, so you are able to send the object
+ back to the nova-conductor to be downgraded to a version the older service
+ can understand.
+
+
+Testing
+-------
+
+We use the "grenade" jobs to test upgrades. The current tests only cover the
+existing upgrade process, where old computes can run with a new control plane
+but the control plane is turned off during DB migrations.
diff --git a/doc/source/admin/vendordata.rst b/doc/source/admin/vendordata.rst
new file mode 100644
index 00000000000..ff412e83e57
--- /dev/null
+++ b/doc/source/admin/vendordata.rst
@@ -0,0 +1,178 @@
+==========
+Vendordata
+==========
+
+.. note::
+
+ This section provides deployment information about the vendordata feature.
+ For end-user information about the vendordata feature and instance metadata
+ in general, refer to the :doc:`user guide `.
+
+The *vendordata* feature provides a way to pass vendor or deployment-specific
+information to instances. This can be accessed by users using :doc:`the metadata
+service ` or with :doc:`config drives
+`.
+
+There are two vendordata modules provided with nova: ``StaticJSON`` and
+``DynamicJSON``.
+
+
+``StaticJSON``
+--------------
+
+The ``StaticJSON`` module includes the contents of a static JSON file loaded
+from disk. This can be used for things which don't change between instances,
+such as the location of the corporate puppet server. It is the default provider.
+
+Configuration
+~~~~~~~~~~~~~
+
+The service you must configure to enable the ``StaticJSON`` vendordata module
+depends on how guests are accessing vendordata. If using the metadata service,
+configuration applies to either :program:`nova-api` or
+:program:`nova-api-metadata`, depending on the deployment, while if using
+config drives, configuration applies to :program:`nova-compute`. However,
+configuration is otherwise the same and the following options apply:
+
+- :oslo.config:option:`api.vendordata_providers`
+- :oslo.config:option:`api.vendordata_jsonfile_path`
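+
+For example, a minimal ``nova.conf`` snippet enabling the module (the JSON
+file path is illustrative):
+
+.. code-block:: ini
+
+   [api]
+   vendordata_providers = StaticJSON
+   vendordata_jsonfile_path = /etc/nova/vendor_data.json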
+
+Refer to the :doc:`metadata service ` and :doc:`config
+drive ` documentation for more information on how to
+configure the required services.
+
+
+``DynamicJSON``
+---------------
+
+The ``DynamicJSON`` module can make a request to an external REST service to
+determine what metadata to add to an instance. This is how we recommend you
+generate things like Active Directory tokens which change per instance.
+
+When used, the ``DynamicJSON`` module will make a request to any REST services
+listed in the :oslo.config:option:`api.vendordata_dynamic_targets` configuration
+option. There can be more than one of these, but note that they will be queried
+once per metadata request from the instance, which can mean a lot of traffic
+depending on your configuration and the configuration of the instance.
+
+The following data is passed to your REST service as a JSON encoded POST:
+
+.. list-table::
+ :header-rows: 1
+
+ * - Key
+ - Description
+ * - ``project-id``
+ - The ID of the project that owns this instance.
+ * - ``instance-id``
+ - The UUID of this instance.
+ * - ``image-id``
+ - The ID of the image used to boot this instance.
+ * - ``user-data``
+ - As specified by the user at boot time.
+ * - ``hostname``
+ - The hostname of the instance.
+ * - ``metadata``
+ - As specified by the user at boot time.
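+
+For example, the POST body your REST service receives might look like the
+following; all values here are illustrative:
+
+.. code-block:: json
+
+   {
+       "project-id": "039d104b7a5c4631b4ba6524d0b9e981",
+       "instance-id": "8c24f3aa-e8bc-46d3-b27e-f7b9a922c16b",
+       "image-id": "b4431b25-8a5b-4174-a9d5-85a0a1c5b02f",
+       "user-data": null,
+       "hostname": "test-instance",
+       "metadata": {}
+   }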
+
+Metadata fetched from the REST service will appear in the metadata service in a
+new file called ``vendor_data2.json``, with a path (either in the metadata service
+URL or in the config drive) like this::
+
+ openstack/latest/vendor_data2.json
+
+For each dynamic target, there will be an entry in the JSON file named after
+that target. For example:
+
+.. code-block:: json
+
+ {
+ "testing": {
+ "value1": 1,
+ "value2": 2,
+ "value3": "three"
+ }
+ }
+
+The `novajoin`__ project provides a dynamic vendordata service to manage host
+instantiation in an IPA server.
+
+__ https://opendev.org/x/novajoin
+
+Deployment considerations
+~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Nova provides authentication to external metadata services in order to provide
+some level of certainty that the request came from nova. This is done by
+providing a service token with the request -- you can then just deploy your
+metadata service with the keystone authentication WSGI middleware. This is
+configured using the keystone authentication parameters in the
+:oslo.config:group:`vendordata_dynamic_auth` configuration group.
+
+Configuration
+~~~~~~~~~~~~~
+
+As with ``StaticJSON``, the service you must configure to enable the
+``DynamicJSON`` vendordata module depends on how guests are accessing
+vendordata. If using the metadata service, configuration applies to either
+:program:`nova-api` or :program:`nova-api-metadata`, depending on the
+deployment, while if using config drives, configuration applies to
+:program:`nova-compute`. However, configuration is otherwise the same and the
+following options apply:
+
+- :oslo.config:option:`api.vendordata_providers`
+- :oslo.config:option:`api.vendordata_dynamic_ssl_certfile`
+- :oslo.config:option:`api.vendordata_dynamic_connect_timeout`
+- :oslo.config:option:`api.vendordata_dynamic_read_timeout`
+- :oslo.config:option:`api.vendordata_dynamic_failure_fatal`
+- :oslo.config:option:`api.vendordata_dynamic_targets`
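+
+For example, a sketch of a ``nova.conf`` snippet wiring up a single dynamic
+target named ``testing``; the URL is illustrative:
+
+.. code-block:: ini
+
+   [api]
+   vendordata_providers = StaticJSON,DynamicJSON
+   vendordata_dynamic_targets = testing@http://127.0.0.1:9999/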
+
+Refer to the :doc:`metadata service ` and :doc:`config
+drive ` documentation for more information on how to
+configure the required services.
+
+In addition, there are also many options related to authentication. These are
+provided by :keystone-doc:`keystone <>` but are listed below for completeness:
+
+- :oslo.config:option:`vendordata_dynamic_auth.cafile`
+- :oslo.config:option:`vendordata_dynamic_auth.certfile`
+- :oslo.config:option:`vendordata_dynamic_auth.keyfile`
+- :oslo.config:option:`vendordata_dynamic_auth.insecure`
+- :oslo.config:option:`vendordata_dynamic_auth.timeout`
+- :oslo.config:option:`vendordata_dynamic_auth.collect_timing`
+- :oslo.config:option:`vendordata_dynamic_auth.split_loggers`
+- :oslo.config:option:`vendordata_dynamic_auth.auth_type`
+- :oslo.config:option:`vendordata_dynamic_auth.auth_section`
+- :oslo.config:option:`vendordata_dynamic_auth.auth_url`
+- :oslo.config:option:`vendordata_dynamic_auth.system_scope`
+- :oslo.config:option:`vendordata_dynamic_auth.domain_id`
+- :oslo.config:option:`vendordata_dynamic_auth.domain_name`
+- :oslo.config:option:`vendordata_dynamic_auth.project_id`
+- :oslo.config:option:`vendordata_dynamic_auth.project_name`
+- :oslo.config:option:`vendordata_dynamic_auth.project_domain_id`
+- :oslo.config:option:`vendordata_dynamic_auth.project_domain_name`
+- :oslo.config:option:`vendordata_dynamic_auth.trust_id`
+- :oslo.config:option:`vendordata_dynamic_auth.default_domain_id`
+- :oslo.config:option:`vendordata_dynamic_auth.default_domain_name`
+- :oslo.config:option:`vendordata_dynamic_auth.user_id`
+- :oslo.config:option:`vendordata_dynamic_auth.username`
+- :oslo.config:option:`vendordata_dynamic_auth.user_domain_id`
+- :oslo.config:option:`vendordata_dynamic_auth.user_domain_name`
+- :oslo.config:option:`vendordata_dynamic_auth.password`
+- :oslo.config:option:`vendordata_dynamic_auth.tenant_id`
+- :oslo.config:option:`vendordata_dynamic_auth.tenant_name`
+
+Refer to the :keystone-doc:`keystone documentation `
+for information on configuring these.
+
+
+References
+----------
+
+* Michael Still's talk from the Queens summit in Sydney, `Metadata, User Data,
+ Vendor Data, oh my!`__
+* Michael's blog post on `deploying a simple vendordata service`__ which
+ provides more details and sample code to supplement the documentation above.
+
+__ https://www.openstack.org/videos/sydney-2017/metadata-user-data-vendor-data-oh-my
+__ https://www.madebymikal.com/nova-vendordata-deployment-an-excessively-detailed-guide/
diff --git a/doc/source/admin/virtual-gpu.rst b/doc/source/admin/virtual-gpu.rst
index f7b76a67b2e..358e613bc09 100644
--- a/doc/source/admin/virtual-gpu.rst
+++ b/doc/source/admin/virtual-gpu.rst
@@ -2,6 +2,11 @@
Attaching virtual GPU devices to guests
=======================================
+.. important::
+
+ The functionality described below is only supported by the libvirt/KVM
+ driver.
+
The virtual GPU feature in Nova allows a deployment to provide specific GPU
types for instances using physical GPUs that can provide virtual devices.
@@ -10,14 +15,11 @@ Graphics Processing Unit (pGPU) can be virtualized as multiple virtual Graphics
Processing Units (vGPUs) if the hypervisor supports the hardware driver and has
the capability to create guests using those virtual devices.
-This feature is highly dependent on the hypervisor, its version and the
-physical devices present on the host.
-
-.. important:: As of the Queens release, there is no upstream continuous
- integration testing with a hardware environment that has virtual
- GPUs and therefore this feature is considered experimental.
+This feature is highly dependent on the version of libvirt and the physical
+devices present on the host. In addition, the vendor's vGPU driver software
+must be installed and configured on the host at the same time.
-Hypervisor-specific caveats are mentioned in the `Caveats`_ section.
+Caveats are mentioned in the `Caveats`_ section.
To enable virtual GPUs, follow the steps below:
@@ -31,24 +33,53 @@ Enable GPU types (Compute)
#. Specify which specific GPU type(s) the instances would get.
- Edit :oslo.config:option:`devices.enabled_vgpu_types`:
+ Edit :oslo.config:option:`devices.enabled_mdev_types`:
+
+ .. code-block:: ini
+
+ [devices]
+ enabled_mdev_types = nvidia-35
+
+ If you want to support more than a single GPU type, you need to provide a
+ separate configuration section for each device. For example:
.. code-block:: ini
[devices]
- enabled_vgpu_types = nvidia-35
+ enabled_mdev_types = nvidia-35, nvidia-36
+
+ [mdev_nvidia-35]
+ device_addresses = 0000:84:00.0,0000:85:00.0
- .. note::
+ [mdev_nvidia-36]
+ device_addresses = 0000:86:00.0
- As of the Queens release, Nova only supports a single type. If more
- than one vGPU type is specified (as a comma-separated list), only the
- first one will be used.
+ where you have to define which physical GPUs are supported per GPU type.
+
+ If the same PCI address is provided for two different types, nova-compute
+ will refuse to start and issue a specific error in the logs.
To know which specific type(s) to mention, please refer to `How to discover
a GPU type`_.
+   .. versionchanged:: 21.0.0
+
+      Support for multiple GPU types is only available in the 21.0.0
+      (Ussuri) release and later.
+
#. Restart the ``nova-compute`` service.
+
+ .. warning::
+
+      Changing the type is possible, but since existing physical GPUs can't
+      address multiple guests using different types, doing so will make Nova
+      return a ``NoValidHost`` error if instances with the original type
+      still exist. Accordingly, it's highly recommended to instead deploy the
+      new type on new compute nodes that don't already have workloads, and to
+      rebuild instances on the nodes that need to change types.
+
+
Configure a flavor (Controller)
-------------------------------
@@ -60,13 +91,14 @@ Configure a flavor to request one virtual GPU:
.. note::
- As of the Queens release, all hypervisors that support virtual GPUs
- only accept a single virtual GPU per instance.
+ As of the Queens release, all hypervisors that support virtual GPUs
+ only accept a single virtual GPU per instance.
The enabled vGPU types on the compute hosts are not exposed to API users.
Flavors configured for vGPU support can be tied to host aggregates as a means
to properly schedule those flavors onto the compute hosts that support them.
-See the :doc:`/user/aggregates` for more information.
+See :doc:`/admin/aggregates` for more information.
+
Create instances with virtual GPU devices
-----------------------------------------
@@ -79,90 +111,178 @@ provided by compute nodes.
$ openstack server create --flavor vgpu_1 --image cirros-0.3.5-x86_64-uec --wait test-vgpu
-.. note::
-
- As of the Queens release, only the *FilterScheduler* scheduler driver
- uses the Placement API.
-
How to discover a GPU type
--------------------------
-Depending on your hypervisor:
+Virtual GPUs are seen as mediated devices. Physical PCI devices (the graphics
+card here) supporting virtual GPUs propose mediated device (mdev) types. Since
+mediated devices are supported by the Linux kernel through sysfs files after
+installing the vendor's virtual GPU driver software, you can see the required
+properties as follows:
+
+.. code-block:: console
-- For libvirt, virtual GPUs are seen as mediated devices. Physical PCI devices
- (the graphic card here) supporting virtual GPUs propose mediated device
- (mdev) types. Since mediated devices are supported by the Linux kernel
- through sysfs files, you can see the required properties as follows:
+ $ ls /sys/class/mdev_bus/*/mdev_supported_types
+ /sys/class/mdev_bus/0000:84:00.0/mdev_supported_types:
+ nvidia-35 nvidia-36 nvidia-37 nvidia-38 nvidia-39 nvidia-40 nvidia-41 nvidia-42 nvidia-43 nvidia-44 nvidia-45
- .. code-block:: console
+ /sys/class/mdev_bus/0000:85:00.0/mdev_supported_types:
+ nvidia-35 nvidia-36 nvidia-37 nvidia-38 nvidia-39 nvidia-40 nvidia-41 nvidia-42 nvidia-43 nvidia-44 nvidia-45
- $ ls /sys/class/mdev_bus/*/mdev_supported_types
- /sys/class/mdev_bus/0000:84:00.0/mdev_supported_types:
- nvidia-35 nvidia-36 nvidia-37 nvidia-38 nvidia-39 nvidia-40 nvidia-41 nvidia-42 nvidia-43 nvidia-44 nvidia-45
+ /sys/class/mdev_bus/0000:86:00.0/mdev_supported_types:
+ nvidia-35 nvidia-36 nvidia-37 nvidia-38 nvidia-39 nvidia-40 nvidia-41 nvidia-42 nvidia-43 nvidia-44 nvidia-45
- /sys/class/mdev_bus/0000:85:00.0/mdev_supported_types:
- nvidia-35 nvidia-36 nvidia-37 nvidia-38 nvidia-39 nvidia-40 nvidia-41 nvidia-42 nvidia-43 nvidia-44 nvidia-45
+ /sys/class/mdev_bus/0000:87:00.0/mdev_supported_types:
+ nvidia-35 nvidia-36 nvidia-37 nvidia-38 nvidia-39 nvidia-40 nvidia-41 nvidia-42 nvidia-43 nvidia-44 nvidia-45
- /sys/class/mdev_bus/0000:86:00.0/mdev_supported_types:
- nvidia-35 nvidia-36 nvidia-37 nvidia-38 nvidia-39 nvidia-40 nvidia-41 nvidia-42 nvidia-43 nvidia-44 nvidia-45
- /sys/class/mdev_bus/0000:87:00.0/mdev_supported_types:
- nvidia-35 nvidia-36 nvidia-37 nvidia-38 nvidia-39 nvidia-40 nvidia-41 nvidia-42 nvidia-43 nvidia-44 nvidia-45
+Checking allocations and inventories for virtual GPUs
+-----------------------------------------------------
+.. note::
-- For XenServer, virtual GPU types are created by XenServer at startup
- depending on the available hardware and config files present in dom0.
- You can run the command of ``xe vgpu-type-list`` from dom0 to get the
- available vGPU types. The value for the field of ``model-name ( RO):``
- is the vGPU type's name which can be used to set the nova config option
- ``[devices]/enabled_vgpu_types``. See the following example:
+ The information below is only valid from the 19.0.0 Stein release. Before
+ this release, inventories and allocations related to a ``VGPU`` resource
+ class are still on the root resource provider related to the compute node.
+ If upgrading from Rocky and using the libvirt driver, ``VGPU`` inventory and
+ allocations are moved to child resource providers that represent actual
+ physical GPUs.
+
+The following examples use the `osc-placement plugin`_ for OpenStackClient.
+For details on specific commands, see its documentation.
+
+#. Get the list of resource providers
+
+ .. code-block:: console
+
+ $ openstack resource provider list
+ +--------------------------------------+---------------------------------------------------------+------------+
+ | uuid | name | generation |
+ +--------------------------------------+---------------------------------------------------------+------------+
+ | 5958a366-3cad-416a-a2c9-cfbb5a472287 | virtlab606.xxxxxxxxxxxxxxxxxxxxxxxxxxx | 7 |
+ | fc9b9287-ef5e-4408-aced-d5577560160c | virtlab606.xxxxxxxxxxxxxxxxxxxxxxxxxxx_pci_0000_86_00_0 | 2 |
+ | e2f8607b-0683-4141-a8af-f5e20682e28c | virtlab606.xxxxxxxxxxxxxxxxxxxxxxxxxxx_pci_0000_85_00_0 | 3 |
+ | 85dd4837-76f9-41f2-9f19-df386017d8a0 | virtlab606.xxxxxxxxxxxxxxxxxxxxxxxxxxx_pci_0000_87_00_0 | 2 |
+ | 7033d860-8d8a-4963-8555-0aa902a08653 | virtlab606.xxxxxxxxxxxxxxxxxxxxxxxxxxx_pci_0000_84_00_0 | 2 |
+ +--------------------------------------+---------------------------------------------------------+------------+
+
+ In this example, we see the root resource provider
+ ``5958a366-3cad-416a-a2c9-cfbb5a472287`` with four other resource providers
+ that are its children and where each of them corresponds to a single
+ physical GPU.
+
+#. Check the inventory of each resource provider to see resource classes
+
+ .. code-block:: console
+
+ $ openstack resource provider inventory list 5958a366-3cad-416a-a2c9-cfbb5a472287
+ +----------------+------------------+----------+----------+-----------+----------+-------+
+ | resource_class | allocation_ratio | max_unit | reserved | step_size | min_unit | total |
+ +----------------+------------------+----------+----------+-----------+----------+-------+
+ | VCPU | 16.0 | 48 | 0 | 1 | 1 | 48 |
+ | MEMORY_MB | 1.5 | 65442 | 512 | 1 | 1 | 65442 |
+ | DISK_GB | 1.0 | 49 | 0 | 1 | 1 | 49 |
+ +----------------+------------------+----------+----------+-----------+----------+-------+
+ $ openstack resource provider inventory list e2f8607b-0683-4141-a8af-f5e20682e28c
+ +----------------+------------------+----------+----------+-----------+----------+-------+
+ | resource_class | allocation_ratio | max_unit | reserved | step_size | min_unit | total |
+ +----------------+------------------+----------+----------+-----------+----------+-------+
+ | VGPU | 1.0 | 16 | 0 | 1 | 1 | 16 |
+ +----------------+------------------+----------+----------+-----------+----------+-------+
+
+ Here you can see a ``VGPU`` inventory on the child resource provider while
+ other resource class inventories are still located on the root resource
+ provider.
+
+#. Check allocations for each server that is using virtual GPUs
+
+ .. code-block:: console
+
+ $ openstack server list
+ +--------------------------------------+-------+--------+---------------------------------------------------------+--------------------------+--------+
+ | ID | Name | Status | Networks | Image | Flavor |
+ +--------------------------------------+-------+--------+---------------------------------------------------------+--------------------------+--------+
+ | 5294f726-33d5-472a-bef1-9e19bb41626d | vgpu2 | ACTIVE | private=10.0.0.14, fd45:cdad:c431:0:f816:3eff:fe78:a748 | cirros-0.4.0-x86_64-disk | vgpu |
+ | a6811fc2-cec8-4f1d-baea-e2c6339a9697 | vgpu1 | ACTIVE | private=10.0.0.34, fd45:cdad:c431:0:f816:3eff:fe54:cc8f | cirros-0.4.0-x86_64-disk | vgpu |
+ +--------------------------------------+-------+--------+---------------------------------------------------------+--------------------------+--------+
+
+ $ openstack resource provider allocation show 5294f726-33d5-472a-bef1-9e19bb41626d
+ +--------------------------------------+------------+------------------------------------------------+
+ | resource_provider | generation | resources |
+ +--------------------------------------+------------+------------------------------------------------+
+ | 5958a366-3cad-416a-a2c9-cfbb5a472287 | 8 | {u'VCPU': 1, u'MEMORY_MB': 512, u'DISK_GB': 1} |
+ | 7033d860-8d8a-4963-8555-0aa902a08653 | 3 | {u'VGPU': 1} |
+ +--------------------------------------+------------+------------------------------------------------+
+
+ $ openstack resource provider allocation show a6811fc2-cec8-4f1d-baea-e2c6339a9697
+ +--------------------------------------+------------+------------------------------------------------+
+ | resource_provider | generation | resources |
+ +--------------------------------------+------------+------------------------------------------------+
+ | e2f8607b-0683-4141-a8af-f5e20682e28c | 3 | {u'VGPU': 1} |
+ | 5958a366-3cad-416a-a2c9-cfbb5a472287 | 8 | {u'VCPU': 1, u'MEMORY_MB': 512, u'DISK_GB': 1} |
+ +--------------------------------------+------------+------------------------------------------------+
+
+ In this example, two servers were created using a flavor asking for 1
+ ``VGPU``, so when looking at the allocations for each consumer UUID (which
+   is the server UUID), you can see that the ``VGPU`` allocation is against
+   the child resource provider while other allocations are for the root
+   resource provider. Here, that means that the virtual GPU used by
+ ``a6811fc2-cec8-4f1d-baea-e2c6339a9697`` is actually provided by the
+ physical GPU having the PCI ID ``0000:85:00.0``.
+
+
+(Optional) Provide custom traits for multiple GPU types
+-------------------------------------------------------
+
+Since operators may want to support different GPU types per compute node, it
+is possible to have flavors ask for a specific GPU type. This is done using
+custom traits, by decorating the child Resource Providers that correspond to
+physical GPUs.
+
- .. code-block:: console
+.. note::
- [root@trailblazer-2 ~]# xe vgpu-type-list
- uuid ( RO) : 78d2d963-41d6-4130-8842-aedbc559709f
- vendor-name ( RO): NVIDIA Corporation
- model-name ( RO): GRID M60-8Q
- max-heads ( RO): 4
- max-resolution ( RO): 4096x2160
+ Possible improvements in a future release could consist of providing
+ automatic tagging of Resource Providers with standard traits corresponding
+ to versioned mapping of public GPU types. For the moment, this has to be
+   done manually.
+
+#. Get the list of resource providers
+
- uuid ( RO) : a1bb1692-8ce3-4577-a611-6b4b8f35a5c9
- vendor-name ( RO): NVIDIA Corporation
- model-name ( RO): GRID M60-0Q
- max-heads ( RO): 2
- max-resolution ( RO): 2560x1600
+   See `Checking allocations and inventories for virtual GPUs`_ first to get
+   the list of Resource Providers that support a ``VGPU`` resource class.
+
+#. Define custom traits that will each correspond to a GPU type
+
- uuid ( RO) : 69d03200-49eb-4002-b661-824aec4fd26f
- vendor-name ( RO): NVIDIA Corporation
- model-name ( RO): GRID M60-2A
- max-heads ( RO): 1
- max-resolution ( RO): 1280x1024
+   .. code-block:: console
+
+      $ openstack --os-placement-api-version 1.6 trait create CUSTOM_NVIDIA_11
+
- uuid ( RO) : c58b1007-8b47-4336-95aa-981a5634d03d
- vendor-name ( RO): NVIDIA Corporation
- model-name ( RO): GRID M60-4Q
- max-heads ( RO): 4
- max-resolution ( RO): 4096x2160
+   In this example, we ask to create a custom trait named ``CUSTOM_NVIDIA_11``.
+
+#. Add the corresponding trait to the Resource Provider matching the GPU
+
- uuid ( RO) : 292a2b20-887f-4a13-b310-98a75c53b61f
- vendor-name ( RO): NVIDIA Corporation
- model-name ( RO): GRID M60-2Q
- max-heads ( RO): 4
- max-resolution ( RO): 4096x2160
+   .. code-block:: console
+
+      $ openstack --os-placement-api-version 1.6 resource provider trait set \
+      --trait CUSTOM_NVIDIA_11 e2f8607b-0683-4141-a8af-f5e20682e28c
+
- uuid ( RO) : d377db6b-a068-4a98-92a8-f94bd8d6cc5d
- vendor-name ( RO): NVIDIA Corporation
- model-name ( RO): GRID M60-0B
- max-heads ( RO): 2
- max-resolution ( RO): 2560x1600
+   In this case, the trait ``CUSTOM_NVIDIA_11`` will be added to the Resource
+   Provider with the UUID ``e2f8607b-0683-4141-a8af-f5e20682e28c`` that
+   corresponds to the PCI address ``0000:85:00.0`` as shown above.
+
- ...
+#. Amend the flavor to add a requested trait
+
+ .. code-block:: console
+
+ $ openstack flavor set --property trait:CUSTOM_NVIDIA_11=required vgpu_1
+
+ In this example, we add the ``CUSTOM_NVIDIA_11`` trait as a required
+ information for the ``vgpu_1`` flavor we created earlier.
+
+   This will allow the Placement service to only return the Resource Providers
+   matching this trait, so only the GPUs that were decorated with the trait
+   will be checked for this flavor.
Caveats
@@ -173,8 +293,6 @@ Caveats
This information is correct as of the 17.0.0 Queens release. Where
improvements have been made or issues fixed, they are noted per item.
-For libvirt:
-
* Suspending a guest that has vGPUs doesn't yet work because of a libvirt
limitation (it can't hot-unplug mediated devices from a guest). Workarounds
using other instance actions (like snapshotting the instance or shelving it)
@@ -183,45 +301,63 @@ For libvirt:
that will cause the instance to be set back to ACTIVE. The ``suspend`` action
in the ``os-instance-actions`` API will have an *Error* state.
+ .. versionchanged:: 25.0.0
+
+ This has been resolved in the Yoga release and backported to Xena. See
+ `bug 1948705`_.
+
* Resizing an instance with a new flavor that has vGPU resources doesn't
allocate those vGPUs to the instance (the instance is created without
vGPU resources). The proposed workaround is to rebuild the instance after
resizing it. The rebuild operation allocates vGPUS to the instance.
+ .. versionchanged:: 21.0.0
+
+ This has been resolved in the Ussuri release. See `bug 1778563`_.
+
* Cold migrating an instance to another host will have the same problem as
resize. If you want to migrate an instance, make sure to rebuild it after the
migration.
+ .. versionchanged:: 21.0.0
+
+ This has been resolved in the Ussuri release. See `bug 1778563`_.
+
* Rescue images do not use vGPUs. An instance being rescued does not keep its
vGPUs during rescue. During that time, another instance can receive those
vGPUs. This is a known issue. The recommended workaround is to rebuild an
instance immediately after rescue. However, rebuilding the rescued instance
only helps if there are other free vGPUs on the host.
- .. note:: This has been resolved in the Rocky release [#]_.
+ .. versionchanged:: 18.0.0
-For XenServer:
+ This has been resolved in the Rocky release. See `bug 1762688`_.
-* Suspend and live migration with vGPUs attached depends on support from the
- underlying XenServer version. Please see XenServer release notes for up to
- date information on when a hypervisor supporting live migration and
- suspend/resume with vGPUs is available. If a suspend or live migrate operation
- is attempted with a XenServer version that does not support that operation, an
- internal exception will occur that will cause nova setting the instance to
- be in ERROR status. You can use the command of
- ``openstack server set --state active `` to set it back to ACTIVE.
+For nested vGPUs:
-* Resizing an instance with a new flavor that has vGPU resources doesn't
- allocate those vGPUs to the instance (the instance is created without
- vGPU resources). The proposed workaround is to rebuild the instance after
- resizing it. The rebuild operation allocates vGPUS to the instance.
+.. note::
-* Cold migrating an instance to another host will have the same problem as
- resize. If you want to migrate an instance, make sure to rebuild it after the
- migration.
+ This information is correct as of the 21.0.0 Ussuri release. Where
+ improvements have been made or issues fixed, they are noted per item.
+
+* If creating servers with a flavor asking for vGPUs and the user wants
+  multi-create (i.e. say ``--max 2``), then the scheduler could return a
+  ``NoValidHost`` exception even if each physical GPU can support at least
+  one of the instances, because the total wanted capacity may not be
+  supported by any single physical GPU.
+  (See `bug 1874664 <https://bugs.launchpad.net/nova/+bug/1874664>`_.)
+
+  For example, when creating servers with a flavor asking for vGPUs, if two
+  child RPs have 4 vGPU inventories each:
+
+ - You can ask for a flavor with 2 vGPU with --max 2.
+ - But you can't ask for a flavor with 4 vGPU and --max 2.
-.. [#] https://bugs.launchpad.net/nova/+bug/1762688
+.. _bug 1778563: https://bugs.launchpad.net/nova/+bug/1778563
+.. _bug 1762688: https://bugs.launchpad.net/nova/+bug/1762688
+.. _bug 1948705: https://bugs.launchpad.net/nova/+bug/1948705
.. Links
.. _Intel GVT-g: https://01.org/igvt-g
.. _NVIDIA GRID vGPU: http://docs.nvidia.com/grid/5.0/pdf/grid-vgpu-user-guide.pdf
+.. _osc-placement plugin: https://docs.openstack.org/osc-placement/latest/index.html
diff --git a/doc/source/admin/virtual-persistent-memory.rst b/doc/source/admin/virtual-persistent-memory.rst
new file mode 100644
index 00000000000..95ad9a942f8
--- /dev/null
+++ b/doc/source/admin/virtual-persistent-memory.rst
@@ -0,0 +1,270 @@
+=============================================
+Attaching virtual persistent memory to guests
+=============================================
+
+.. versionadded:: 20.0.0 (Train)
+
+Starting in the 20.0.0 (Train) release, the virtual persistent memory (vPMEM)
+feature in Nova allows a deployment using the libvirt compute driver to provide
+vPMEMs for instances using physical persistent memory (PMEM) that can provide
+virtual devices.
+
+PMEM must be partitioned into `PMEM namespaces`_ for applications to use.
+This vPMEM feature only uses PMEM namespaces in ``devdax`` mode as QEMU
+`vPMEM backends`_. If you want to dive into related notions, the document
+`NVDIMM Linux kernel document`_ is recommended.
+
+To enable vPMEMs, follow the steps below.
+
+
+Dependencies
+------------
+
+The following are required to support the vPMEM feature:
+
+* Persistent Memory Hardware
+
+ One such product is Intel® Optane™ DC Persistent Memory.
+ `ipmctl`_ is used to configure it.
+
+* Linux Kernel version >= 4.18 with the following modules loaded:
+
+ ``dax_pmem``, ``nd_pmem``, ``device_dax``, ``nd_btt``
+
+.. note::
+
+ NVDIMM support is present in the Linux Kernel v4.0 or newer. It is
+ recommended to use Kernel version 4.2 or later since `NVDIMM support
+ `_
+   is enabled by default. Bugs were encountered with older versions, and all
+   verification work with OpenStack was done on kernel version 4.18, so
+   version 4.18 and newer should work reliably.
+
+* QEMU version >= 3.1.0
+
+* Libvirt version >= 5.0.0
+
+* `ndctl`_ version >= 62
+
+* daxio version >= 1.6
+
+The vPMEM feature has been verified under the software and hardware listed above.
+
+
+Configure PMEM namespaces (Compute)
+-----------------------------------
+
+#. Create PMEM namespaces as `vPMEM backends`_ using the `ndctl`_ utility.
+
+ For example, to create a 30GiB namespace named ``ns3``:
+
+ .. code-block:: console
+
+ $ sudo ndctl create-namespace -s 30G -m devdax -M mem -n ns3
+ {
+ "dev":"namespace1.0",
+ "mode":"devdax",
+ "map":"mem",
+ "size":"30.00 GiB (32.21 GB)",
+ "uuid":"937e9269-512b-4f65-9ac6-b74b61075c11",
+ "raw_uuid":"17760832-a062-4aef-9d3b-95ea32038066",
+ "daxregion":{
+ "id":1,
+ "size":"30.00 GiB (32.21 GB)",
+ "align":2097152,
+ "devices":[
+ {
+ "chardev":"dax1.0",
+ "size":"30.00 GiB (32.21 GB)"
+ }
+ ]
+ },
+ "name":"ns3",
+ "numa_node":1
+ }
+
+ Then list the available PMEM namespaces on the host:
+
+ .. code-block:: console
+
+ $ ndctl list -X
+ [
+ {
+ ...
+ "size":6440353792,
+ ...
+ "name":"ns0",
+ ...
+ },
+ {
+ ...
+ "size":6440353792,
+ ...
+ "name":"ns1",
+ ...
+ },
+ {
+ ...
+ "size":6440353792,
+ ...
+ "name":"ns2",
+ ...
+ },
+ {
+ ...
+ "size":32210157568,
+ ...
+ "name":"ns3",
+ ...
+ }
+ ]
+
+#. Specify which PMEM namespaces should be available to instances.
+
+ Edit :oslo.config:option:`libvirt.pmem_namespaces`:
+
+ .. code-block:: ini
+
+ [libvirt]
+ # pmem_namespaces=$LABEL:$NSNAME[|$NSNAME][,$LABEL:$NSNAME[|$NSNAME]]
+ pmem_namespaces = 6GB:ns0|ns1|ns2,LARGE:ns3
+
+ Configured PMEM namespaces must have already been created on the host as
+ described above. The conf syntax allows the admin to associate one or more
+ namespace ``$NSNAME``\ s with an arbitrary ``$LABEL`` that can subsequently
+ be used in a flavor to request one of those namespaces. It is recommended,
+ but not required, for namespaces under a single ``$LABEL`` to be the same
+ size.
+
+#. Restart the ``nova-compute`` service.
+
+ Nova will invoke `ndctl`_ to identify the configured PMEM namespaces, and
+ report vPMEM resources to placement.
+
+
+Configure a flavor
+------------------
+
+Specify a comma-separated list of the ``$LABEL``\ s from
+:oslo.config:option:`libvirt.pmem_namespaces` to the flavor's ``hw:pmem``
+property. Note that multiple instances of the same label are permitted:
+
+.. code-block:: console
+
+ $ openstack flavor set --property hw:pmem='6GB' my_flavor
+ $ openstack flavor set --property hw:pmem='6GB,LARGE' my_flavor_large
+ $ openstack flavor set --property hw:pmem='6GB,6GB' m1.medium
+
+.. note:: If a NUMA topology is specified, all vPMEM devices will be put on
+ guest NUMA node 0; otherwise nova will generate one NUMA node
+ automatically for the guest.
+
+Based on the above examples, an ``openstack server create`` request with
+``my_flavor_large`` will spawn an instance with two vPMEMs. One, corresponding
+to the ``LARGE`` label, will be ``ns3``; the other, corresponding to the
+``6GB`` label, will be arbitrarily chosen from ``ns0``, ``ns1``, or ``ns2``.
+
+.. note::
+
+ Using vPMEM inside a virtual machine requires the following:
+
+ * Guest kernel version 4.18 or higher;
+ * The ``dax_pmem``, ``nd_pmem``, ``device_dax``, and ``nd_btt`` kernel
+ modules;
+ * The `ndctl`_ utility.
+
+.. note:: When resizing an instance with vPMEMs, the vPMEM data won't be
+ migrated.
+
+
+Verify inventories and allocations
+----------------------------------
+This section describes how to check that:
+
+* vPMEM inventories were created correctly in placement, validating the
+ `configuration described above <#configure-pmem-namespaces-compute>`_.
+* allocations were created correctly in placement for instances spawned from
+ `flavors configured with vPMEMs <#configure-a-flavor>`_.
+
+.. note::
+
+ Inventories and allocations related to vPMEM resource classes are on the
+ root resource provider related to the compute node.
+
+#. Get the list of resource providers
+
+ .. code-block:: console
+
+ $ openstack resource provider list
+ +--------------------------------------+--------+------------+
+ | uuid | name | generation |
+ +--------------------------------------+--------+------------+
+ | 1bc545f9-891f-4930-ab2b-88a56078f4be | host-1 | 47 |
+ | 7d994aef-680d-43d4-9325-a67c807e648e | host-2 | 67 |
+      +--------------------------------------+--------+------------+
+
+#. Check the inventory of each resource provider to see resource classes
+
+ Each ``$LABEL`` configured in :oslo.config:option:`libvirt.pmem_namespaces`
+ is used to generate a resource class named ``CUSTOM_PMEM_NAMESPACE_$LABEL``.
+ Nova will report to Placement the number of vPMEM namespaces configured for
+ each ``$LABEL``. For example, assuming ``host-1`` was configured as
+ described above:
+
+ .. code-block:: console
+
+ $ openstack resource provider inventory list 1bc545f9-891f-4930-ab2b-88a56078f4be
+ +-----------------------------+------------------+----------+----------+-----------+----------+--------+
+ | resource_class | allocation_ratio | max_unit | reserved | step_size | min_unit | total |
+ +-----------------------------+------------------+----------+----------+-----------+----------+--------+
+ | VCPU | 16.0 | 64 | 0 | 1 | 1 | 64 |
+ | MEMORY_MB | 1.5 | 190604 | 512 | 1 | 1 | 190604 |
+ | CUSTOM_PMEM_NAMESPACE_LARGE | 1.0 | 1 | 0 | 1 | 1 | 1 |
+ | CUSTOM_PMEM_NAMESPACE_6GB | 1.0 | 3 | 0 | 1 | 1 | 3 |
+ | DISK_GB | 1.0 | 439 | 0 | 1 | 1 | 439 |
+ +-----------------------------+------------------+----------+----------+-----------+----------+--------+
+
+ Here you can see the vPMEM resource classes prefixed with
+ ``CUSTOM_PMEM_NAMESPACE_``. The ``LARGE`` label was configured with one
+ namespace (``ns3``), so it has an inventory of ``1``. Since the ``6GB``
+ label was configured with three namespaces (``ns0``, ``ns1``, and ``ns2``),
+ the ``CUSTOM_PMEM_NAMESPACE_6GB`` inventory has a ``total`` and ``max_unit``
+ of ``3``.
+
+#. Check allocations for each server that is using vPMEMs
+
+ .. code-block:: console
+
+ $ openstack server list
+ +--------------------------------------+----------------------+--------+-------------------+---------------+-----------------+
+ | ID | Name | Status | Networks | Image | Flavor |
+ +--------------------------------------+----------------------+--------+-------------------+---------------+-----------------+
+ | 41d3e139-de5c-40fd-9d82-016b72f2ba1d | server-with-2-vpmems | ACTIVE | private=10.0.0.24 | ubuntu-bionic | my_flavor_large |
+ | a616a7f6-b285-4adf-a885-dd8426dd9e6a | server-with-1-vpmem | ACTIVE | private=10.0.0.13 | ubuntu-bionic | my_flavor |
+ +--------------------------------------+----------------------+--------+-------------------+---------------+-----------------+
+
+ $ openstack resource provider allocation show 41d3e139-de5c-40fd-9d82-016b72f2ba1d
+ +--------------------------------------+------------+------------------------------------------------------------------------------------------------------------------------+
+ | resource_provider | generation | resources |
+ +--------------------------------------+------------+------------------------------------------------------------------------------------------------------------------------+
+ | 1bc545f9-891f-4930-ab2b-88a56078f4be | 49 | {u'MEMORY_MB': 32768, u'VCPU': 16, u'DISK_GB': 20, u'CUSTOM_PMEM_NAMESPACE_6GB': 1, u'CUSTOM_PMEM_NAMESPACE_LARGE': 1} |
+ +--------------------------------------+------------+------------------------------------------------------------------------------------------------------------------------+
+
+ $ openstack resource provider allocation show a616a7f6-b285-4adf-a885-dd8426dd9e6a
+ +--------------------------------------+------------+-----------------------------------------------------------------------------------+
+ | resource_provider | generation | resources |
+ +--------------------------------------+------------+-----------------------------------------------------------------------------------+
+ | 1bc545f9-891f-4930-ab2b-88a56078f4be | 49 | {u'MEMORY_MB': 8192, u'VCPU': 8, u'DISK_GB': 20, u'CUSTOM_PMEM_NAMESPACE_6GB': 1} |
+ +--------------------------------------+------------+-----------------------------------------------------------------------------------+
+
+ In this example, two servers were created. ``server-with-2-vpmems`` used
+ ``my_flavor_large`` asking for one ``6GB`` vPMEM and one ``LARGE`` vPMEM.
+ ``server-with-1-vpmem`` used ``my_flavor`` asking for a single ``6GB``
+ vPMEM.
+
+
+.. _`PMEM namespaces`: http://pmem.io/ndctl/ndctl-create-namespace.html
+.. _`vPMEM backends`: https://github.com/qemu/qemu/blob/19b599f7664b2ebfd0f405fb79c14dd241557452/docs/nvdimm.txt#L145
+.. _`NVDIMM Linux kernel document`: https://www.kernel.org/doc/Documentation/nvdimm/nvdimm.txt
+.. _`ipmctl`: https://software.intel.com/en-us/articles/quick-start-guide-configure-intel-optane-dc-persistent-memory-on-linux
+.. _`ndctl`: http://pmem.io/ndctl/
diff --git a/doc/source/cli/index.rst b/doc/source/cli/index.rst
index bf2d164d9be..a5b1d89bfc1 100644
--- a/doc/source/cli/index.rst
+++ b/doc/source/cli/index.rst
@@ -31,6 +31,7 @@ database.
:maxdepth: 1
nova-manage
+ nova-policy
nova-status
Service Daemons
@@ -46,12 +47,10 @@ daemonize correctly after starting up.
nova-api
nova-compute
nova-conductor
- nova-console
nova-novncproxy
nova-scheduler
nova-serialproxy
nova-spicehtml5proxy
- nova-xvpvncproxy
WSGI Services
-------------
@@ -77,17 +76,3 @@ are documented for completeness and debugging if something goes wrong.
:maxdepth: 1
nova-rootwrap
-
-Deprecated Services
--------------------
-
-The following services are deprecated in nova. They should not be used in new
-deployments, but are documented for existing ones.
-
-.. toctree::
- :maxdepth: 1
-
- nova-cells
- nova-dhcpbridge
- nova-network
- nova-consoleauth
diff --git a/doc/source/cli/nova-api-metadata.rst b/doc/source/cli/nova-api-metadata.rst
index 3121280fd4d..f6f5d8afba2 100644
--- a/doc/source/cli/nova-api-metadata.rst
+++ b/doc/source/cli/nova-api-metadata.rst
@@ -2,47 +2,49 @@
nova-api-metadata
=================
---------------------------------
-Server for the Nova Metadata API
---------------------------------
-
-:Author: openstack@lists.openstack.org
-:Copyright: OpenStack Foundation
-:Manual section: 1
-:Manual group: cloud computing
+.. program:: nova-api-metadata
Synopsis
========
::
- nova-api-metadata [options]
+ nova-api-metadata [<options>...]
Description
===========
:program:`nova-api-metadata` is a server daemon that serves the Nova Metadata
-API.
+API. This daemon routes database requests via the ``nova-conductor`` service,
+so there are some considerations about using this in a
+:ref:`multi-cell layout <cells-v2-layout>`.
Options
=======
-**General options**
+.. rubric:: General options
+
+.. include:: opts/common.rst
+
+.. rubric:: Debugger options
+
+.. include:: opts/debugger.rst
Files
=====
* ``/etc/nova/nova.conf``
* ``/etc/nova/api-paste.ini``
-* ``/etc/nova/policy.json``
+* ``/etc/nova/policy.yaml``
+* ``/etc/nova/policy.d/``
* ``/etc/nova/rootwrap.conf``
* ``/etc/nova/rootwrap.d/``
See Also
========
-* :nova-doc:`OpenStack Nova <>`
-* :nova-doc:`Using WSGI with Nova `
+:doc:`nova-api(1) <nova-api>`,
+:doc:`nova-api-os-compute(1) <nova-api-os-compute>`
Bugs
====
diff --git a/doc/source/cli/nova-api-os-compute.rst b/doc/source/cli/nova-api-os-compute.rst
index 56d53eed208..6564f03626c 100644
--- a/doc/source/cli/nova-api-os-compute.rst
+++ b/doc/source/cli/nova-api-os-compute.rst
@@ -2,21 +2,14 @@
nova-api-os-compute
===================
-------------------------------------------
-Server for the Nova OpenStack Compute APIs
-------------------------------------------
-
-:Author: openstack@lists.openstack.org
-:Copyright: OpenStack Foundation
-:Manual section: 1
-:Manual group: cloud computing
+.. program:: nova-api-os-compute
Synopsis
========
::
- nova-api-os-compute [options]
+ nova-api-os-compute [<options>...]
Description
===========
@@ -27,22 +20,29 @@ OpenStack Compute API.
Options
=======
-**General options**
+.. rubric:: General options
+
+.. include:: opts/common.rst
+
+.. rubric:: Debugger options
+
+.. include:: opts/debugger.rst
Files
=====
* ``/etc/nova/nova.conf``
* ``/etc/nova/api-paste.ini``
-* ``/etc/nova/policy.json``
+* ``/etc/nova/policy.yaml``
+* ``/etc/nova/policy.d/``
* ``/etc/nova/rootwrap.conf``
* ``/etc/nova/rootwrap.d/``
See Also
========
-* :nova-doc:`OpenStack Nova <>`
-* :nova-doc:`Using WSGI with Nova `
+:doc:`nova-api(1) <nova-api>`,
+:doc:`nova-api-metadata(1) <nova-api-metadata>`
Bugs
====
diff --git a/doc/source/cli/nova-api.rst b/doc/source/cli/nova-api.rst
index ea58d671a02..b10efc6b391 100644
--- a/doc/source/cli/nova-api.rst
+++ b/doc/source/cli/nova-api.rst
@@ -2,21 +2,14 @@
nova-api
========
--------------------------------------
-Server for the OpenStack Compute APIs
--------------------------------------
-
-:Author: openstack@lists.openstack.org
-:Copyright: OpenStack Foundation
-:Manual section: 1
-:Manual group: cloud computing
+.. program:: nova-api
Synopsis
========
::
- nova-api [options]
+ nova-api [<options>...]
Description
===========
@@ -27,22 +20,29 @@ APIs in separate greenthreads.
Options
=======
-**General options**
+.. rubric:: General options
+
+.. include:: opts/common.rst
+
+.. rubric:: Debugger options
+
+.. include:: opts/debugger.rst
Files
=====
* ``/etc/nova/nova.conf``
* ``/etc/nova/api-paste.ini``
-* ``/etc/nova/policy.json``
+* ``/etc/nova/policy.yaml``
+* ``/etc/nova/policy.d/``
* ``/etc/nova/rootwrap.conf``
* ``/etc/nova/rootwrap.d/``
See Also
========
-* :nova-doc:`OpenStack Nova <>`
-* :nova-doc:`Using WSGI with Nova `
+:doc:`nova-api-metadata(1) <nova-api-metadata>`,
+:doc:`nova-api-os-compute(1) <nova-api-os-compute>`
Bugs
====
diff --git a/doc/source/cli/nova-cells.rst b/doc/source/cli/nova-cells.rst
deleted file mode 100644
index 90b66393848..00000000000
--- a/doc/source/cli/nova-cells.rst
+++ /dev/null
@@ -1,54 +0,0 @@
-==========
-nova-cells
-==========
-
--------------------------
-Server for the Nova Cells
--------------------------
-
-:Author: openstack@lists.openstack.org
-:Copyright: OpenStack Foundation
-:Manual section: 1
-:Manual group: cloud computing
-
-Synopsis
-========
-
-::
-
- nova-cells [options]
-
-Description
-===========
-
-:program:`nova-cells` is a server daemon that serves the Nova Cells service,
-which handles communication between cells and selects cells for new instances.
-
-.. deprecated:: 16.0.0
- Everything in this document is referring to Cells v1, which is
- not recommended for new deployments and is deprecated in favor of Cells v2
- as of the 16.0.0 Pike release. For information about commands to use
- with Cells v2, see the man page for :ref:`man-page-cells-v2`.
-
-Options
-=======
-
-**General options**
-
-Files
-=====
-
-* ``/etc/nova/nova.conf``
-* ``/etc/nova/policy.json``
-* ``/etc/nova/rootwrap.conf``
-* ``/etc/nova/rootwrap.d/``
-
-See Also
-========
-
-* :nova-doc:`OpenStack Nova <>`
-
-Bugs
-====
-
-* Nova bugs are managed at `Launchpad `__
diff --git a/doc/source/cli/nova-compute.rst b/doc/source/cli/nova-compute.rst
index f48478e8be8..f190949efa5 100644
--- a/doc/source/cli/nova-compute.rst
+++ b/doc/source/cli/nova-compute.rst
@@ -2,21 +2,14 @@
nova-compute
============
--------------------
-Nova Compute Server
--------------------
-
-:Author: openstack@lists.openstack.org
-:Copyright: OpenStack Foundation
-:Manual section: 1
-:Manual group: cloud computing
+.. program:: nova-compute
Synopsis
========
::
- nova-compute [options]
+ nova-compute [<options>...]
Description
===========
@@ -29,20 +22,34 @@ instance's state, attaching persistent storage, and terminating the instance.
Options
=======
-**General options**
+.. rubric:: General options
+
+.. include:: opts/common.rst
+
+.. rubric:: Debugger options
+
+.. include:: opts/debugger.rst
Files
=====
+.. todo: We shouldn't have policy configuration in this non-API service, but
+ bug #1675486 means we do have one
+
* ``/etc/nova/nova.conf``
-* ``/etc/nova/policy.json``
+* ``/etc/nova/policy.yaml``
+* ``/etc/nova/policy.d/``
* ``/etc/nova/rootwrap.conf``
* ``/etc/nova/rootwrap.d/``
See Also
========
-* :nova-doc:`OpenStack Nova <>`
+:doc:`nova-conductor(1) <nova-conductor>`,
+:doc:`nova-manage(1) <nova-manage>`,
+:doc:`nova-rootwrap(1) <nova-rootwrap>`,
+:doc:`nova-scheduler(1) <nova-scheduler>`,
+:doc:`nova-status(1) <nova-status>`
Bugs
====
diff --git a/doc/source/cli/nova-conductor.rst b/doc/source/cli/nova-conductor.rst
index cfa53806452..3020250e398 100644
--- a/doc/source/cli/nova-conductor.rst
+++ b/doc/source/cli/nova-conductor.rst
@@ -2,21 +2,14 @@
nova-conductor
==============
------------------------------
-Server for the Nova Conductor
------------------------------
-
-:Author: openstack@lists.openstack.org
-:Copyright: OpenStack Foundation
-:Manual section: 1
-:Manual group: cloud computing
+.. program:: nova-conductor
Synopsis
========
::
- nova-conductor [options]
+ nova-conductor [<options>...]
Description
===========
@@ -27,7 +20,13 @@ service, which provides coordination and database query support for nova.
Options
=======
-**General options**
+.. rubric:: General options
+
+.. include:: opts/common.rst
+
+.. rubric:: Debugger options
+
+.. include:: opts/debugger.rst
Files
=====
@@ -37,7 +36,11 @@ Files
See Also
========
-* :nova-doc:`OpenStack Nova <>`
+:doc:`nova-compute(1) <nova-compute>`,
+:doc:`nova-manage(1) <nova-manage>`,
+:doc:`nova-rootwrap(1) <nova-rootwrap>`,
+:doc:`nova-scheduler(1) <nova-scheduler>`,
+:doc:`nova-status(1) <nova-status>`
Bugs
====
diff --git a/doc/source/cli/nova-console.rst b/doc/source/cli/nova-console.rst
deleted file mode 100644
index 1d3eb4e1860..00000000000
--- a/doc/source/cli/nova-console.rst
+++ /dev/null
@@ -1,49 +0,0 @@
-============
-nova-console
-============
-
--------------------
-Nova Console Server
--------------------
-
-:Author: openstack@lists.openstack.org
-:Copyright: OpenStack Foundation
-:Manual section: 1
-:Manual group: cloud computing
-
-Synopsis
-========
-
-::
-
- nova-console [options]
-
-Description
-===========
-
-:program:`nova-console` is a server daemon that serves the Nova Console
-service, which is a console proxy to set up multi-tenant VM console access,
-e.g. with *XVP*.
-
-Options
-=======
-
-**General options**
-
-Files
-=====
-
-* ``/etc/nova/nova.conf``
-* ``/etc/nova/policy.json``
-* ``/etc/nova/rootwrap.conf``
-* ``/etc/nova/rootwrap.d/``
-
-See Also
-========
-
-* :nova-doc:`OpenStack Nova <>`
-
-Bugs
-====
-
-* Nova bugs are managed at `Launchpad `__
diff --git a/doc/source/cli/nova-consoleauth.rst b/doc/source/cli/nova-consoleauth.rst
deleted file mode 100644
index 32e037ae59a..00000000000
--- a/doc/source/cli/nova-consoleauth.rst
+++ /dev/null
@@ -1,53 +0,0 @@
-================
-nova-consoleauth
-================
-
-----------------------------------
-Nova Console Authentication Server
-----------------------------------
-
-:Author: openstack@lists.openstack.org
-:Copyright: OpenStack Foundation
-:Manual section: 1
-:Manual group: cloud computing
-
-Synopsis
-========
-
-::
-
- nova-consoleauth [options]
-
-Description
-===========
-
-:program:`nova-consoleauth` is a server daemon that serves the Nova Console
-Auth service, which provides authentication for Nova consoles.
-
-.. deprecated:: 18.0.0
-
- `nova-consoleauth` is deprecated since 18.0.0 (Rocky) and will be removed in
- an upcoming release.
-
-Options
-=======
-
-**General options**
-
-Files
-=====
-
-* ``/etc/nova/nova.conf``
-* ``/etc/nova/policy.json``
-* ``/etc/nova/rootwrap.conf``
-* ``/etc/nova/rootwrap.d/``
-
-See Also
-========
-
-* :nova-doc:`OpenStack Nova <>`
-
-Bugs
-====
-
-* Nova bugs are managed at `Launchpad `__
diff --git a/doc/source/cli/nova-dhcpbridge.rst b/doc/source/cli/nova-dhcpbridge.rst
deleted file mode 100644
index 9fe5b143b8b..00000000000
--- a/doc/source/cli/nova-dhcpbridge.rst
+++ /dev/null
@@ -1,56 +0,0 @@
-===============
-nova-dhcpbridge
-===============
-
-------------------------------------------------
-Handles Lease Database updates from DHCP servers
-------------------------------------------------
-
-:Author: openstack@lists.openstack.org
-:Copyright: OpenStack Foundation
-:Manual section: 1
-:Manual group: cloud computing
-
-Synopsis
-========
-
-::
-
- nova-dhcpbridge [options]
-
-Description
-===========
-
-:program:`nova-dhcpbridge` is an application that handles lease database
-updates from DHCP servers. :program:`nova-dhcpbridge` is used whenever nova is
-managing DHCP (vlan and flatDHCP). :program:`nova-dhcpbridge` should not be run
-as a daemon.
-
-.. warning::
-
- This application is only for use with ``nova-network``, which is not
- recommended for new deployments.
-
-Options
-=======
-
-**General options**
-
-Files
-=====
-
-* ``/etc/nova/nova.conf``
-* ``/etc/nova/api-paste.ini``
-* ``/etc/nova/policy.json``
-* ``/etc/nova/rootwrap.conf``
-* ``/etc/nova/rootwrap.d/``
-
-See Also
-========
-
-* :nova-doc:`OpenStack Nova <>`
-
-Bugs
-====
-
-* Nova bugs are managed at `Launchpad `__
diff --git a/doc/source/cli/nova-manage.rst b/doc/source/cli/nova-manage.rst
index 8c7c7f66fa7..4dc614db2aa 100644
--- a/doc/source/cli/nova-manage.rst
+++ b/doc/source/cli/nova-manage.rst
@@ -2,21 +2,15 @@
nova-manage
===========
--------------------------------------------
-Control and manage cloud computer instances
--------------------------------------------
-
-:Author: openstack@lists.openstack.org
-:Copyright: OpenStack Foundation
-:Manual section: 1
-:Manual group: cloud computing
+.. program:: nova-manage
Synopsis
========
::
- nova-manage <category> <action> [<args>]
+ nova-manage <category> [<command> [<args>...]]
+
Description
===========
@@ -24,334 +18,1653 @@ Description
:program:`nova-manage` controls cloud computing instances by managing various
admin-only aspects of Nova.
+The standard pattern for executing a :program:`nova-manage` command is::
+
+ nova-manage <category> <command> [<args>]
+
+Run without arguments to see a list of available command categories::
+
+ nova-manage
+
+You can also run with a category argument such as ``db`` to see a list of all
+commands in that category::
+
+ nova-manage db
+
+
Options
=======
-The standard pattern for executing a nova-manage command is::
+These options apply to all commands and may be given in any order, before or
+after commands. Individual commands may provide additional options. Options
+without an argument can be combined after a single dash.
- nova-manage <category> <action> [<args>]
+.. option:: -h, --help
-Run without arguments to see a list of available command categories::
+ Show a help message and exit
- nova-manage
+.. option:: --config-dir <dir>
-You can also run with a category argument such as user to see a list of all
-commands in that category::
+ Path to a config directory to pull ``*.conf`` files from. This file set is
+ sorted, so as to provide a predictable parse order if individual options
+ are over-ridden. The set is parsed after the file(s) specified via previous
+ :option:`--config-file` arguments, hence over-ridden options in the
+ directory take precedence. This option must be set from the command-line.
+
+.. option:: --config-file <path>
+
+ Path to a config file to use. Multiple config files can be specified, with
+ values in later files taking precedence. Defaults to None. This option must
+ be set from the command-line.
+
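+ For example, a sketch with a hypothetical ``nova.conf.d`` directory, where
+ options in the directory override those in the file::
+
+ nova-manage --config-file /etc/nova/nova.conf \
+ --config-dir /etc/nova/nova.conf.d db version
+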
+.. option:: --log-config-append <path>, --log-config <path>, --log_config <path>
+
+ The name of a logging configuration file. This file is appended to any
+ existing logging configuration files. For details about logging
+ configuration files, see the Python logging module documentation. Note that
+ when logging configuration files are used then all logging configuration is
+ set in the configuration file and other logging configuration options are
+ ignored (for example, :option:`--log-date-format`).
+
+.. option:: --log-date-format <format>
+
+ Defines the format string for ``%(asctime)s`` in log records. Default:
+ None. This option is ignored if :option:`--log-config-append` is set.
+
+.. option:: --log-dir <dir>, --logdir <dir>
+
+ The base directory used for relative log_file paths.
+ This option is ignored if :option:`--log-config-append` is set.
+
+.. option:: --log-file PATH, --logfile <path>
+
+ Name of log file to send logging output to.
+ If no default is set, logging will go to stderr as defined by use_stderr.
+ This option is ignored if :option:`--log-config-append` is set.
+
+.. option:: --syslog-log-facility SYSLOG_LOG_FACILITY
+
+ Syslog facility to receive log lines.
+ This option is ignored if :option:`--log-config-append` is set.
+
+.. option:: --use-journal
+
+ Enable journald for logging. If running in a systemd environment you may
+ wish to enable journal support. Doing so will use the journal native
+ protocol which includes structured metadata in addition to log
+ messages. This option is ignored if :option:`--log-config-append` is
+ set.
+
+.. option:: --nouse-journal
+
+ The inverse of :option:`--use-journal`.
+
+.. option:: --use-json
+
+ Use JSON formatting for logging. This option is ignored if
+ :option:`--log-config-append` is set.
+
+.. option:: --nouse-json
+
+ The inverse of :option:`--use-json`.
+
+.. option:: --use-syslog
+
+ Use syslog for logging. Existing syslog format is DEPRECATED and will be
+ changed later to honor RFC5424. This option is ignored if
+ :option:`--log-config-append` is set.
+
+.. option:: --nouse-syslog
+
+ The inverse of :option:`--use-syslog`.
+
+.. option:: --watch-log-file
+
+ Uses a logging handler designed to watch the file system. When the log file
+ is moved or removed, this handler will open a new log file at the specified
+ path instantaneously. This only makes sense if the :option:`--log-file`
+ option is specified and the platform is Linux. This option is ignored if
+ :option:`--log-config-append` is set.
+
+.. option:: --nowatch-log-file
+
+ The inverse of :option:`--watch-log-file`.
+
+.. option:: --debug, -d
+
+ If enabled, the logging level will be set to ``DEBUG`` instead of the
+ default ``INFO`` level.
+
+.. option:: --nodebug
+
+ The inverse of :option:`--debug`.
+
+.. option:: --post-mortem
+
+ Allow post-mortem debugging.
+
+.. option:: --nopost-mortem
+
+ The inverse of :option:`--post-mortem`.
+
+.. option:: --version
+
+ Show program's version number and exit
+
+
+Database Commands
+=================
+
+db version
+----------
+
+.. program:: nova-manage db version
+
+.. code-block:: shell
+
+ nova-manage db version
+
+Print the current main database version.
+
+db sync
+-------
+
+.. program:: nova-manage db sync
+
+.. code-block:: shell
+
+ nova-manage db sync [--local_cell] [VERSION]
+
+Upgrade the main database schema up to the most recent version or ``VERSION``
+if specified. By default, this command will also attempt to upgrade the schema
+for the cell0 database if it is mapped.
+If :option:`--local_cell` is specified, then only the main database in the
+current cell is upgraded. The local database connection is determined by
+:oslo.config:option:`database.connection` in the configuration file, passed to
+nova-manage using the ``--config-file`` option(s).
+
+Refer to the :program:`nova-manage cells_v2 map_cell0` or
+:program:`nova-manage cells_v2 simple_cell_setup` commands for more details on
+mapping the cell0 database.
+
+This command should be run **after** :program:`nova-manage api_db sync`.
+
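+For example, the typical upgrade ordering is::
+
+ # the API database schema must be current before syncing the main database
+ nova-manage api_db sync
+ nova-manage db sync
+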
+.. rubric:: Options
+
+.. option:: --local_cell
+
+ Only sync db in the local cell: do not attempt to fan-out to all cells.
+
+.. rubric:: Return codes
+
+.. list-table::
+ :widths: 20 80
+ :header-rows: 1
+
+ * - Return code
+ - Description
+ * - 0
+ - Successfully synced database schema.
+ * - 1
+ - Failed to access cell0.
+
+.. versionchanged:: 20.0.0 (Train)
+
+ Removed support for the legacy ``--version <version>`` argument.
+
+.. versionchanged:: 24.0.0 (Xena)
+
+ Migrated versioning engine to alembic. The optional ``VERSION`` argument is
+ now expected to be an alembic-based version. sqlalchemy-migrate-based
+ versions will be rejected.
+
+db archive_deleted_rows
+-----------------------
+
+.. program:: nova-manage db archive_deleted_rows
+
+.. code-block:: shell
+
+ nova-manage db archive_deleted_rows [--max_rows <rows>] [--verbose]
+ [--until-complete] [--before <date>] [--purge] [--all-cells] [--task-log]
+ [--sleep <seconds>]
+
+Move deleted rows from production tables to shadow tables. Note that the
+corresponding rows in the ``instance_mappings``, ``request_specs`` and
+``instance_group_member`` tables of the API database are purged when
+instance records are archived and thus,
+:oslo.config:option:`api_database.connection` is required in the config
+file.
+
+If automating, this should be run continuously while the result is 1,
+stopping at 0, or use the :option:`--until-complete` option.
+
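+For example, a minimal automation sketch that archives in batches of 1000
+rows until the return code is no longer 1 (i.e. nothing further was
+archived)::
+
+ nova-manage db archive_deleted_rows --max_rows 1000
+ while [ $? -eq 1 ]; do
+ nova-manage db archive_deleted_rows --max_rows 1000
+ done
+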
+.. versionchanged:: 24.0.0 (Xena)
+
+ Added :option:`--task-log`, :option:`--sleep` options.
+
+.. rubric:: Options
+
+.. option:: --max_rows <rows>
+
+ Maximum number of deleted rows to archive. Defaults to 1000. Note that this
+ number does not include the corresponding rows, if any, that are removed
+ from the API database for deleted instances.
+
+.. option:: --before <date>
+
+ Archive rows that have been deleted before ``<date>``. Accepts date strings
+ in the default format output by the ``date`` command, as well as
+ ``YYYY-MM-DD[HH:mm:ss]``. For example::
+
+ # Purge shadow table rows older than a specific date
+ nova-manage db archive --before 2015-10-21
+ # or
+ nova-manage db archive --before "Oct 21 2015"
+ # Times are also accepted
+ nova-manage db archive --before "2015-10-21 12:00"
+
+ Note that relative dates (such as ``yesterday``) are not supported
+ natively. The ``date`` command can be helpful here::
+
+ # Archive deleted rows more than one month old
+ nova-manage db archive --before "$(date -d 'now - 1 month')"
+
+.. option:: --verbose
+
+ Print how many rows were archived per table.
+
+.. option:: --until-complete
+
+ Run continuously until all deleted rows are archived.
+ Use :option:`--max_rows` as a batch size for each iteration.
+
+.. option:: --purge
+
+ Purge all data from shadow tables after archive completes.
+
+.. option:: --all-cells
+
+ Run command across all cells.
+
+.. option:: --task-log
+
+ Also archive ``task_log`` table records. Note that ``task_log`` records are
+ never deleted, so archiving them will move all of the ``task_log`` records
+ up to now into the shadow tables. It is recommended to also specify the
+ :option:`--before` option to avoid races for those consuming ``task_log``
+ record data via the `/os-instance_usage_audit_log`__ API (example:
+ Telemetry).
+
+ .. __: https://docs.openstack.org/api-ref/compute/#server-usage-audit-log-os-instance-usage-audit-log
+
+.. option:: --sleep <seconds>
+
+ The amount of time in seconds to sleep between batches when
+ :option:`--until-complete` is used. Defaults to 0.
+
+.. rubric:: Return codes
+
+.. list-table::
+ :widths: 20 80
+ :header-rows: 1
+
+ * - Return code
+ - Description
+ * - 0
+ - Nothing was archived.
+ * - 1
+ - Some number of rows were archived.
+ * - 2
+ - Invalid value for :option:`--max_rows`.
+ * - 3
+ - No connection to the API database could be established using
+ :oslo.config:option:`api_database.connection`.
+ * - 4
+ - Invalid value for :option:`--before`.
+ * - 255
+ - An unexpected error occurred.
+
+db purge
+--------
+
+.. program:: nova-manage db purge
+
+.. code-block:: shell
+
+ nova-manage db purge [--all] [--before <date>] [--verbose] [--all-cells]
+
+Delete rows from shadow tables. For :option:`--all-cells` to work, the API
+database connection information must be configured.
+
+.. versionadded:: 18.0.0 (Rocky)
+
+.. rubric:: Options
+
+.. option:: --all
+
+ Purge all rows in the shadow tables.
+
+.. option:: --before <date>
+
+ Delete data that was archived before ``<date>``. Accepts date strings
+ in the default format output by the ``date`` command, as well as
+ ``YYYY-MM-DD[HH:mm:ss]``. For example::
+
+ # Purge shadow table rows older than a specific date
+ nova-manage db purge --before 2015-10-21
+ # or
+ nova-manage db purge --before "Oct 21 2015"
+ # Times are also accepted
+ nova-manage db purge --before "2015-10-21 12:00"
+
+ Note that relative dates (such as ``yesterday``) are not supported
+ natively. The ``date`` command can be helpful here::
+
+ # Purge shadow table rows more than one month old
+ nova-manage db purge --before "$(date -d 'now - 1 month')"
+
+.. option:: --verbose
+
+ Print information about purged records.
+
+.. option:: --all-cells
+
+ Run against all cell databases.
+
+.. rubric:: Return codes
+
+.. list-table::
+ :widths: 20 80
+ :header-rows: 1
+
+ * - Return code
+ - Description
+ * - 0
+ - Rows were deleted.
+ * - 1
+ - Required arguments were not provided.
+ * - 2
+ - Invalid value for :option:`--before`.
+ * - 3
+ - Nothing was purged.
+ * - 4
+ - No connection to the API database could be established using
+ :oslo.config:option:`api_database.connection`.
- nova-manage db
-
-These sections describe the available categories and arguments for nova-manage.
-
-Nova Database
-~~~~~~~~~~~~~
-
-``nova-manage db version``
- Print the current main database version.
-
-``nova-manage db sync [--version ] [--local_cell]``
- Upgrade the main database schema up to the most recent version or
- ``--version`` if specified. By default, this command will also attempt to
- upgrade the schema for the cell0 database if it is mapped (see the
- ``map_cell0`` or ``simple_cell_setup`` commands for more details on mapping
- the cell0 database). If ``--local_cell`` is specified, then only the main
- database in the current cell is upgraded. The local database connection is
- determined by ``[database]/connection`` in the configuration file passed to
- nova-manage.
-
-``nova-manage db archive_deleted_rows [--max_rows ] [--verbose] [--until-complete] [--purge]``
- Move deleted rows from production tables to shadow tables. Note that the
- corresponding rows in the instance_mappings and request_specs tables of the
- API database are purged when instance records are archived and thus,
- CONF.api_database.connection is required in the config file. Specifying
- --verbose will print the results of the archive operation for any tables that
- were changed. Specifying --until-complete will make the command run
- continuously until all deleted rows are archived. Use the --max_rows option,
- which defaults to 1000, as a batch size for each iteration. Specifying --purge
- will cause a `full` DB purge to be completed after archival. If a date range
- is desired for the purge, then run ``nova-manage db purge --before
- `` manually after archiving is complete.
-
-``nova-manage db purge [--all] [--before ] [--verbose] [--all-cells]``
- Delete rows from shadow tables. Specifying --all will delete all data from
- all shadow tables. Specifying --before will delete data from all shadow tables
- that is older than the date provided. Date strings may be fuzzy, such as
- ``Oct 21 2015``. Specifying --verbose will cause information to be printed about
- purged records. Specifying --all-cells will cause the purge to be applied against
- all cell databases. For --all-cells to work, the api database connection
- information must be configured. Returns exit code 0 if rows were deleted, 1 if
- required arguments are not provided, 2 if an invalid date is provided, 3 if no
- data was deleted, 4 if the list of cells cannot be obtained.
-
-``nova-manage db null_instance_uuid_scan [--delete]``
- Lists and optionally deletes database records where instance_uuid is NULL.
-
-``nova-manage db online_data_migrations [--max-count]``
- Perform data migration to update all live data. Return exit code 0 if
- migrations were successful or exit code 1 for partial updates. This command
- should be called after upgrading database schema and nova services on all
- controller nodes. If the command exits with partial updates (exit code 1)
- the command will need to be called again.
-
- ``--max-count`` controls the maximum number of objects to migrate in a given
- call. If not specified, migration will occur in batches of 50 until fully
- complete.
-
-``nova-manage db ironic_flavor_migration [--all] [--host] [--node] [--resource_class]``
- Perform the ironic flavor migration process against the database
- while services are offline. This is `not recommended` for most
- people. The ironic compute driver will do this online and as
- necessary if run normally. This routine is provided only for
- advanced users that may be skipping the 16.0.0 Pike release, never
- able to run services normally at the Pike level. Since this utility
- is for use when all services (including ironic) are down, you must
- pass the resource class set on your node(s) with the
- ``--resource_class`` parameter.
-
- To migrate a specific host and node, provide the hostname and node uuid with
- ``--host $hostname --node $uuid``. To migrate all instances on nodes managed
- by a single host, provide only ``--host``. To iterate over all nodes in the
- system in a single pass, use ``--all``. Note that this process is not lightweight,
- so it should not be run frequently without cause, although it is not harmful
- to do so. If you have multiple cellsv2 cells, you should run this once per cell
- with the corresponding cell config for each (i.e. this does not iterate cells
- automatically).
-
- Note that this is not recommended unless you need to run this
- specific data migration offline, and it should be used with care as
- the work done is non-trivial. Running smaller and more targeted batches (such as
- specific nodes) is recommended.
-
-Nova API Database
-~~~~~~~~~~~~~~~~~
-
-``nova-manage api_db version``
- Print the current API database version.
-
-``nova-manage api_db sync [VERSION]``
- Upgrade the API database schema up to the most recent version or
- ``[VERSION]`` if specified. This command does not create the API
- database, it runs schema migration scripts. The API database connection is
- determined by ``[api_database]/connection`` in the configuration file
- passed to nova-manage.
-
- Starting in the 18.0.0 Rocky release, this command will also upgrade the
- optional placement database if ``[placement_database]/connection`` is
- configured.
+db online_data_migrations
+-------------------------
+
+.. program:: nova-manage db online_data_migrations
+
+.. code-block:: shell
+
+ nova-manage db online_data_migrations [--max-count <count>]
+
+Perform data migration to update all live data.
+
+This command should be called after upgrading the database schema and nova
+services on all controller nodes. If it exits with partial updates (exit
+status 1), it should
+be called again, even if some updates initially generated errors, because some updates
+may depend on others having completed. If it exits with status 2, intervention is
+required to resolve the issue causing remaining updates to fail. It should be
+considered successfully completed only when the exit status is 0.
+
+For example::
+
+ $ nova-manage db online_data_migrations
+ Running batches of 50 until complete
+ 2 rows matched query migrate_instances_add_request_spec, 0 migrated
+ 2 rows matched query populate_queued_for_delete, 2 migrated
+ +---------------------------------------------+--------------+-----------+
+ | Migration | Total Needed | Completed |
+ +---------------------------------------------+--------------+-----------+
+ | create_incomplete_consumers | 0 | 0 |
+ | migrate_instances_add_request_spec | 2 | 0 |
+ | migrate_quota_classes_to_api_db | 0 | 0 |
+ | migrate_quota_limits_to_api_db | 0 | 0 |
+ | migration_migrate_to_uuid | 0 | 0 |
+ | populate_missing_availability_zones | 0 | 0 |
+ | populate_queued_for_delete | 2 | 2 |
+ | populate_uuids | 0 | 0 |
+ +---------------------------------------------+--------------+-----------+
+
+In the above example, the ``migrate_instances_add_request_spec`` migration
+found two candidate records but did not need to perform any kind of data
+migration for either of them. In the case of the
+``populate_queued_for_delete`` migration, two candidate records were found
+which did require a data migration. Since :option:`--max-count` defaults to 50
+and only two records were migrated with no more candidates remaining, the
+command completed successfully with exit code 0.
+
+.. versionadded:: 13.0.0 (Mitaka)
+
+.. rubric:: Options
+
+.. option:: --max-count <count>
+
+ Controls the maximum number of objects to migrate in a given call. If not
+ specified, migration will occur in batches of 50 until fully complete.
+
+.. rubric:: Return codes
+
+.. list-table::
+ :widths: 20 80
+ :header-rows: 1
+
+ * - Return code
+ - Description
+ * - 0
+ - No (further) updates are possible.
+ * - 1
+ - Some updates were completed successfully. Note that not all updates may
+ have succeeded.
+ * - 2
+ - Some updates generated errors and no other migrations were able to take
+ effect in the last batch attempted.
+ * - 127
+ - Invalid input was provided.
+
+
+API Database Commands
+=====================
+
+api_db version
+--------------
+
+.. program:: nova-manage api_db version
+
+.. code-block:: shell
+
+ nova-manage api_db version
+
+Print the current API database version.
+
+.. versionadded:: 2015.1.0 (Kilo)
+
+api_db sync
+-----------
+
+.. program:: nova-manage api_db sync
+
+.. code-block:: shell
+
+ nova-manage api_db sync [VERSION]
+
+Upgrade the API database schema up to the most recent version or
+``VERSION`` if specified. This command does not create the API
+database, it runs schema migration scripts. The API database connection is
+determined by :oslo.config:option:`api_database.connection` in the
+configuration file passed to nova-manage.
+
+This command should be run before ``nova-manage db sync``.
+
+.. versionadded:: 2015.1.0 (Kilo)
+
+.. versionchanged:: 18.0.0 (Rocky)
+
+ Added support for upgrading the optional placement database if
+ ``[placement_database]/connection`` is configured.
+
+.. versionchanged:: 20.0.0 (Train)
+
+ Removed support for upgrading the optional placement database as placement
+ is now a separate project.
+
+ Removed support for the legacy ``--version <version>`` argument.
+
+.. versionchanged:: 24.0.0 (Xena)
+
+ Migrated versioning engine to alembic. The optional ``VERSION`` argument is
+ now expected to be an alembic-based version. sqlalchemy-migrate-based
+ versions will be rejected.
.. _man-page-cells-v2:
-Nova Cells v2
-~~~~~~~~~~~~~
-
-``nova-manage cell_v2 simple_cell_setup [--transport-url ]``
- Setup a fresh cells v2 environment; this should not be used if you
- currently have a cells v1 environment. If a transport_url is not
- specified, it will use the one defined by ``[DEFAULT]/transport_url``
- in the configuration file. Returns 0 if setup is completed
- (or has already been done), 1 if no hosts are reporting (and cannot be
- mapped), 1 if the transport url is missing, and 2 if run in a cells v1
- environment.
-
-``nova-manage cell_v2 map_cell0 [--database_connection ]``
- Create a cell mapping to the database connection for the cell0 database.
- If a database_connection is not specified, it will use the one defined by
- ``[database]/connection`` in the configuration file passed to nova-manage.
- The cell0 database is used for instances that have not been scheduled to
- any cell. This generally applies to instances that have encountered an
- error before they have been scheduled. Returns 0 if cell0 is created
- successfully or already setup.
-
-``nova-manage cell_v2 map_instances --cell_uuid [--max-count ] [--reset]``
- Map instances to the provided cell. Instances in the nova database will
- be queried from oldest to newest and mapped to the provided cell. A
- max_count can be set on the number of instance to map in a single run.
- Repeated runs of the command will start from where the last run finished
- so it is not necessary to increase max-count to finish. A reset option
- can be passed which will reset the marker, thus making the command start
- from the beginning as opposed to the default behavior of starting from
- where the last run finished. Returns 0 if all instances have been mapped,
- and 1 if there are still instances to be mapped.
-
- If ``--max-count`` is not specified, all instances in the cell will be
- mapped in batches of 50. If you have a large number of instances, consider
- specifying a custom value and run the command until it exits with 0.
-
-``nova-manage cell_v2 map_cell_and_hosts [--name ] [--transport-url ] [--verbose]``
- Create a cell mapping to the database connection and message queue
- transport url, and map hosts to that cell. The database connection
- comes from the ``[database]/connection`` defined in the configuration
- file passed to nova-manage. If a transport_url is not specified, it will
- use the one defined by ``[DEFAULT]/transport_url`` in the configuration
- file. This command is idempotent (can be run multiple times), and the
- verbose option will print out the resulting cell mapping uuid. Returns 0
- on successful completion, and 1 if the transport url is missing.
-
-``nova-manage cell_v2 verify_instance --uuid [--quiet]``
- Verify instance mapping to a cell. This command is useful to determine if
- the cells v2 environment is properly setup, specifically in terms of the
- cell, host, and instance mapping records required. Returns 0 when the
- instance is successfully mapped to a cell, 1 if the instance is not
- mapped to a cell (see the ``map_instances`` command), 2 if the cell
- mapping is missing (see the ``map_cell_and_hosts`` command if you are
- upgrading from a cells v1 environment, and the ``simple_cell_setup`` if
- you are upgrading from a non-cells v1 environment), 3 if it is a deleted
- instance which has instance mapping, and 4 if it is an archived instance
- which still has an instance mapping.
-
-``nova-manage cell_v2 create_cell [--name ] [--transport-url ] [--database_connection ] [--verbose] [--disabled]``
- Create a cell mapping to the database connection and message queue
- transport url. If a database_connection is not specified, it will use the
- one defined by ``[database]/connection`` in the configuration file passed
- to nova-manage. If a transport_url is not specified, it will use the one
- defined by ``[DEFAULT]/transport_url`` in the configuration file. The
- verbose option will print out the resulting cell mapping uuid. All the
- cells created are by default enabled. However passing the ``--disabled`` option
- can create a pre-disabled cell, meaning no scheduling will happen to this
- cell. The meaning of the various exit codes returned by this command are
- explained below:
-
- * Returns 0 if the cell mapping was successfully created.
- * Returns 1 if the transport url or database connection was missing.
- * Returns 2 if another cell is already using that transport url and/or
- database connection combination.
-
-``nova-manage cell_v2 discover_hosts [--cell_uuid ] [--verbose] [--strict] [--by-service]``
- Searches cells, or a single cell, and maps found hosts. This command will
- check the database for each cell (or a single one if passed in) and map any
- hosts which are not currently mapped. If a host is already mapped nothing
- will be done. You need to re-run this command each time you add more
- compute hosts to a cell (otherwise the scheduler will never place instances
- there and the API will not list the new hosts). If the strict option is
- provided the command will only be considered successful if an unmapped host
- is discovered (exit code 0). Any other case is considered a failure (exit
- code 1). If --by-service is specified, this command will look in the
- appropriate cell(s) for any nova-compute services and ensure there are host
- mappings for them. This is less efficient and is only necessary when using
- compute drivers that may manage zero or more actual compute nodes at any
- given time (currently only ironic).
-
-``nova-manage cell_v2 list_cells [--verbose]``
- By default the cell name, uuid, disabled state, masked transport URL and
- database connection details are shown. Use the --verbose option to see
- transport URL and database connection with their sensitive details.
-
-``nova-manage cell_v2 delete_cell [--force] --cell_uuid ``
- Delete a cell by the given uuid. Returns 0 if the empty cell is found and
- deleted successfully or the cell that has hosts is found and the cell, hosts
- and the instance_mappings are deleted successfully with ``--force`` option
- (this happens if there are no living instances), 1 if a cell with that uuid
- could not be found, 2 if host mappings were found for the cell (cell not empty)
- without ``--force`` option, 3 if there are instances mapped to the cell
- (cell not empty) irrespective of the ``--force`` option, and 4 if there are
- instance mappings to the cell but all instances have been deleted in the cell,
- again without the ``--force`` option.
-
-``nova-manage cell_v2 list_hosts [--cell_uuid ]``
- Lists the hosts in one or all v2 cells. By default hosts in all v2 cells
- are listed. Use the --cell_uuid option to list hosts in a specific cell.
- If the cell is not found by uuid, this command will return an exit code
- of 1. Otherwise, the exit code will be 0.
-
-``nova-manage cell_v2 update_cell --cell_uuid [--name ] [--transport-url ] [--database_connection ] [--disable] [--enable]``
- Updates the properties of a cell by the given uuid. If a
- database_connection is not specified, it will attempt to use the one
- defined by ``[database]/connection`` in the configuration file. If a
- transport_url is not specified, it will attempt to use the one defined by
- ``[DEFAULT]/transport_url`` in the configuration file. The meaning of the
- various exit codes returned by this command are explained below:
-
- * If successful, it will return 0.
- * If the cell is not found by the provided uuid, it will return 1.
- * If the properties cannot be set, it will return 2.
- * If the provided transport_url or/and database_connection is/are same as
- another cell, it will return 3.
- * If an attempt is made to disable and enable a cell at the same time, it
- will return 4.
- * If an attempt is made to disable or enable cell0 it will return 5.
-
- .. note::
-
- Updating the ``transport_url`` or ``database_connection`` fields on a
- running system will NOT result in all nodes immediately using the new
- values. Use caution when changing these values.
-
- The scheduler will not notice that a cell has been enabled/disabled until
- it is restarted or sent the SIGHUP signal.
-
-``nova-manage cell_v2 delete_host --cell_uuid --host ``
- Delete a host by the given host name and the given cell uuid. Returns 0
- if the empty host is found and deleted successfully, 1 if a cell with
- that uuid could not be found, 2 if a host with that name could not be
- found, 3 if a host with that name is not in a cell with that uuid, 4 if
- a host with that name has instances (host not empty).
-
-
-Placement
-~~~~~~~~~
-
-``nova-manage placement heal_allocations [--max-count ] [--verbose]``
- Iterates over non-cell0 cells looking for instances which do not have
- allocations in the Placement service and which are not undergoing a task
- state transition. For each instance found, allocations are created against
- the compute node resource provider for that instance based on the flavor
- associated with the instance.
-
- There is also a special case handled for instances that *do* have
- allocations created before Placement API microversion 1.8 where project_id
- and user_id values were required. For those types of allocations, the
- project_id and user_id are updated using the values from the instance.
-
- Specify ``--max-count`` to control the maximum number of instances to
- process. If not specified, all instances in each cell will be mapped in
- batches of 50. If you have a large number of instances, consider
- specifying a custom value and run the command until it exits with 0 or 4.
-
- Specify ``--verbose`` to get detailed progress output during execution.
-
- This command requires that the ``[api_database]/connection`` and
- ``[placement]`` configuration options are set. Placement API >= 1.28 is
- required.
-
- Return codes:
-
- * 0: Command completed successfully and allocations were created.
- * 1: --max-count was reached and there are more instances to process.
- * 2: Unable to find a compute node record for a given instance.
- * 3: Unable to create (or update) allocations for an instance against its
- compute node resource provider.
- * 4: Command completed successfully but no allocations were created.
- * 127: Invalid input.
-
-``nova-manage placement sync_aggregates [--verbose]``
- Mirrors compute host aggregates to resource provider aggregates
- in the Placement service. Requires the ``[api_database]`` and
- ``[placement]`` sections of the nova configuration file to be
- populated.
-
- Specify ``--verbose`` to get detailed progress output during execution.
-
- .. note:: Depending on the size of your deployment and the number of
- compute hosts in aggregates, this command could cause a non-negligible
- amount of traffic to the placement service and therefore is
- recommended to be run during maintenance windows.
-
- .. versionadded:: Rocky
-
- Return codes:
-
- * 0: Successful run
- * 1: A host was found with more than one matching compute node record
- * 2: An unexpected error occurred while working with the placement API
- * 3: Failed updating provider aggregates in placement
- * 4: Host mappings not found for one or more host aggregate members
- * 5: Compute node records not found for one or more hosts
- * 6: Resource provider not found by uuid for a given host
+Cells v2 Commands
+=================
+
+cell_v2 simple_cell_setup
+-------------------------
+
+.. program:: nova-manage cell_v2 simple_cell_setup
+
+.. code-block:: shell
+
+ nova-manage cell_v2 simple_cell_setup [--transport-url <transport_url>]
+
+Set up a fresh cells v2 environment. If :option:`--transport-url` is not
+specified, it will use the one defined by :oslo.config:option:`transport_url`
+in the configuration file.
+
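+For a typical deployment where :oslo.config:option:`transport_url` is already
+set in the configuration file, this is simply::
+
+ nova-manage cell_v2 simple_cell_setup
+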
+.. versionadded:: 14.0.0 (Newton)
+
+.. rubric:: Options
+
+.. option:: --transport-url <transport_url>
+
+ The transport url for the cell message queue.
+
+.. rubric:: Return codes
+
+.. list-table::
+ :widths: 20 80
+ :header-rows: 1
+
+ * - Return code
+ - Description
+ * - 0
+ - Setup is completed.
+ * - 1
+ - No hosts are reporting, meaning none can be mapped, or the transport
+ URL is missing or invalid.
+
+cell_v2 map_cell0
+-----------------
+
+.. program:: nova-manage cell_v2 map_cell0
+
+.. code-block:: shell
+
+ nova-manage cell_v2 map_cell0 [--database_connection <database_connection>]
+
+Create a cell mapping to the database connection for the cell0 database.
+If a database_connection is not specified, it will use the one defined by
+:oslo.config:option:`database.connection` in the configuration file passed
+to nova-manage. The cell0 database is used for instances that have not been
+scheduled to any cell. This generally applies to instances that have
+encountered an error before they have been scheduled.
+
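+A minimal sketch, with an illustrative connection URL::
+
+ nova-manage cell_v2 map_cell0 \
+ --database_connection mysql+pymysql://nova:secretpass@controller/nova_cell0
+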
+.. versionadded:: 14.0.0 (Newton)
+
+.. rubric:: Options
+
+.. option:: --database_connection <database_connection>
+
+ The database connection URL for ``cell0``. This is optional. If not
+ provided, a standard database connection will be used based on the main
+ database connection from nova configuration.
+
+.. rubric:: Return codes
+
+.. list-table::
+ :widths: 20 80
+ :header-rows: 1
+
+ * - Return code
+ - Description
+ * - 0
+ - ``cell0`` is created successfully or has already been set up.
+
+cell_v2 map_instances
+---------------------
+
+.. program:: nova-manage cell_v2 map_instances
+
+.. code-block:: shell
+
+ nova-manage cell_v2 map_instances --cell_uuid <cell_uuid>
+ [--max-count <max_count>] [--reset]
+
+Map instances to the provided cell. Instances in the nova database will
+be queried from oldest to newest and mapped to the provided cell.
+A :option:`--max-count` can be set on the number of instances to map in a single
+run. Repeated runs of the command will start from where the last run finished
+so it is not necessary to increase :option:`--max-count` to finish.
+A :option:`--reset` option can be passed which will reset the marker, thus
+making the command start from the beginning as opposed to the default behavior
+of starting from where the last run finished.
+
+If :option:`--max-count` is not specified, all instances in the cell will be
+mapped in batches of 50. If you have a large number of instances, consider
+specifying a custom value and run the command until it exits with 0.
+
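+For example, with an illustrative cell UUID, repeating the command until it
+exits with 0::
+
+ nova-manage cell_v2 map_instances \
+ --cell_uuid 9d1a24a8-2ee9-4bfd-a8cb-69676d1d5e85 --max-count 1000
+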
+.. versionadded:: 12.0.0 (Liberty)
+
+.. rubric:: Options
+
+.. option:: --cell_uuid <cell_uuid>
+
+ Unmigrated instances will be mapped to the cell with the UUID provided.
+
+.. option:: --max-count <max_count>
+
+ Maximum number of instances to map. If not set, all instances in the cell
+ will be mapped in batches of 50. If you have a large number of instances,
+ consider specifying a custom value and run the command until it exits with
+ 0.
+
+.. option:: --reset
+
+ The command will start from the beginning as opposed to the default
+ behavior of starting from where the last run finished.
+
+.. rubric:: Return codes
+
+.. list-table::
+ :widths: 20 80
+ :header-rows: 1
+
+ * - Return code
+ - Description
+ * - 0
+ - All instances have been mapped.
+ * - 1
+ - There are still instances to be mapped.
+ * - 127
+ - Invalid value for :option:`--max-count`.
+ * - 255
+ - An unexpected error occurred.
+
+cell_v2 map_cell_and_hosts
+--------------------------
+
+.. program:: nova-manage cell_v2 map_cell_and_hosts
+
+.. code-block:: shell
+
+ nova-manage cell_v2 map_cell_and_hosts [--name <cell_name>]
+ [--transport-url <transport_url>] [--verbose]
+
+Create a cell mapping to the database connection and message queue
+transport URL, and map hosts to that cell. The database connection
+comes from the :oslo.config:option:`database.connection` defined in the
+configuration file passed to nova-manage. If :option:`--transport-url` is not
+specified, it will use the one defined by
+:oslo.config:option:`transport_url` in the configuration file. This command
+is idempotent (can be run multiple times), and the verbose option will
+print out the resulting cell mapping UUID.
+
+.. versionadded:: 13.0.0 (Mitaka)
+
+.. rubric:: Options
+
+.. option:: --transport-url <transport_url>
+
+ The transport url for the cell message queue.
+
+.. option:: --name <cell_name>
+
+ The name of the cell.
+
+.. option:: --verbose
+
+ Output the cell mapping uuid for any newly mapped hosts.
+
+.. rubric:: Return codes
+
+.. list-table::
+ :widths: 20 80
+ :header-rows: 1
+
+ * - Return code
+ - Description
+ * - 0
+ - Successful completion.
+ * - 1
+ - The transport URL is missing or invalid.
+
+cell_v2 verify_instance
+-----------------------
+
+.. program:: nova-manage cell_v2 verify_instance
+
+.. code-block:: shell
+
+ nova-manage cell_v2 verify_instance --uuid <instance_uuid> [--quiet]
+
+Verify instance mapping to a cell. This command is useful to determine if
+the cells v2 environment is properly setup, specifically in terms of the
+cell, host, and instance mapping records required.
+
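+For example, with an illustrative instance UUID::
+
+ nova-manage cell_v2 verify_instance --uuid a616a7f6-b285-4adf-a885-dd8426dd9e6a
+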
+.. versionadded:: 14.0.0 (Newton)
+
+.. rubric:: Options
+
+.. option:: --uuid <instance_uuid>
+
+ The instance UUID to verify.
+
+.. option:: --quiet
+
+ Do not print anything.
+
+.. rubric:: Return codes
+
+.. list-table::
+ :widths: 20 80
+ :header-rows: 1
+
+ * - Return code
+ - Description
+ * - 0
+ - The instance was successfully mapped to a cell.
+ * - 1
+ - The instance is not mapped to a cell. See the ``map_instances``
+ command.
+ * - 2
+ - The cell mapping is missing. See the ``map_cell_and_hosts`` command if
+ you are upgrading from a cells v1 environment, and the
+ ``simple_cell_setup`` command if you are upgrading from a non-cells v1
+ environment.
+ * - 3
+ - The instance is a deleted instance that still has an instance mapping.
+ * - 4
+ - The instance is an archived instance that still has an instance mapping.
+
+cell_v2 create_cell
+-------------------
+
+.. program:: nova-manage cell_v2 create_cell
+
+.. code-block:: shell
+
+ nova-manage cell_v2 create_cell [--name <cell_name>]
+ [--transport-url <transport_url>]
+ [--database_connection <database_connection>] [--verbose] [--disabled]
+
+Create a cell mapping to the database connection and message queue
+transport URL. If a database_connection is not specified, it will use the
+one defined by :oslo.config:option:`database.connection` in the
+configuration file passed to nova-manage. If :option:`--transport-url` is not
+specified, it will use the one defined by
+:oslo.config:option:`transport_url` in the configuration file. The verbose
+option will print out the resulting cell mapping UUID. All the cells
+created are by default enabled. However, passing the :option:`--disabled`
+option can create a pre-disabled cell, meaning no scheduling will happen to
+this cell.
+
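+A minimal sketch, with illustrative transport and database URLs::
+
+ nova-manage cell_v2 create_cell --name cell1 --verbose \
+ --transport-url rabbit://nova:secretpass@rabbit-host:5672/ \
+ --database_connection mysql+pymysql://nova:secretpass@db-host/nova_cell1
+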
+.. versionadded:: 15.0.0 (Ocata)
+
+.. versionchanged:: 18.0.0 (Rocky)
+
+ Added :option:`--disabled` option.
+
+.. rubric:: Options
+
+.. option:: --name <cell_name>
+
+ The name of the cell.
+
+.. option:: --database_connection <database_connection>
+
+ The database URL for the cell database.
+
+.. option:: --transport-url <transport_url>
+
+ The transport url for the cell message queue.
+
+.. option:: --verbose
+
+ Output the UUID of the created cell.
+
+.. option:: --disabled
+
+ Create a pre-disabled cell.
+
+.. rubric:: Return codes
+
+.. list-table::
+ :widths: 20 80
+ :header-rows: 1
+
+ * - Return code
+ - Description
+ * - 0
+ - The cell mapping was successfully created.
+ * - 1
+ - The transport URL or database connection was missing or invalid.
+ * - 2
+ - Another cell is already using the provided transport URL and/or database
+ connection combination.
+
+cell_v2 discover_hosts
+----------------------
+
+.. program:: nova-manage cell_v2 discover_hosts
+
+.. code-block:: shell
+
+ nova-manage cell_v2 discover_hosts [--cell_uuid <cell_uuid>] [--verbose]
+ [--strict] [--by-service]
+
+Searches cells, or a single cell, and maps found hosts. This command will
+check the database for each cell (or a single one if passed in) and map any
+hosts which are not currently mapped. If a host is already mapped, nothing
+will be done. You need to re-run this command each time you add a batch of
+compute hosts to a cell (otherwise the scheduler will never place instances
+there and the API will not list the new hosts). If :option:`--strict` is
+specified, the command will only return 0 if an unmapped host was discovered
+and mapped successfully. If :option:`--by-service` is specified, this command will
+look in the appropriate cell(s) for any nova-compute services and ensure there
+are host mappings for them. This is less efficient and is only necessary
+when using compute drivers that may manage zero or more actual compute
+nodes at any given time (currently only ironic).
+
+This command should be run once after all compute hosts have been deployed
+and should not be run in parallel. When run in parallel, the commands will
+collide with each other trying to map the same hosts in the database at the
+same time.
+
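+For example, after adding a batch of compute hosts to a deployment::
+
+ nova-manage cell_v2 discover_hosts --verbose
+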
+.. versionadded:: 14.0.0 (Newton)
+
+.. versionchanged:: 16.0.0 (Pike)
+
+ Added :option:`--strict` option.
+
+.. versionchanged:: 18.0.0 (Rocky)
+
+ Added :option:`--by-service` option.
+
+.. rubric:: Options
+
+.. option:: --cell_uuid <cell_uuid>
+
+ If provided only this cell will be searched for new hosts to map.
+
+.. option:: --verbose
+
+ Provide detailed output when discovering hosts.
+
+.. option:: --strict
+
+ Considered successful (exit code 0) only when an unmapped host is
+ discovered. Any other outcome will be considered a failure (non-zero exit
+ code).
+
+.. option:: --by-service
+
+ Discover hosts by service instead of compute node.
+
+.. rubric:: Return codes
+
+.. list-table::
+ :widths: 20 80
+ :header-rows: 1
+
+ * - Return code
+ - Description
+ * - 0
+ - Hosts were successfully mapped or no hosts needed to be mapped. If
+ :option:`--strict` is specified, returns 0 only if an unmapped host was
+ discovered and mapped.
+ * - 1
+ - If :option:`--strict` is specified and no unmapped hosts were found.
+ Also returns 1 if an exception was raised while running.
+ * - 2
+ - The command was aborted because of a duplicate host mapping found. This
+ means the command collided with another running ``discover_hosts``
+ command or scheduler periodic task and is safe to retry.
+
+cell_v2 list_cells
+------------------
+
+.. program:: nova-manage cell_v2 list_cells
+
+.. code-block:: shell
+
+ nova-manage cell_v2 list_cells [--verbose]
+
+By default the cell name, UUID, disabled state, masked transport URL and
+database connection details are shown. Use the :option:`--verbose` option to
+see transport URL and database connection with their sensitive details.
+
+.. versionadded:: 15.0.0 (Ocata)
+
+.. versionchanged:: 18.0.0 (Rocky)
+
+ Added the ``disabled`` column to output.
+
+.. rubric:: Options
+
+.. option:: --verbose
+
+ Show sensitive details, such as passwords.
+
+.. rubric:: Return codes
+
+.. list-table::
+ :widths: 20 80
+ :header-rows: 1
+
+ * - Return code
+ - Description
+ * - 0
+ - Success.
+
+cell_v2 delete_cell
+-------------------
+
+.. program:: nova-manage cell_v2 delete_cell
+
+.. code-block:: shell
+
+ nova-manage cell_v2 delete_cell [--force] --cell_uuid <cell_uuid>
+
+Delete a cell by the given UUID.
+
+.. versionadded:: 15.0.0 (Ocata)
+
+.. rubric:: Options
+
+.. option:: --force
+
+ Delete hosts and instance_mappings that belong to the cell as well.
+
+.. option:: --cell_uuid <cell_uuid>
+
+ The UUID of the cell to delete.
+
+.. rubric:: Return codes
+
+.. list-table::
+ :widths: 20 80
+ :header-rows: 1
+
+ * - Return code
+ - Description
+ * - 0
+ - An empty cell was found and deleted successfully, or a cell that has
+ hosts was found and the cell, its hosts and the instance_mappings were
+ deleted successfully with the :option:`--force` option (this happens if
+ there are no living instances).
+ * - 1
+ - A cell with the provided UUID could not be found.
+ * - 2
+ - Host mappings were found for the cell, meaning the cell is not empty,
+ and the :option:`--force` option was not provided.
+ * - 3
+ - There are active instances mapped to the cell (cell not empty).
+ * - 4
+ - There are (inactive) instances mapped to the cell and the
+ :option:`--force` option was not provided.
+
+cell_v2 list_hosts
+------------------
+
+.. program:: nova-manage cell_v2 list_hosts
+
+.. code-block:: shell
+
+    nova-manage cell_v2 list_hosts [--cell_uuid <cell_uuid>]
+
+Lists the hosts in one or all v2 cells. By default, hosts in all v2 cells
+are listed. Use the :option:`--cell_uuid` option to list hosts in a
+specific cell.
+
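+For example, to list only the hosts mapped to a single cell (the UUID is
+illustrative):
+
+.. code-block:: shell
+
+    nova-manage cell_v2 list_hosts \
+        --cell_uuid 9e36bfad-7f00-4d18-86f2-cc05acf754aa
+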
+.. versionadded:: 17.0.0 (Queens)
+
+.. rubric:: Options
+
+.. option:: --cell_uuid <cell_uuid>
+
+ The UUID of the cell.
+
+.. rubric:: Return codes
+
+.. list-table::
+ :widths: 20 80
+ :header-rows: 1
+
+ * - Return code
+ - Description
+ * - 0
+ - Success.
+ * - 1
+ - The cell indicated by :option:`--cell_uuid` was not found.
+
+cell_v2 update_cell
+-------------------
+
+.. program:: nova-manage cell_v2 update_cell
+
+.. code-block:: shell
+
+    nova-manage cell_v2 update_cell --cell_uuid <cell_uuid>
+        [--name <cell_name>] [--transport-url <transport_url>]
+        [--database_connection <database_connection>] [--disable] [--enable]
+
+Updates the properties of a cell by the given UUID. If a
+``database_connection`` is not specified, the command will attempt to use
+the one defined by :oslo.config:option:`database.connection` in the
+configuration file. If a ``transport_url`` is not specified, it will
+attempt to use the one defined by :oslo.config:option:`transport_url` in
+the configuration file.
+
+.. note::
+
+ Updating the ``transport_url`` or ``database_connection`` fields on a
+ running system will NOT result in all nodes immediately using the new
+ values. Use caution when changing these values.
+
+ The scheduler will not notice that a cell has been enabled/disabled until
+ it is restarted or sent the SIGHUP signal.
+
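+For example, to take a cell out of scheduling while it is under
+maintenance, a sketch might look like this (the UUID is illustrative, and
+the ``pkill`` pattern assumes the scheduler runs locally under the
+``nova-scheduler`` process name):
+
+.. code-block:: shell
+
+    nova-manage cell_v2 update_cell \
+        --cell_uuid 9e36bfad-7f00-4d18-86f2-cc05acf754aa --disable
+
+    # the scheduler only notices the change after a SIGHUP or a restart
+    pkill -HUP -f nova-scheduler
+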
+.. versionadded:: 16.0.0 (Pike)
+
+.. versionchanged:: 18.0.0 (Rocky)
+
+ Added :option:`--enable`, :option:`--disable` options.
+
+.. rubric:: Options
+
+.. option:: --cell_uuid <cell_uuid>
+
+ The UUID of the cell to update.
+
+.. option:: --name <cell_name>
+
+ Set the cell name.
+
+.. option:: --transport-url <transport_url>
+
+ Set the cell ``transport_url``. Note that running nodes will not see
+ the change until restarted or the ``SIGHUP`` signal is sent.
+
+.. option:: --database_connection <database_connection>
+
+ Set the cell ``database_connection``. Note that running nodes will not see
+ the change until restarted or the ``SIGHUP`` signal is sent.
+
+.. option:: --disable
+
+  Disables the cell. Note that scheduling of new instances to this cell
+  will be blocked until it is enabled and the ``nova-scheduler`` service
+  is restarted or sent the ``SIGHUP`` signal.
+
+.. option:: --enable
+
+ Enables the cell. Note that the ``nova-scheduler`` service will not see the
+ change until it is restarted or the ``SIGHUP`` signal is sent.
+
+.. rubric:: Return codes
+
+.. list-table::
+ :widths: 20 80
+ :header-rows: 1
+
+ * - Return code
+ - Description
+ * - 0
+ - Success.
+ * - 1
+ - The cell was not found by the provided UUID.
+ * - 2
+ - The specified properties could not be set.
+ * - 3
+     - The provided :option:`--transport-url` and/or
+       :option:`--database_connection` parameters were the same as those
+       of another cell.
+ * - 4
+ - An attempt was made to disable and enable a cell at the same time.
+ * - 5
+ - An attempt was made to disable or enable cell0.
+
+cell_v2 delete_host
+-------------------
+
+.. program:: nova-manage cell_v2 delete_host
+
+.. code-block:: shell
+
+    nova-manage cell_v2 delete_host --cell_uuid <cell_uuid> --host <host>
+
+Delete a host by the given host name and the given cell UUID.
+
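+For example (the UUID and host name are illustrative):
+
+.. code-block:: shell
+
+    nova-manage cell_v2 delete_host \
+        --cell_uuid 9e36bfad-7f00-4d18-86f2-cc05acf754aa --host compute-01
+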
+.. versionadded:: 17.0.0 (Queens)
+
+.. note::
+
+   The scheduler caches host-to-cell mapping information, so when deleting
+   a host the scheduler may need to be restarted or sent the SIGHUP signal.
+
+.. rubric:: Options
+
+.. option:: --cell_uuid <cell_uuid>
+
+ The UUID of the cell.
+
+.. option:: --host <host>
+
+ The host to delete.
+
+.. rubric:: Return codes
+
+.. list-table::
+ :widths: 20 80
+ :header-rows: 1
+
+ * - Return code
+ - Description
+ * - 0
+     - The empty host was found and deleted successfully.
+ * - 1
+ - A cell with the specified UUID could not be found.
+ * - 2
+     - A host with the specified name could not be found.
+ * - 3
+ - The host with the specified name is not in a cell with the specified UUID.
+ * - 4
+ - The host with the specified name has instances (host not empty).
+
+Placement Commands
+==================
+
+.. _heal_allocations_cli:
+
+placement heal_allocations
+--------------------------
+
+.. program:: nova-manage placement heal_allocations
+
+.. code-block:: shell
+
+    nova-manage placement heal_allocations [--max-count <max_count>]
+        [--verbose] [--skip-port-allocations] [--dry-run]
+        [--instance <instance_uuid>] [--cell <cell_uuid>] [--force]
+
+Iterates over non-cell0 cells looking for instances which do not have
+allocations in the Placement service and which are not undergoing a task
+state transition. For each instance found, allocations are created against
+the compute node resource provider for that instance based on the flavor
+associated with the instance.
+
+Also, if the instance has any port attached that has a resource request
+(e.g. a Quality of Service (QoS) minimum bandwidth policy) but the
+corresponding
+allocation is not found then the allocation is created against the
+network device resource providers according to the resource request of
+that port. It is possible that the missing allocation cannot be created
+either due to not having enough resource inventory on the host the instance
+resides on or because more than one resource provider could fulfill the
+request. In this case the instance needs to be manually deleted or the
+port needs to be detached. When nova `supports migrating instances
+with guaranteed bandwidth ports`__, migration will heal missing allocations
+for these instances.
+
+.. __: https://specs.openstack.org/openstack/nova-specs/specs/train/approved/support-move-ops-with-qos-ports.html
+
+Before the allocations for the ports are persisted in placement,
+nova-manage tries to update each port in neutron to refer to the resource
+provider UUID which provides the requested resources. If any of the port
+updates fail in neutron, or the allocation update fails in placement, the
+command tries to roll back the partial updates to the ports. If the
+rollback fails, the process stops with exit code ``7`` and the admin needs
+to perform the rollback in neutron manually as described in the return
+codes section.
+
+There is also a special case handled for instances that *do* have
+allocations but whose allocations were created before Placement API
+microversion 1.8, which introduced the requirement for project_id and
+user_id values. For those allocations, the project_id and user_id are
+updated using the values from the instance.
+
+This command requires that the
+:oslo.config:option:`api_database.connection` and
+:oslo.config:group:`placement` configuration options are set. Placement API
+>= 1.28 is required.
+
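+For example, a cautious workflow is to do a dry run first and then heal
+the allocations of a single instance (the UUID is illustrative):
+
+.. code-block:: shell
+
+    nova-manage placement heal_allocations --dry-run --verbose
+
+    nova-manage placement heal_allocations \
+        --instance 39b8b5d5-c803-4369-9e3e-54b4d7362bee
+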
+.. versionadded:: 18.0.0 (Rocky)
+
+.. versionchanged:: 20.0.0 (Train)
+
+ Added :option:`--dry-run`, :option:`--instance`, and
+ :option:`--skip-port-allocations` options.
+
+.. versionchanged:: 21.0.0 (Ussuri)
+
+ Added :option:`--cell` option.
+
+.. versionchanged:: 22.0.0 (Victoria)
+
+ Added :option:`--force` option.
+
+.. rubric:: Options
+
+.. option:: --max-count <max_count>
+
+  Maximum number of instances to process. If not specified, all instances
+  in each cell will be processed in batches of 50. If you have a large
+  number of instances, consider specifying a custom value and running the
+  command until it exits with 0 or 4, as in the sketch after this list.
+
+.. option:: --verbose
+
+ Provide verbose output during execution.
+
+.. option:: --dry-run
+
+ Runs the command and prints output but does not commit any changes. The
+ return code should be 4.
+
+.. option:: --instance <instance_uuid>
+
+ UUID of a specific instance to process. If specified :option:`--max-count`
+ has no effect. Mutually exclusive with :option:`--cell`.
+
+.. option:: --skip-port-allocations
+
+  Skip the healing of the resource allocations of bound ports, e.g. healing
+  bandwidth resource allocations for ports that have minimum bandwidth QoS
+  policy rules attached. If your deployment does not use such a feature,
+  the performance impact of querying neutron ports for each instance can
+  be avoided with this flag.
+
+.. option:: --cell <cell_uuid>
+
+ Heal allocations within a specific cell. Mutually exclusive with
+ :option:`--instance`.
+
+.. option:: --force
+
+ Force heal allocations. Requires the :option:`--instance` argument.
+
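+A minimal sketch of the batched workflow suggested for
+:option:`--max-count`, assuming a POSIX shell (the batch size is
+illustrative):
+
+.. code-block:: shell
+
+    while true; do
+        nova-manage placement heal_allocations --max-count 100
+        rc=$?
+        # 1 means more instances remain to be processed, so rerun;
+        # 0 (allocations created) or 4 (nothing left to heal) means done;
+        # any other code is an error that needs investigation
+        [ "$rc" -eq 1 ] || break
+    done
+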
+.. rubric:: Return codes
+
+.. list-table::
+ :widths: 20 80
+ :header-rows: 1
+
+ * - Return code
+ - Description
+ * - 0
+ - Command completed successfully and allocations were created.
+ * - 1
+ - :option:`--max-count` was reached and there are more instances to
+ process.
+ * - 2
+ - Unable to find a compute node record for a given instance.
+ * - 3
+ - Unable to create (or update) allocations for an instance against its
+ compute node resource provider.
+ * - 4
+ - Command completed successfully but no allocations were created.
+ * - 5
+     - Unable to query ports from neutron.
+ * - 6
+     - Unable to update ports in neutron.
+ * - 7
+     - Cannot roll back neutron port updates. Manual steps needed. The
+       error message will indicate which neutron ports need to be changed
+       to clean up the ``binding:profile`` of the port::
+
+         $ openstack port unset <port_uuid> --binding-profile allocation
+
+ * - 127
+ - Invalid input.
+ * - 255
+ - An unexpected error occurred.
+
+.. _sync_aggregates_cli:
+
+placement sync_aggregates
+-------------------------
+
+.. program:: nova-manage placement sync_aggregates
+
+.. code-block:: shell
+
+ nova-manage placement sync_aggregates [--verbose]
+
+Mirrors compute host aggregates to resource provider aggregates
+in the Placement service. Requires the :oslo.config:group:`api_database`
+and :oslo.config:group:`placement` sections of the nova configuration file
+to be populated.
+
+Specify :option:`--verbose` to get detailed progress output during execution.
+
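+For example:
+
+.. code-block:: shell
+
+    nova-manage placement sync_aggregates --verbose
+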
+.. note::
+
+   Depending on the size of your deployment and the number of compute
+   hosts in aggregates, this command could cause a non-negligible amount
+   of traffic to the placement service and is therefore recommended to be
+   run during maintenance windows.
+
+.. versionadded:: 18.0.0 (Rocky)
+
+.. rubric:: Options
+
+.. option:: --verbose
+
+ Provide verbose output during execution.
+
+.. rubric:: Return codes
+
+.. list-table::
+ :widths: 20 80
+ :header-rows: 1
+
+ * - Return code
+ - Description
+ * - 0
+     - Successful run.
+   * - 1
+     - A host was found with more than one matching compute node record.
+   * - 2
+     - An unexpected error occurred while working with the placement API.
+   * - 3
+     - Failed updating provider aggregates in placement.
+   * - 4
+     - Host mappings not found for one or more host aggregate members.
+   * - 5
+     - Compute node records not found for one or more hosts.
+   * - 6
+     - Resource provider not found by UUID for a given host.
+ * - 255
+ - An unexpected error occurred.
+
+placement audit
+---------------
+
+.. program:: nova-manage placement audit
+
+.. code-block:: shell
+
+ nova-manage placement audit [--verbose] [--delete]
+        [--resource_provider <provider_uuid>]
+
+Iterates over all the resource providers (or just one if you provide the
+UUID) and verifies whether the compute allocations are related to an
+existing instance or migration UUID. If not, it reports which allocations
+are orphaned.
+
+This command requires that the
+:oslo.config:option:`api_database.connection` and
+:oslo.config:group:`placement` configuration options are set. Placement API
+>= 1.14 is required.
+
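+For example, to audit a single resource provider and report, but not
+delete, any orphaned allocations (the UUID is illustrative):
+
+.. code-block:: shell
+
+    nova-manage placement audit --verbose \
+        --resource_provider 5bb11c74-9d5f-4d27-9e5d-6e2f2c73ffb5
+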
+.. versionadded:: 21.0.0 (Ussuri)
+
+.. rubric:: Options
+
+.. option:: --verbose
+
+ Provide verbose output during execution.
+
+.. option:: --resource_provider <provider_uuid>