diff --git a/.gitignore b/.gitignore
index f254e054597..8b87e4d8582 100644
--- a/.gitignore
+++ b/.gitignore
@@ -9,6 +9,7 @@
 *~
 .autogenerated
 .coverage
+.mypy_cache
 .nova-venv
 .project
 .pydevproject
@@ -33,7 +34,6 @@ doc/source/api/*
 doc/build/*
 api-guide/build/*
 api-ref/build/*
-placement-api-ref/build/*
 etc/nova/nova.conf.sample
 etc/nova/policy.yaml.sample
 etc/nova/policy.yaml.merged
@@ -48,7 +48,6 @@ nova/vcsversion.py
 tools/conf/nova.conf*
 doc/source/_static/nova.conf.sample
 doc/source/_static/nova.policy.yaml.sample
-doc/source/_static/placement.policy.yaml.sample
 
 # Files created by releasenotes build
 releasenotes/build
diff --git a/.gitreview b/.gitreview
index 3a2f61c4b05..665a744a715 100644
--- a/.gitreview
+++ b/.gitreview
@@ -1,4 +1,5 @@
 [gerrit]
-host=review.openstack.org
+host=review.opendev.org
 port=29418
 project=openstack/nova.git
+defaultbranch=stable/xena
diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
new file mode 100644
index 00000000000..d02bdbdfca7
--- /dev/null
+++ b/.pre-commit-config.yaml
@@ -0,0 +1,40 @@
+---
+default_language_version:
+  # force all unspecified python hooks to run python3
+  python: python3
+repos:
+  - repo: https://github.com/pre-commit/pre-commit-hooks
+    rev: v4.1.0
+    hooks:
+      - id: trailing-whitespace
+      - id: mixed-line-ending
+        args: ['--fix', 'lf']
+        exclude: '.*\.(svg)$'
+      - id: check-byte-order-marker
+      - id: check-executables-have-shebangs
+      - id: check-merge-conflict
+      - id: debug-statements
+        # nova/cmd/manage.py imports pdb on purpose.
+        exclude: 'nova/cmd/manage.py'
+      - id: check-yaml
+        files: .*\.(yaml|yml)$
+  - repo: https://github.com/Lucas-C/pre-commit-hooks
+    rev: v1.1.13
+    hooks:
+      - id: remove-tabs
+        exclude: '.*\.(svg)$'
+  - repo: local
+    hooks:
+      - id: flake8
+        name: flake8
+        additional_dependencies:
+          - hacking>=3.1.0,<3.2.0
+        language: python
+        entry: flake8
+        files: '^.*\.py$'
+        exclude: '^(doc|releasenotes|tools)/.*$'
+  - repo: https://github.com/pre-commit/mirrors-autopep8
+    rev: v1.6.0
+    hooks:
+      - id: autopep8
+        files: '^.*\.py$'
diff --git a/.zuul.yaml b/.zuul.yaml
index 6cab0ca2500..b0dd326ff55 100644
--- a/.zuul.yaml
+++ b/.zuul.yaml
@@ -1,110 +1,225 @@
 # See https://docs.openstack.org/infra/manual/drivers.html#naming-with-zuul-v3
 # for job naming conventions.
+
 - job:
-    name: nova-dsvm-base
-    parent: legacy-dsvm-base
+    name: nova-tox-functional-centos8-py36
+    parent: openstack-tox-functional-py36
+    nodeset: devstack-single-node-centos-8-stream
     description: |
-      The base job definition for nova devstack/tempest jobs.
-      Contains common configuration.
-    timeout: 10800
+      Run tox-based functional tests for the OpenStack Nova project
+      under cPython version 3.6 with Nova specific irrelevant-files list.
+      Uses tox with the ``functional-py36`` environment.
+
+      This job also provides a parent for other projects to run the nova
+      functional tests on their own changes.
     required-projects:
-      - openstack-infra/devstack-gate
+      # including nova here makes this job reusable by other projects
       - openstack/nova
-      - openstack/tempest
-    irrelevant-files: &dsvm-irrelevant-files
-      - ^(placement-)?api-.*$
-      - ^(test-|)requirements.txt$
+      - openstack/placement
+    irrelevant-files: &functional-irrelevant-files
       - ^.*\.rst$
-      - ^.git.*$
-      - ^doc/.*$
-      - ^nova/hacking/.*$
+      - ^api-.*$
+      - ^doc/(source|test)/.*$
       - ^nova/locale/.*$
-      - ^nova/tests/.*$
       - ^releasenotes/.*$
-      - ^setup.cfg$
-      - ^tests-py3.txt$
-      - ^tools/.*$
-      - ^tox.ini$
+    vars:
+      # explicitly stating the work dir makes this job reusable by other
+      # projects
+      zuul_work_dir: src/opendev.org/openstack/nova
+      bindep_profile: test py36
+    timeout: 3600
+    # NOTE(elod.illes): this job started to fail in stable/xena so let's
+    # set it non-voting to unblock the gate.
+    voting: false
 
 - job:
-    name: nova-dsvm-multinode-base
-    parent: legacy-dsvm-base-multinode
+    name: nova-tox-functional-py38
+    parent: openstack-tox-functional-py38
     description: |
-      Base job for multinode nova devstack/tempest jobs.
-      Will setup firewall rules on all the nodes allowing them to talk to
-      each other.
-    timeout: 10800
+      Run tox-based functional tests for the OpenStack Nova project
+      under cPython version 3.8 with Nova specific irrelevant-files list.
+      Uses tox with the ``functional-py38`` environment.
+
+      This job also provides a parent for other projects to run the nova
+      functional tests on their own changes.
     required-projects:
-      - openstack-infra/devstack-gate
+      # including nova here makes this job reusable by other projects
       - openstack/nova
-      - openstack/tempest
-    irrelevant-files: *dsvm-irrelevant-files
-    nodeset: legacy-ubuntu-xenial-2-node
+      - openstack/placement
+    irrelevant-files: *functional-irrelevant-files
+    vars:
+      # explicitly stating the work dir makes this job reusable by other
+      # projects
+      zuul_work_dir: src/opendev.org/openstack/nova
+      bindep_profile: test py38
+    timeout: 3600
 
 - job:
-    name: nova-tox-functional
-    parent: openstack-tox
+    name: nova-tox-functional-py39
+    parent: openstack-tox-functional-py39
     description: |
-      Run tox-based functional tests for the OpenStack Nova project with Nova
-      specific irrelevant-files list. Uses tox with the ``functional``
-      environment.
-    irrelevant-files: &functional-irrelevant-files
-      - ^.*\.rst$
-      - ^api-.*$
-      - ^doc/source/.*$
-      - ^nova/locale/.*$
-      - ^placement-api-ref/.*$
-      - ^releasenotes/.*$
+      Run tox-based functional tests for the OpenStack Nova project
+      under cPython version 3.9 with Nova specific irrelevant-files list.
+      Uses tox with the ``functional-py39`` environment.
+
+      This job also provides a parent for other projects to run the nova
+      functional tests on their own changes.
+    required-projects:
+      # including nova here makes this job reusable by other projects
+      - openstack/nova
+      - openstack/placement
+    irrelevant-files: *functional-irrelevant-files
     vars:
-      tox_envlist: functional
+      # explicitly stating the work dir makes this job reusable by other
+      # projects
+      zuul_work_dir: src/opendev.org/openstack/nova
+      bindep_profile: test py39
     timeout: 3600
 
 - job:
-    name: nova-tox-functional-py35
+    name: nova-tox-validate-backport
     parent: openstack-tox
     description: |
-      Run tox-based functional tests for the OpenStack Nova project
-      under cPython version 3.5. with Nova specific irrelevant-files list.
-      Uses tox with the ``functional-py35`` environment.
-    irrelevant-files: *functional-irrelevant-files
+      Determine whether a backport is ready to be merged by checking whether it
+      has already been merged to master or more recent stable branches.
+
+      Uses tox with the ``validate-backport`` environment.
     vars:
-      tox_envlist: functional-py35
-    timeout: 3600
+      tox_envlist: validate-backport
 
 - job:
-    name: nova-caching-scheduler
-    parent: nova-dsvm-base
+    name: nova-live-migration
+    parent: tempest-multinode-full-py3
     description: |
-      Run non-slow Tempest API and scenario tests using the CachingScheduler.
-    run: playbooks/legacy/nova-caching-scheduler/run.yaml
-    post-run: playbooks/legacy/nova-caching-scheduler/post.yaml
+      Run tempest live migration tests against local qcow2 ephemeral storage
+      and shared LVM/iSCSI cinder volumes.
+    irrelevant-files: &nova-base-irrelevant-files
+      - ^api-.*$
+      - ^(test-|)requirements.txt$
+      - ^.*\.rst$
+      - ^.git.*$
+      - ^doc/.*$
+      - ^nova/hacking/.*$
+      - ^nova/locale/.*$
+      - ^nova/policies/.*$
+      - ^nova/tests/.*$
+      - ^nova/test.py$
+      - ^releasenotes/.*$
+      - ^setup.cfg$
+      - ^tools/.*$
+      - ^tox.ini$
+
+    vars:
+      tox_envlist: all
+      tempest_test_regex: (^tempest\.api\.compute\.admin\.(test_live_migration|test_migration))
+      devstack_services:
+        neutron-trunk: true
+      devstack_local_conf:
+        test-config:
+          $TEMPEST_CONFIG:
+            compute-feature-enabled:
+              volume_backed_live_migration: true
+              block_migration_for_live_migration: true
+              # NOTE(lyarwood): Skip until bug #1931702 is resolved.
+              block_migrate_cinder_iscsi: false
+    post-run: playbooks/nova-live-migration/post-run.yaml
+    # NOTE(lyarwood): This job is now non-voting until bug #1912310 is resolved
+    # within libvirt/QEMU.
+    voting: false
 
 - job:
-    name: nova-cells-v1
-    parent: nova-dsvm-base
-    run: playbooks/legacy/nova-cells-v1/run.yaml
-    post-run: playbooks/legacy/nova-cells-v1/post.yaml
+    name: nova-ovs-hybrid-plug
+    parent: tempest-multinode-full-py3
+    description: |
+      Run move operations, reboot, and evacuation (via the same post-run hook
+      as the nova-live-migration job) tests with the OVS network backend and
+      the "iptables_hybrid" securitygroup firewall driver, aka "hybrid plug".
+      The external event interactions between Nova and Neutron in these
+      situations have historically been fragile. This job exercises them.
+    irrelevant-files: *nova-base-irrelevant-files
+    vars:
+      tox_envlist: all
+      tempest_test_regex: (^tempest\..*compute\..*(migration|resize|reboot).*)
+      devstack_localrc:
+        Q_AGENT: openvswitch
+        Q_ML2_TENANT_NETWORK_TYPE: vxlan
+        Q_ML2_PLUGIN_MECHANISM_DRIVERS: openvswitch,linuxbridge
+        ML2_L3_PLUGIN: router
+      devstack_services:
+        # Disable OVN services
+        br-ex-tcpdump: false
+        br-int-flows: false
+        ovn-controller: false
+        ovn-northd: false
+        q-ovn-metadata-agent: false
+        # Neutron services
+        q-agt: true
+        q-dhcp: true
+        q-l3: true
+        q-meta: true
+      devstack_local_conf:
+        post-config:
+          "/$NEUTRON_CORE_PLUGIN_CONF":
+            securitygroup:
+              firewall_driver: iptables_hybrid
+    group-vars:
+      subnode:
+        devstack_localrc:
+          Q_AGENT: openvswitch
+          Q_ML2_TENANT_NETWORK_TYPE: vxlan
+          Q_ML2_PLUGIN_MECHANISM_DRIVERS: openvswitch,linuxbridge
+          ML2_L3_PLUGIN: router
+        devstack_services:
+          # Disable OVN services
+          br-ex-tcpdump: false
+          br-int-flows: false
+          ovn-controller: false
+          ovn-northd: false
+          ovs-vswitchd: false
+          ovsdb-server: false
+          q-ovn-metadata-agent: false
+          # Neutron services
+          q-agt: true
+        devstack_local_conf:
+          post-config:
+            "/$NEUTRON_CORE_PLUGIN_CONF":
+              securitygroup:
+                firewall_driver: iptables_hybrid
+    post-run: playbooks/nova-live-migration/post-run.yaml
 
 - job:
-    name: nova-live-migration
-    parent: nova-dsvm-multinode-base
-    run: playbooks/legacy/nova-live-migration/run.yaml
-    post-run: playbooks/legacy/nova-live-migration/post.yaml
+    name: nova-live-migration-ceph
+    parent: devstack-plugin-ceph-multinode-tempest-py3
+    description: |
+      Run tempest live migration tests against ceph ephemeral storage and
+      cinder volumes.
+    irrelevant-files: *nova-base-irrelevant-files
+    vars:
+      tox_envlist: all
+      tempest_test_regex: (^tempest\.api\.compute\.admin\.(test_live_migration|test_migration))
+      devstack_local_conf:
+        test-config:
+          $TEMPEST_CONFIG:
+            compute-feature-enabled:
+              volume_backed_live_migration: true
+              block_migration_for_live_migration: false
+              block_migrate_cinder_iscsi: false
+    post-run: playbooks/nova-live-migration/post-run.yaml
+    # NOTE(lyarwood): This job is now non-voting until bug #1912310 is resolved
+    # within libvirt/QEMU.
+    voting: false
 
 - job:
     name: nova-lvm
-    parent: nova-dsvm-base
+    parent: devstack-tempest
     description: |
       Run tempest compute API tests using LVM image backend. This only
       runs against nova/virt/libvirt/* changes.
-    run: playbooks/legacy/nova-lvm/run.yaml
-    post-run: playbooks/legacy/nova-lvm/post.yaml
-    # Copy irrelevant-files from nova-dsvm-base and then exclude anything
-    # that is not in the nova/virt/libvirt/* tree (besides the actual zuul
-    # playbook and tempest rc files so this can be self-testing).
+    # Copy irrelevant-files from nova-dsvm-multinode-base and then exclude
+    # anything that is not in nova/virt/libvirt/* or nova/privsep/*.
     irrelevant-files:
-      - ^(?!.zuul.yaml)(?!playbooks/legacy/nova-lvm/)(?!devstack/tempest-dsvm-lvm-rc)(?!nova/virt/libvirt/).*$
-      - ^(placement-)?api-.*$
+      - ^(?!.zuul.yaml)(?!nova/virt/libvirt/)(?!nova/privsep/).*$
+      - ^api-.*$
       - ^(test-|)requirements.txt$
       - ^.*\.rst$
       - ^.git.*$
@@ -112,29 +227,49 @@
       - ^nova/hacking/.*$
       - ^nova/locale/.*$
       - ^nova/tests/.*$
+      - ^nova/test.py$
       - ^releasenotes/.*$
       - ^setup.cfg$
-      - ^tests-py3.txt$
      - ^tools/.*$
       - ^tox.ini$
-    # TODO(mriedem): Make this voting and gating once bug 1771700 is fixed
-    # and we've had enough runs to feel comfortable with this setup.
-    voting: false
-
-- job:
-    name: nova-multiattach
-    parent: nova-dsvm-base
-    description: |
-      Run tempest integration tests with volume multiattach support enabled.
-      This job will only work starting with Queens.
-      It uses the default Cinder volume type in devstack (lvm) and the
-      default compute driver in devstack (libvirt).
-    run: playbooks/legacy/nova-multiattach/run.yaml
-    post-run: playbooks/legacy/nova-multiattach/post.yaml
+    vars:
+      # We use the "all" environment for tempest_test_regex and
+      # tempest_exclude_regex.
+      tox_envlist: all
+      # Only run compute API tests.
+      tempest_test_regex: ^tempest\.api\.compute
+      # Skip slow tests.
+      tempest_exclude_regex: .*\[.*\bslow\b.*\]
+      devstack_local_conf:
+        test-config:
+          $TEMPEST_CONFIG:
+            compute-feature-enabled:
+              # NOTE(mriedem): resize of non-volume-backed lvm instances does
+              # not yet work (bug 1831657).
+              resize: false
+              cold_migration: false
+      devstack_localrc:
+        NOVA_BACKEND: LVM
+        # Do not waste time clearing volumes.
+        LVM_VOLUME_CLEAR: none
+        # Disable SSH validation in tests to save time.
+        TEMPEST_RUN_VALIDATION: false
+        # Increase the size of the swift loopback device to accommodate RAW
+        # snapshots from the LV based instance disks.
+        # See bug #1913451 for more details.
+        SWIFT_LOOPBACK_DISK_SIZE: 24G
+        # As above, increase the total image limit per tenant to 10G
+        GLANCE_LIMIT_IMAGE_SIZE_TOTAL: 10240
+      devstack_services:
+        # Disable non-essential services that we don't need for this job.
+        c-bak: false
+
+# TODO(lucasagomes): Move this job to ML2/OVN when QoS Minimum Bandwidth
+# support is implemented.
+# See: https://docs.openstack.org/neutron/latest/ovn/gaps.html
 - job:
     name: nova-next
-    parent: nova-dsvm-base
+    parent: tempest-multinode-full-py3
     description: |
       This job was added in Newton when placement and cellsv2 were optional.
       Placement and cellsv2 are required starting in Ocata. In
@@ -143,44 +278,421 @@
       post-test scripts to ensure those scripts are still working, e.g.
       archive_deleted_rows. In Queens, this job started testing the TLS
       console proxy code in the libvirt driver.
-    run: playbooks/legacy/nova-next/run.yaml
-    post-run: playbooks/legacy/nova-next/post.yaml
+      Starting in Stein, the job was changed to run with python 3 and enabled
+      volume multi-attach testing.
+      Starting in Train, the job enabled counting quota usage from placement.
+      Starting in Ussuri, the job was changed to multinode.
+      Starting in Wallaby, the job defaults to the q35 machine type.
+      Runs all tempest compute API and most scenario tests concurrently.
+    irrelevant-files: *nova-base-irrelevant-files
+    # Run post-tempest tests like for nova-manage commands.
+    post-run: playbooks/nova-next/post.yaml
+    vars:
+      # We use the "all" environment for tempest_test_regex and
+      # tempest_exclude_regex.
+      tox_envlist: all
+      # Run all compute API tests and most scenario tests at the default
+      # concurrency (nproc/2 which is normally 4 in the gate).
+      tempest_test_regex: ^tempest\.(scenario|api\.compute)
+      # The tempest.scenario.test_network* tests are skipped because they
+      # (1) take a long time and (2) are already covered in the
+      # tempest-slow* job. If this regex gets more complicated use
+      # tempest_test_exclude_list.
+      # FIXME(lyarwood): The tempest.api.compute.admin.test_volume_swap tests
+      # are skipped until bug #1929710 is resolved.
+      tempest_exclude_regex: ^tempest\.(scenario\.test_network|api\.compute\.admin\.test_volume_swap|api\.compute\.servers\.test_device_tagging\.TaggedAttachmentsTest\.test_tagged_attachment)
+      devstack_local_conf:
+        post-config:
+          $NOVA_CPU_CONF:
+            libvirt:
+              # Increase the number of PCIe ports per instance given the q35
+              # machine type attaches more devices by default than pc
+              num_pcie_ports: 24
+              hw_machine_type: "x86_64=q35"
+            compute:
+              # Switch off the provider association refresh, which should
+              # reduce the number of placement calls in steady state. Added in
+              # Stein.
+              resource_provider_association_refresh: 0
+            workarounds:
+              # This workaround is an improvement on hard reboot that cannot
+              # be turned on unconditionally. But we know that ml2/ovs sends
+              # plug-time events, so we can enable this in this ovs job for
+              # vnic_type normal.
+              wait_for_vif_plugged_event_during_hard_reboot: normal
+          $NOVA_CONF:
+            quota:
+              # Added in Train.
+              count_usage_from_placement: True
+            scheduler:
+              # Added in Train.
+              query_placement_for_image_type_support: True
+          "/$NEUTRON_CORE_PLUGIN_CONF":
+            # Needed for QoS port heal allocation testing.
+            ovs:
+              bridge_mappings: public:br-ex
+              resource_provider_bandwidths: br-ex:1000000:1000000
+            AGENT:
+              tunnel_types: gre,vxlan
+            ml2:
+              type_drivers: flat,geneve,vlan,gre,local,vxlan
+        test-config:
+          $TEMPEST_CONFIG:
+            network-feature-enabled:
+              qos_placement_physnet: public
+            compute-feature-enabled:
+              # The q35 machine type doesn't support an IDE bus
+              ide_bus: False
+            neutron_plugin_options:
+              available_type_drivers: flat,geneve,vlan,gre,local,vxlan
+      devstack_localrc:
+        Q_AGENT: openvswitch
+        Q_ML2_TENANT_NETWORK_TYPE: vxlan
+        Q_ML2_PLUGIN_MECHANISM_DRIVERS: openvswitch,linuxbridge
+        ML2_L3_PLUGIN: router
+        # Enable TLS between the noVNC proxy & compute nodes; this requires
+        # the tls-proxy service to be enabled. Added in Queens.
+        NOVA_CONSOLE_PROXY_COMPUTE_TLS: True
+        # Added in Stein.
+        ENABLE_VOLUME_MULTIATTACH: True
+        # Added in Ussuri.
+        FORCE_CONFIG_DRIVE: True
+      devstack_services:
+        # Disable OVN services
+        br-ex-tcpdump: false
+        br-int-flows: false
+        ovn-controller: false
+        ovn-northd: false
+        q-ovn-metadata-agent: false
+        # Neutron services
+        q-agt: true
+        q-dhcp: true
+        q-l3: true
+        q-meta: true
+        q-metering: true
+        tls-proxy: true
+        # neutron-* needed for QoS port heal allocation testing.
+        neutron-placement: true
+        neutron-qos: true
+        # Disable non-essential services that we don't need for this job.
+        c-bak: false
+      devstack_plugins:
+        # Needed for QoS port heal allocation testing.
+        neutron: https://opendev.org/openstack/neutron
+    group-vars:
+      subnode:
+        devstack_localrc:
+          Q_AGENT: openvswitch
+          Q_ML2_TENANT_NETWORK_TYPE: vxlan
+          Q_ML2_PLUGIN_MECHANISM_DRIVERS: openvswitch,linuxbridge
+          ML2_L3_PLUGIN: router
+          NOVA_USE_SERVICE_TOKEN: True
+          NOVA_CONSOLE_PROXY_COMPUTE_TLS: True
+          FORCE_CONFIG_DRIVE: True
+        devstack_services:
+          # Disable OVN services
+          br-ex-tcpdump: false
+          br-int-flows: false
+          ovn-controller: false
+          ovn-northd: false
+          ovs-vswitchd: false
+          ovsdb-server: false
+          q-ovn-metadata-agent: false
+          # Neutron services
+          q-agt: true
+          tls-proxy: true
+          c-bak: false
+
+- job:
+    name: nova-tempest-v2-api
+    parent: devstack-tempest
+    branches:
+      - master
+    description: |
+      This job runs the Tempest compute tests against v2.0 endpoint.
+      The former name for this job was:
+        * legacy-tempest-dsvm-nova-v20-api
+    vars:
+      tox_envlist: all
+      tempest_test_regex: api.*compute
+      devstack_localrc:
+        TEMPEST_COMPUTE_TYPE: compute_legacy
+
+- job:
+    name: nova-tempest-full-oslo.versionedobjects
+    parent: tempest-full-py3
+    description: |
+      Run tests with the git version of oslo.versionedobjects to check that
+      changes to nova will work with the next released version of
+      that library.
+    required-projects:
+      - openstack/oslo.versionedobjects
+
+- job:
+    name: nova-grenade-multinode
+    parent: grenade-multinode
+    description: |
+      Run a multinode grenade job and run the smoke, cold and live migration
+      tests with the controller upgraded and the compute on the older release.
+      The former names for this job were "nova-grenade-live-migration" and
+      "legacy-grenade-dsvm-neutron-multinode-live-migration".
+    irrelevant-files: *nova-base-irrelevant-files
+    vars:
+      devstack_local_conf:
+        test-config:
+          $TEMPEST_CONFIG:
+            compute-feature-enabled:
+              live_migration: true
+              volume_backed_live_migration: true
+              block_migration_for_live_migration: true
+              # NOTE(lyarwood): Skip until bug #1931702 is resolved.
+              block_migrate_cinder_iscsi: false
+      tox_envlist: all
+      tempest_test_regex: ((tempest\.(api\.compute|scenario)\..*smoke.*)|(^tempest\.api\.compute\.admin\.(test_live_migration|test_migration)))
+
+- job:
+    name: nova-multi-cell
+    parent: tempest-multinode-full-py3
+    description: |
+      Multi-node python3 job which runs with two nodes and two non-cell0
+      cells. The compute on the controller runs in cell1 and the compute
+      on the subnode runs in cell2.
+    irrelevant-files: *nova-base-irrelevant-files
+    vars:
+      # We use the "all" environment for tempest_test_regex and
+      # tempest_test_exclude_list.
+      tox_envlist: all
+      # Run compute API and scenario tests.
+      tempest_test_regex: ^tempest\.(scenario|(api\.compute))
+      tempest_test_exclude_list: '{{ ansible_user_dir }}/{{ zuul.projects["opendev.org/openstack/nova"].src_dir }}/devstack/nova-multi-cell-exclude-list.txt'
+      devstack_local_conf:
+        post-config:
+          $NOVA_CONF:
+            oslo_policy:
+              # The default policy file is policy.json but the
+              # setup-multi-cell-policy role will write to policy.yaml.
+              policy_file: policy.yaml
+        test-config:
+          $TEMPEST_CONFIG:
+            compute-feature-enabled:
+              # Enable cold migration for migrating across cells. Note that
+              # because NOVA_ALLOW_MOVE_TO_SAME_HOST=false, all cold migrations
+              # will move across cells.
+              cold_migration: true
+      devstack_services:
+        # Disable other non-essential services that we don't need for this job.
+        c-bak: false
+      devstack_localrc:
+        # Setup two non-cell0 cells (cell1 and cell2).
+        NOVA_NUM_CELLS: 2
+        # Disable resize to the same host so all resizes will move across
+        # cells.
+        NOVA_ALLOW_MOVE_TO_SAME_HOST: false
+        # We only have two computes and we don't yet support cross-cell live
+        # migration.
+        LIVE_MIGRATION_AVAILABLE: false
+        DEVSTACK_PARALLEL: True
+    group-vars:
+      peers:
+        devstack_localrc:
+          NOVA_ALLOW_MOVE_TO_SAME_HOST: true
+          LIVE_MIGRATION_AVAILABLE: false
+      subnode:
+        devstack_localrc:
+          # The subnode compute will get registered with cell2.
+          NOVA_CPU_CELL: 2
+        devstack_services:
+          # Disable other non-essential services that we don't need for this
+          # job.
+          c-bak: false
+    # Perform setup for the multi-cell environment. Note that this runs
+    # before devstack is set up on the controller host.
+    pre-run: playbooks/nova-multi-cell/pre.yaml
+
+- job:
+    name: nova-osprofiler-redis
+    parent: tempest-smoke-py3-osprofiler-redis
+    description: |
+      Runs osprofiler with the Redis collector on a subset of compute-specific
+      tempest-full-py3 smoke tests.
+    irrelevant-files: *nova-base-irrelevant-files
+    required-projects:
+      - openstack/nova
+    vars:
+      # We use the "all" environment for tempest_test_regex.
+      tox_envlist: all
+      # Run compute API and only the test_server_basic_ops scenario tests.
+      tempest_test_regex: ^tempest\.(scenario\.test_server_basic_ops|(api\.compute))
+
+- job:
+    name: nova-ceph-multistore
+    parent: devstack-plugin-ceph-tempest-py3
+    description: |
+      Just like the normal ceph job, but with glance multistore
+    irrelevant-files: *nova-base-irrelevant-files
+    required-projects:
+      - openstack/nova
+    pre-run:
+      - playbooks/ceph/glance-copy-policy.yaml
+    vars:
+      # NOTE(danms): These tests create an empty non-raw image, which nova
+      # will refuse because we set never_download_image_if_on_rbd in this job.
+      # Just skip these tests for this case.
+      devstack_localrc:
+        GLANCE_STANDALONE: True
+        GLANCE_USE_IMPORT_WORKFLOW: True
+        DEVSTACK_PARALLEL: True
+      devstack_local_conf:
+        post-config:
+          $NOVA_CONF:
+            libvirt:
+              images_rbd_glance_store_name: robust
+            workarounds:
+              never_download_image_if_on_rbd: True
+          $GLANCE_API_CONF:
+            DEFAULT:
+              enabled_backends: "cheap:file, robust:rbd"
+              default_log_levels: "amqp=WARN, amqplib=WARN, boto=WARN, qpid=WARN, sqlalchemy=WARN, suds=INFO, oslo.messaging=INFO, oslo_messaging=INFO, iso8601=WARN, requests.packages.urllib3.connectionpool=WARN, urllib3.connectionpool=WARN, websocket=WARN, requests.packages.urllib3.util.retry=WARN, urllib3.util.retry=WARN, keystonemiddleware=WARN, routes.middleware=WARN, stevedore=WARN, taskflow=WARN, keystoneauth=WARN, oslo.cache=INFO, dogpile.core.dogpile=INFO, oslo_policy=DEBUG"
+            glance_store:
+              default_backend: cheap
+              stores: file, http, rbd
+              default_store: file
+            robust:
+              rbd_store_pool: images
+              rbd_store_user: glance
+              rbd_store_ceph_conf: /etc/ceph/ceph.conf
+            cheap:
+              filesystem_store_datadir: /opt/stack/data/glance/images/
+            os_glance_staging_store:
+              filesystem_store_datadir: /opt/stack/data/glance/os_glance_staging_store/
+            os_glance_tasks_store:
+              filesystem_store_datadir: /opt/stack/data/glance/os_glance_tasks_store/
+          $GLANCE_IMAGE_IMPORT_CONF:
+            image_import_opts:
+              image_import_plugins: "['image_conversion']"
+            image_conversion:
+              output_format: raw
 
 - project:
     # Please try to keep the list of job names sorted alphabetically.
+    templates:
+      - check-requirements
+      - integrated-gate-compute
+      - openstack-cover-jobs
+      - openstack-python3-xena-jobs
+      - openstack-python3-xena-jobs-arm64
+      - periodic-stable-jobs
+      - publish-openstack-docs-pti
+      - release-notes-jobs-python3
     check:
       jobs:
         # We define our own irrelevant-files so we don't run the job
        # on things like nova docs-only changes.
-        - ironic-tempest-dsvm-ipa-wholedisk-bios-agent_ipmitool-tinyipa:
+        - ironic-tempest-ipa-wholedisk-bios-agent_ipmitool-tinyipa:
             voting: false
-            irrelevant-files: *dsvm-irrelevant-files
-        - nova-cells-v1
+            irrelevant-files: *nova-base-irrelevant-files
+        - nova-ceph-multistore:
+            irrelevant-files: *nova-base-irrelevant-files
+        - neutron-linuxbridge-tempest:
+            irrelevant-files:
+              # NOTE(mriedem): This job has its own irrelevant-files section
+              # so that we only run it on changes to networking and libvirt/vif
+              # code; we don't need to run this on all changes.
+              - ^(?!nova/network/.*)(?!nova/virt/libvirt/vif.py).*$
         - nova-live-migration
+        - nova-live-migration-ceph
         - nova-lvm
-        - nova-multiattach
+        - nova-multi-cell
         - nova-next
-        - tempest-slow:
-            irrelevant-files: *dsvm-irrelevant-files
-        - nova-tox-functional
-        - nova-tox-functional-py35
-        - openstack-tox-lower-constraints
-        - tempest-full-py3:
-            irrelevant-files: *dsvm-irrelevant-files
+        - nova-ovs-hybrid-plug
+        - nova-tox-validate-backport:
+            voting: false
+        - nova-tox-functional-centos8-py36
+        - nova-tox-functional-py38
+        - nova-tox-functional-py39:
+            voting: false
+        - tempest-integrated-compute:
+            # NOTE(gmann): Policies changes do not need to run all the
+            # integration test jobs. Running only tempest and grenade
+            # common jobs will be enough along with nova functional
+            # and unit tests.
+            irrelevant-files: &policies-irrelevant-files
+              - ^api-.*$
+              - ^(test-|)requirements.txt$
+              - ^.*\.rst$
+              - ^.git.*$
+              - ^doc/.*$
+              - ^nova/hacking/.*$
+              - ^nova/locale/.*$
+              - ^nova/tests/.*$
+              - ^nova/test.py$
+              - ^releasenotes/.*$
+              - ^setup.cfg$
+              - ^tools/.*$
+              - ^tox.ini$
+        - nova-grenade-multinode:
+            irrelevant-files: *policies-irrelevant-files
+        - tempest-ipv6-only:
+            irrelevant-files: *nova-base-irrelevant-files
+        - openstacksdk-functional-devstack:
+            irrelevant-files: *nova-base-irrelevant-files
+        - cyborg-tempest:
+            irrelevant-files: *nova-base-irrelevant-files
+            voting: false
+        - barbican-tempest-plugin-simple-crypto:
+            irrelevant-files: *nova-base-irrelevant-files
+            voting: false
     gate:
       jobs:
-        - nova-cells-v1
         - nova-live-migration
-        - nova-multiattach
+        - nova-live-migration-ceph
+        - nova-tox-functional-centos8-py36
+        - nova-tox-functional-py38
+        - nova-multi-cell
         - nova-next
-        - tempest-slow:
-            irrelevant-files: *dsvm-irrelevant-files
-        - nova-tox-functional
-        - nova-tox-functional-py35
-        - openstack-tox-lower-constraints
-        - tempest-full-py3:
-            irrelevant-files: *dsvm-irrelevant-files
+        - nova-tox-validate-backport
+        - nova-ceph-multistore:
+            irrelevant-files: *nova-base-irrelevant-files
+        - neutron-linuxbridge-tempest:
+            irrelevant-files:
+              # NOTE(mriedem): This job has its own irrelevant-files section
+              # so that we only run it on changes to networking and libvirt/vif
+              # code; we don't need to run this on all changes.
+              - ^(?!nova/network/.*)(?!nova/virt/libvirt/vif.py).*$
+        - tempest-integrated-compute:
+            irrelevant-files: *policies-irrelevant-files
+        - nova-grenade-multinode:
+            irrelevant-files: *policies-irrelevant-files
+        - tempest-ipv6-only:
+            irrelevant-files: *nova-base-irrelevant-files
+        - openstacksdk-functional-devstack:
+            irrelevant-files: *nova-base-irrelevant-files
     experimental:
       jobs:
-        - nova-caching-scheduler
-        - os-vif-ovs
+        - ironic-tempest-bfv:
+            irrelevant-files: *nova-base-irrelevant-files
+        - ironic-tempest-ipa-wholedisk-direct-tinyipa-multinode:
+            irrelevant-files: *nova-base-irrelevant-files
+        - devstack-plugin-nfs-tempest-full:
+            irrelevant-files: *nova-base-irrelevant-files
+        - nova-osprofiler-redis
+        - tempest-full-py3-opensuse15:
+            irrelevant-files: *nova-base-irrelevant-files
+        - tempest-pg-full:
+            irrelevant-files: *nova-base-irrelevant-files
+        - nova-tempest-full-oslo.versionedobjects:
+            irrelevant-files: *nova-base-irrelevant-files
+        - nova-tempest-v2-api:
+            irrelevant-files: *nova-base-irrelevant-files
+        - neutron-ovs-tempest-dvr-ha-multinode-full:
+            irrelevant-files: *nova-base-irrelevant-files
+        - neutron-ovs-tempest-iptables_hybrid:
+            irrelevant-files: *nova-base-irrelevant-files
+        - os-vif-ovs:
+            irrelevant-files: *nova-base-irrelevant-files
+        - devstack-platform-fedora-latest:
+            irrelevant-files: *nova-base-irrelevant-files
+        - devstack-platform-fedora-latest-virt-preview:
+            irrelevant-files: *nova-base-irrelevant-files
+        - devstack-plugin-ceph-compute-local-ephemeral:
+            irrelevant-files: *nova-base-irrelevant-files
diff --git a/CONTRIBUTING.rst b/CONTRIBUTING.rst
index 535791a4512..f3f8b3ae208 100644
--- a/CONTRIBUTING.rst
+++ b/CONTRIBUTING.rst
@@ -1,16 +1,19 @@
-If you would like to contribute to the development of OpenStack,
-you must follow the steps in this page:
+The source repository for this project can be found at:
 
-   https://docs.openstack.org/infra/manual/developers.html
+   https://opendev.org/openstack/nova
 
-Once those steps have been completed, changes to OpenStack
-should be submitted for review via the Gerrit tool, following
-the workflow documented at:
+Pull requests submitted through GitHub are not monitored.
 
-   https://docs.openstack.org/infra/manual/developers.html#development-workflow
+To start contributing to OpenStack, follow the steps in the contribution guide
+to set up and use Gerrit:
 
-Pull requests submitted through GitHub will be ignored.
+   https://docs.openstack.org/contributors/code-and-documentation/quick-start.html
 
-Bugs should be filed on Launchpad, not GitHub:
+Bugs should be filed on Launchpad:
 
    https://bugs.launchpad.net/nova
+
+For more specific information about contributing to this repository, see the
+Nova contributor guide:
+
+   https://docs.openstack.org/nova/latest/contributor/contributing.html
diff --git a/HACKING.rst b/HACKING.rst
index 148097561af..0f98901864d 100644
--- a/HACKING.rst
+++ b/HACKING.rst
@@ -8,7 +8,7 @@ Nova Style Commandments
 
 Nova Specific Commandments
 ---------------------------
-- ``nova.db`` imports are not allowed in ``nova/virt/*``
+- [N307] ``nova.db`` imports are not allowed in ``nova/virt/*``
 - [N309] no db session in public API methods (disabled)
   This enforces a guideline defined in ``oslo.db.sqlalchemy.session``
 - [N310] timeutils.utcnow() wrapper must be used instead of direct calls to
@@ -25,19 +25,13 @@ Nova Specific Commandments
 - [N317] Change assertEqual(type(A), B) by optimal assert like
   assertIsInstance(A, B)
-- [N319] Validate that debug level logs are not translated.
+- [N319] Validate that logs are not translated.
 - [N320] Setting CONF.* attributes directly in tests is forbidden. Use
   self.flags(option=value) instead.
-- [N321] Validate that LOG messages, except debug ones, have translations
 - [N322] Method's default argument shouldn't be mutable
 - [N323] Ensure that the _() function is explicitly imported to ensure proper translations.
 - [N324] Ensure that jsonutils.%(fun)s must be used instead of json.%(fun)s
-- [N325] str() and unicode() cannot be used on an exception. Remove use or use six.text_type()
 - [N326] Translated messages cannot be concatenated. String should be included in translated message.
-- [N327] Do not use xrange(). xrange() is not compatible with Python 3. Use range() or six.moves.range() instead.
-- [N328] Validate that LOG.info messages use _LI.
-- [N329] Validate that LOG.exception messages use _LE.
-- [N330] Validate that LOG.warning and LOG.warn messages use _LW.
 - [N332] Check that the api_version decorator is the first decorator on a method
 - [N334] Change assertTrue/False(A in/not in B, message) to the more specific
   assertIn/NotIn(A, B, message)
@@ -52,9 +46,6 @@ Nova Specific Commandments
 - [N341] contextlib.nested is deprecated
 - [N342] Config options should be in the central location ``nova/conf/``
 - [N343] Check for common double word typos
-- [N344] Python 3: do not use dict.iteritems.
-- [N345] Python 3: do not use dict.iterkeys.
-- [N346] Python 3: do not use dict.itervalues.
 - [N348] Deprecated library function os.popen()
 - [N349] Check for closures in tests which are not used
 - [N350] Policy registration should be in the central location ``nova/policies/``
@@ -68,6 +59,18 @@ Nova Specific Commandments
 - [N358] Return must always be followed by a space when returning a value.
 - [N359] Check for redundant import aliases.
 - [N360] Yield must always be followed by a space when yielding a value.
+- [N361] Check for usage of deprecated assertRegexpMatches and
+  assertNotRegexpMatches
+- [N362] Imports for privsep modules should be specific. Use "import nova.privsep.path",
+  not "from nova.privsep import path". This ensures callers know that the method they're
+  calling is using privilege escalation.
+- [N363] Disallow ``(not_a_tuple)`` because you meant ``(a_tuple_of_one,)``.
+- [N364] Check non-existent mock assertion methods and attributes.
+- [N365] Check misuse of assertTrue/assertIsNone.
+- [N366] The assert_has_calls is a method rather than a variable.
+- [N367] Disallow aliasing the mock.Mock and similar classes in tests.
+- [N368] Reject if the mock.Mock class is used as a replacement value instead of an
+  instance of a mock.Mock during patching in tests.
 
 Creating Unit Tests
 -------------------
@@ -112,6 +115,34 @@ command directly. Running ``stestr run`` will run the entire test suite.
 tests in parallel). More information about stestr can be found at:
 http://stestr.readthedocs.io/
 
+Since running the entire test suite on a regular basis is prohibitively
+expensive when testing locally, the ``tools/run-tests-for-diff.sh``
+script is provided as a convenient way to run selected tests using
+output from ``git diff``. For example, this allows running only the
+test files changed/added in the working tree::
+
+    tools/run-tests-for-diff.sh
+
+However since it passes its arguments directly to ``git diff``, tests
+can be selected in lots of other interesting ways, e.g. it can run all
+tests affected by a single commit at the tip of a given branch::
+
+    tools/run-tests-for-diff.sh mybranch^!
+
+or all those affected by a range of commits, e.g. a branch containing
+a whole patch series for a blueprint::
+
+    tools/run-tests-for-diff.sh gerrit/master..bp/my-blueprint
+
+It supports the same ``-HEAD`` invocation syntax as ``flake8wrap.sh``
+(as used by the ``fast8`` tox environment)::
+
+    tools/run-tests-for-diff.sh -HEAD
+
+By default tests log at ``INFO`` level. It is possible to make them
+log at ``DEBUG`` level by exporting the ``OS_DEBUG`` environment
+variable to ``True``.
+
 .. _Development Quickstart: https://docs.openstack.org/nova/latest/contributor/development-environment.html
 
 Building Docs
diff --git a/README.rst b/README.rst
index 7cf790f1eeb..2b7eda2a65e 100644
--- a/README.rst
+++ b/README.rst
@@ -1,18 +1,16 @@
-========================
-Team and repository tags
-========================
+==============
+OpenStack Nova
+==============
 
 .. image:: https://governance.openstack.org/tc/badges/nova.svg
     :target: https://governance.openstack.org/tc/reference/tags/index.html
 
 .. Change things from this point on
 
-OpenStack Nova
-==============
 
 OpenStack Nova provides a cloud computing fabric controller, supporting a wide
 variety of compute technologies, including: libvirt (KVM, Xen, LXC and more),
-Hyper-V, VMware, XenServer, OpenStack Ironic and PowerVM.
+Hyper-V, VMware, OpenStack Ironic and PowerVM.
 
 Use the following resources to learn more.
 
@@ -21,8 +19,8 @@ API
 
 To learn how to use Nova's API, consult the documentation available online at:
 
-- `Compute API Guide <http://developer.openstack.org/api-guide/compute/>`__
-- `Compute API Reference <http://developer.openstack.org/api-ref/compute/>`__
+- `Compute API Guide <https://docs.openstack.org/api-guide/compute/>`__
+- `Compute API Reference <https://docs.openstack.org/api-ref/compute/>`__
 
 For more information on OpenStack APIs, SDKs and CLIs in general, refer to:
diff --git a/api-guide/source/accelerator-support.rst b/api-guide/source/accelerator-support.rst
new file mode 100644
index 00000000000..c71e899fd48
--- /dev/null
+++ b/api-guide/source/accelerator-support.rst
@@ -0,0 +1,143 @@
+==============================
+Using accelerators with Cyborg
+==============================
+
+Starting from microversion 2.82, nova supports creating servers with
+accelerators provisioned with the Cyborg service, which provides lifecycle
+management for accelerators.
+
+To launch servers with accelerators, the administrator (or a user with
+appropriate privileges) must do the following:
+
+* Create a device profile in Cyborg, which specifies what accelerator
+  resources need to be provisioned. (See `Cyborg device profiles API`_.)
+
+  .. _`Cyborg device profiles API`: https://docs.openstack.org/api-ref/accelerator/v2/index.html#device-profiles
+
+* Set the device profile name as an extra spec in a chosen flavor,
+  with this syntax:
+
+  .. code::
+
+     accel:device_profile=$device_profile_name
+
+  The chosen flavor may be a newly created one or an existing one.
+
+* Use that flavor to create a server:
+
+  .. code::
+
+     openstack server create --flavor $myflavor --image $myimage $servername
+
+Nova supports only specific operations for instances with accelerators.
+The lists of supported and unsupported operations are as below:
+
+* Supported operations.
+
+  * Creation and deletion.
+  * Reboots (soft and hard).
+  * Pause and unpause.
+  * Stop and start.
+  * Take a snapshot.
+  * Backup.
+  * Rescue and unrescue.
+  * Rebuild.
+  * Evacuate.
+  * Shelve and unshelve.
+
+* Unsupported operations
+
+  * Resize.
+  * Suspend and resume.
+  * Cold migration.
+  * Live migration.
+
+.. versionchanged:: 22.0.0 (Victoria)
+
+   Added support for rebuild and evacuate operations.
+
+.. versionchanged:: 23.0.0 (Wallaby)
+
+   Added support for shelve and unshelve operations.
+
+Some operations, such as lock and unlock, work as they are effectively
+no-ops for accelerators.
+
+Caveats
+-------
+
+.. note::
+
+   This information is correct as of the 21.0.0 Ussuri release. Where
+   improvements have been made or issues fixed, they are noted per item.
+
+For nested resource providers:
+
+* Creating servers with accelerators provisioned with the Cyborg service, if
+  a flavor asks for resources that are provided by nested Resource Provider
+  inventories (e.g. vGPU) and the user wants multi-create (i.e. say --max 2)
+  then the scheduler could return a NoValidHosts exception even if each
+  nested Resource Provider can support at least one specific instance, if the
+  total wanted capacity is not supported by only one nested Resource Provider.
+  (See `bug 1874664 <https://bugs.launchpad.net/nova/+bug/1874664>`_.)
+
+  For example, creating servers with accelerators provisioned with the Cyborg
+  service, if two child RPs have 4 vGPU inventories each:
+
+  * You can ask for a device profile in the flavor with 2 vGPU with --max 2.
+  * But you can't ask for a device profile in the flavor with 4 vGPU and
+    --max 2.
+
+=======================
+Using SRIOV with Cyborg
+=======================
+
+Starting from the Xena release, nova supports creating servers with
+SRIOV provisioned with the Cyborg service.
+
+To launch servers with accelerators, the administrator (or a user with
+appropriate privileges) must do the following:
+
+* Create a device profile in Cyborg, which specifies what accelerator
+  resources need to be provisioned. (See `Cyborg device profiles API`_,
+  `Cyborg SRIOV Test Report`_.)
+
+  .. _`Cyborg device profiles API`: https://docs.openstack.org/api-ref/accelerator/v2/index.html#device-profiles
+  .. _`Cyborg SRIOV Test Report`: https://wiki.openstack.org/wiki/Cyborg/TestReport/IntelNic
+
+* Create an 'accelerator-direct' vnic type port with the device profile name
+  set as the Cyborg device profile, with this syntax:
+
+  .. code::
+
+     openstack port create $port_name --network $network_name --vnic-type=accelerator-direct --device-profile $device_profile_name
+
+* Create a server with that port:
+
+  .. code::
+
+     openstack server create --flavor $myflavor --image $myimage $servername --nic port-id=$port-ID
+
+Nova supports only specific operations for instances with accelerators.
+The lists of supported and unsupported operations are as below:
+
+* Supported operations.
+
+  * Creation and deletion.
+  * Reboots (soft and hard).
+  * Pause and unpause.
+  * Stop and start.
+  * Rebuild.
+  * Rescue and unrescue.
+  * Take a snapshot.
+  * Backup.
+
+* Unsupported operations
+
+  * Resize.
+  * Suspend and resume.
+  * Cold migration.
+  * Live migration.
+  * Shelve and unshelve.
+  * Evacuate.
+  * Attach/detach a port with device profile.
diff --git a/api-guide/source/conf.py b/api-guide/source/conf.py
index 6b0411a3d4f..e07de9e4509 100644
--- a/api-guide/source/conf.py
+++ b/api-guide/source/conf.py
@@ -31,7 +31,8 @@
 # Add any Sphinx extension module names here, as strings. They can be
 # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
 # ones.
-extensions = ['openstackdocstheme']
+extensions = ['openstackdocstheme',
+              'sphinx.ext.todo']
 
 # Add any paths that contain templates here, relative to this directory.
 templates_path = ['_templates']
@@ -39,6 +40,9 @@
 # The suffix of source filenames.
 source_suffix = '.rst'
 
+# The 'todo' and 'todolist' directives produce output.
+todo_include_todos = True
+
 # The encoding of source files.
 # source_encoding = 'utf-8-sig'
 
@@ -46,12 +50,6 @@
 master_doc = 'index'
 
 # General information about the project.
 project = u'Compute API Guide'
-bug_tag = u'api-guide'
-repository_name = 'openstack/nova'
-bug_project = 'nova'
-
-# Must set this variable to include year, month, day, hours, and minutes.
-html_last_updated_fmt = '%Y-%m-%d %H:%M'
 
 copyright = u'2015, OpenStack contributors'
@@ -94,7 +92,7 @@
 # show_authors = False
 
 # The name of the Pygments (syntax highlighting) style to use.
-pygments_style = 'sphinx'
+pygments_style = 'native'
 
 # A list of ignored prefixes for module index sorting.
 # modindex_common_prefix = []
@@ -143,10 +141,6 @@
 # directly to the root of the documentation.
 # html_extra_path = []
 
-# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
-# using the given strftime format.
-html_last_updated_fmt = '%Y-%m-%d %H:%M'
-
 # If true, SmartyPants will be used to convert quotes and dashes to
 # typographically correct entities.
 # html_use_smartypants = True
@@ -280,6 +274,15 @@
 # -- Options for openstackdocstheme -------------------------------------------
 
-openstack_projects = [
+openstackdocs_projects = [
+    'glance',
     'nova',
+    'neutron',
+    'placement',
 ]
+
+openstackdocs_bug_tag = u'api-guide'
+openstackdocs_repo_name = 'openstack/nova'
+openstackdocs_bug_project = 'nova'
+openstackdocs_auto_version = False
+openstackdocs_auto_name = False
diff --git a/api-guide/source/down_cells.rst b/api-guide/source/down_cells.rst
new file mode 100644
index 00000000000..bd5980d4d3d
--- /dev/null
+++ b/api-guide/source/down_cells.rst
@@ -0,0 +1,353 @@
+===================
+Handling Down Cells
+===================
+
+Starting from microversion 2.69, if there are transient conditions in a
+deployment like partial infrastructure failures (for example a cell
+not being reachable), some API responses may contain partial results
+(i.e. be missing some keys). The server operations which exhibit this
+behavior are described below:
+
+* List Servers (GET /servers): This operation may give partial
+  constructs from the non-responsive portion of the infrastructure. A
+  typical response, while listing servers from unreachable parts of
+  the infrastructure, would include only the following keys from
+  available information:
+
+  - status: The state of the server which will be "UNKNOWN".
+  - id: The UUID of the server.
+  - links: Links to the servers in question.
+
+  A sample response for a GET /servers request that includes one
+  result each from an unreachable and a healthy part of the
+  infrastructure is shown below.
+
+  Response::
+
+    {
+      "servers": [
+        {
+          "status": "UNKNOWN",
+          "id": "bcc6c6dd-3d0a-4633-9586-60878fd68edb",
+          "links": [
+            {
+              "rel": "self",
+              "href": "http://openstack.example.com/v2/6f70656e737461636b20342065766572/servers/bcc6c6dd-3d0a-4633-9586-60878fd68edb"
+            },
+            {
+              "rel": "bookmark",
+              "href": "http://openstack.example.com/6f70656e737461636b20342065766572/servers/bcc6c6dd-3d0a-4633-9586-60878fd68edb"
+            }
+          ]
+        },
+        {
+          "id": "22c91117-08de-4894-9aa9-6ef382400985",
+          "name": "test_server",
+          "links": [
+            {
+              "rel": "self",
+              "href": "http://openstack.example.com/v2/6f70656e737461636b20342065766572/servers/22c91117-08de-4894-9aa9-6ef382400985"
+            },
+            {
+              "rel": "bookmark",
+              "href": "http://openstack.example.com/6f70656e737461636b20342065766572/servers/22c91117-08de-4894-9aa9-6ef382400985"
+            }
+          ]
+        }
+      ]
+    }
+
+* List Servers Detailed (GET /servers/detail): This operation may give
+  partial constructs from the non-responsive portion of the
+  infrastructure. A typical response, while listing servers from
+  unreachable parts of the infrastructure, would include only the
+  following keys from available information:
+
+  - status: The state of the server which will be "UNKNOWN".
+  - id: The UUID of the server.
+  - tenant_id: The tenant_id to which the server belongs.
+  - created: The time of server creation.
+  - links: Links to the servers in question.
+  - security_groups: One or more security groups. (Optional)
+
+  A sample response for a GET /servers/detail request that includes
+  one result each from an unreachable and a healthy part of the
+  infrastructure is shown below.
+
+  Response::
+
+    {
+      "servers": [
+        {
+          "created": "2018-06-29T15:07:29Z",
+          "id": "bcc6c6dd-3d0a-4633-9586-60878fd68edb",
+          "status": "UNKNOWN",
+          "tenant_id": "940f47b984034c7f8f9624ab28f5643c",
+          "security_groups": [
+            {
+              "name": "default"
+            }
+          ],
+          "links": [
+            {
+              "href": "http://openstack.example.com/v2.1/6f70656e737461636b20342065766572/servers/bcc6c6dd-3d0a-4633-9586-60878fd68edb",
+              "rel": "self"
+            },
+            {
+              "href": "http://openstack.example.com/6f70656e737461636b20342065766572/servers/bcc6c6dd-3d0a-4633-9586-60878fd68edb",
+              "rel": "bookmark"
+            }
+          ]
+        },
+        {
+          "OS-DCF:diskConfig": "AUTO",
+          "OS-EXT-AZ:availability_zone": "nova",
+          "OS-EXT-SRV-ATTR:host": "compute",
+          "OS-EXT-SRV-ATTR:hostname": "new-server-test",
+          "OS-EXT-SRV-ATTR:hypervisor_hostname": "fake-mini",
+          "OS-EXT-SRV-ATTR:instance_name": "instance-00000001",
+          "OS-EXT-SRV-ATTR:kernel_id": "",
+          "OS-EXT-SRV-ATTR:launch_index": 0,
+          "OS-EXT-SRV-ATTR:ramdisk_id": "",
+          "OS-EXT-SRV-ATTR:reservation_id": "r-y0w4v32k",
+          "OS-EXT-SRV-ATTR:root_device_name": "/dev/sda",
+          "OS-EXT-SRV-ATTR:user_data": "IyEvYmluL2Jhc2gKL2Jpbi9zdQplY2hvICJJIGFtIGluIHlvdSEiCg==",
+          "OS-EXT-STS:power_state": 1,
+          "OS-EXT-STS:task_state": null,
+          "OS-EXT-STS:vm_state": "active",
+          "OS-SRV-USG:launched_at": "2017-10-10T15:49:09.516729",
+          "OS-SRV-USG:terminated_at": null,
+          "accessIPv4": "1.2.3.4",
+          "accessIPv6": "80fe::",
+          "addresses": {
+            "private": [
+              {
+                "OS-EXT-IPS-MAC:mac_addr": "aa:bb:cc:dd:ee:ff",
+                "OS-EXT-IPS:type": "fixed",
+                "addr": "192.168.0.3",
+                "version": 4
+              }
+            ]
+          },
+          "config_drive": "",
+          "created": "2017-10-10T15:49:08Z",
+          "description": null,
+          "flavor": {
+            "disk": 1,
+            "ephemeral": 0,
+            "extra_specs": {
+              "hw:numa_nodes": "1"
+            },
+            "original_name": "m1.tiny.specs",
+            "ram": 512,
+            "swap": 0,
+            "vcpus": 1
+          },
+          "hostId": "2091634baaccdc4c5a1d57069c833e402921df696b7f970791b12ec6",
+          "host_status": "UP",
+          "id": "569f39f9-7c76-42a1-9c2d-8394e2638a6d",
+          "image": {
+            "id": "70a599e0-31e7-49b7-b260-868f441e862b",
+            "links": [
+              {
+                "href": "http://openstack.example.com/6f70656e737461636b20342065766572/images/70a599e0-31e7-49b7-b260-868f441e862b",
+                "rel": "bookmark"
+              }
+            ]
+          },
+          "key_name": null,
+          "links": [
+            {
+              "href": "http://openstack.example.com/v2.1/6f70656e737461636b20342065766572/servers/569f39f9-7c76-42a1-9c2d-8394e2638a6d",
+              "rel": "self"
+            },
+            {
+              "href": "http://openstack.example.com/6f70656e737461636b20342065766572/servers/569f39f9-7c76-42a1-9c2d-8394e2638a6d",
+              "rel": "bookmark"
+            }
+          ],
+          "locked": false,
+          "metadata": {
+            "My Server Name": "Apache1"
+          },
+          "name": "new-server-test",
+          "os-extended-volumes:volumes_attached": [],
+          "progress": 0,
+          "security_groups": [
+            {
+              "name": "default"
+            }
+          ],
+          "status": "ACTIVE",
+          "tags": [],
+          "tenant_id": "6f70656e737461636b20342065766572",
+          "trusted_image_certificates": [
+            "0b5d2c72-12cc-4ba6-a8d7-3ff5cc1d8cb8",
+            "674736e3-f25c-405c-8362-bbf991e0ce0a"
+          ],
+          "updated": "2017-10-10T15:49:09Z",
+          "user_id": "fake"
+        }
+      ]
+    }
+
+  **Edge Cases**
+
+  * **Filters:** If the user is listing servers using filters, results
+    from unreachable parts of the infrastructure cannot be tested for
+    matching those filters and thus no minimalistic construct will be
+    provided. Note that by default ``openstack server list`` uses the
+    ``deleted=False`` and ``project_id=tenant_id`` filters and since
+    we know both of these fundamental values at all times, they are
+    the only allowed filters to be applied to servers with only
+    partial information available. Hence only doing ``openstack
+    server list`` and ``openstack server list --all-projects`` (admin
+    only) will show minimalistic results when parts of the
+    infrastructure are unreachable. Other filters like ``openstack
+    server list --deleted`` or ``openstack server list --host xx``
+    will skip the results depending on the administrator's
+    configuration of the deployment. Note that the filter ``openstack
+    server list --limit`` will also skip the results and if not
+    specified will return 1000 (or the configured default) records
+    from the available parts of the infrastructure.
+
+  * **Marker:** If the user does ``openstack server list --marker`` it will
+    fail with a 500 if the marker is an instance that is no longer reachable.
+
+  * **Sorting:** We exclude the unreachable parts of the infrastructure just
+    like we do for filters since there is no way of obtaining valid sorted
+    results from those parts with missing information.
+
+  * **Paging:** We ignore the parts of the deployment which are
+    non-responsive. For example if we have three cells A (reachable state),
+    B (unreachable state) and C (reachable state) and if the marker is half
+    way in A, we would get the remaining half of the results from A, all the
+    results from C and ignore cell B.
+
+  .. note:: All the edge cases that are not supported for minimal constructs
+     would give responses based on the administrator's configuration of the
+     deployment, either skipping those results or returning an error.
+
+* Show Server Details (GET /servers/{server_id}): This operation may
+  give partial constructs from the non-responsive portion of the
+  infrastructure. A typical response while viewing a server from an
+  unreachable part of the infrastructure would include only the
+  following keys from available information:
+
+  - status: The state of the server which will be "UNKNOWN".
+  - id: The UUID of the server.
+  - tenant_id: The tenant_id to which the server belongs.
+  - created: The time of server creation.
+  - user_id: The user_id to which the server belongs. This may be "UNKNOWN"
+    for older servers.
+  - image: The image details of the server. If it is not set like
+    in the boot-from-volume case, this value will be an empty string.
+  - flavor: The flavor details of the server.
+  - availability_zone: The availability_zone of the server if it was specified
+    during boot time and "UNKNOWN" otherwise.
+  - power_state: Its value will be 0 (``NOSTATE``).
+  - links: Links to the servers in question.
+  - server_groups: The UUIDs of the server groups to which the server belongs.
+    Currently this can contain at most one entry. Note that this key will be in
+    the response only from the "2.71" microversion.
+
+  A sample response for a GET /servers/{server_id} request that
+  includes one server from an unreachable part of the infrastructure
+  is shown below.
+
+  Response::
+
+    {
+      "server": [
+        {
+          "created": "2018-06-29T15:07:29Z",
+          "status": "UNKNOWN",
+          "tenant_id": "940f47b984034c7f8f9624ab28f5643c",
+          "id": "bcc6c6dd-3d0a-4633-9586-60878fd68edb",
+          "user_id": "940f47b984034c7f8f9624ab28f5643c",
+          "image": {
+            "id": "70a599e0-31e7-49b7-b260-868f441e862b",
+          },
+          "flavor": {
+            "disk": 1,
+            "ephemeral": 0,
+            "extra_specs": {
+              "hw:numa_nodes": "1"
+            },
+            "original_name": "m1.tiny.specs",
+            "ram": 512,
+            "swap": 0,
+            "vcpus": 1
+          },
+          "OS-EXT-AZ:availability_zone": "geneva",
+          "OS-EXT-STS:power_state": 0,
+          "links": [
+            {
+              "href": "http://openstack.example.com/v2.1/6f70656e737461636b20342065766572/servers/bcc6c6dd-3d0a-4633-9586-60878fd68edb",
+              "rel": "self"
+            },
+            {
+              "href": "http://openstack.example.com/6f70656e737461636b20342065766572/servers/bcc6c6dd-3d0a-4633-9586-60878fd68edb",
+              "rel": "bookmark"
+            }
+          ],
+          "server_groups": ["0fd77252-4eef-4ec4-ae9b-e05dfc98aeac"]
+        }
+      ]
+    }
+
+* List Compute Services (GET /os-services): This operation may give
+  partial constructs for the services with :program:`nova-compute` as
+  their binary from the non-responsive portion of the
+  infrastructure. A typical response while listing the compute
+  services from unreachable parts of the infrastructure would include
+  only the following keys for the :program:`nova-compute` services
+  from available information while the other services like the
+  :program:`nova-conductor` service will be skipped from the result:
+
+  - binary: The binary name of the service which would always be
+    ``nova-compute``.
+  - host: The name of the host running the service.
+  - status: The status of the service which will be "UNKNOWN".
+
+  A sample response for a GET /os-services request that includes two
+  compute services from unreachable parts of the infrastructure and
+  other services from a healthy one is shown below.
+
+  Response::
+
+    {
+      "services": [
+        {
+          "binary": "nova-compute",
+          "host": "host1",
+          "status": "UNKNOWN"
+        },
+        {
+          "binary": "nova-compute",
+          "host": "host2",
+          "status": "UNKNOWN"
+        },
+        {
+          "id": 1,
+          "binary": "nova-scheduler",
+          "disabled_reason": "test1",
+          "host": "host3",
+          "state": "up",
+          "status": "disabled",
+          "updated_at": "2012-10-29T13:42:02.000000",
+          "forced_down": false,
+          "zone": "internal"
+        },
+        {
+          "id": 2,
+          "binary": "nova-compute",
+          "disabled_reason": "test2",
+          "host": "host4",
+          "state": "up",
+          "status": "disabled",
+          "updated_at": "2012-10-29T13:42:05.000000",
+          "forced_down": false,
+          "zone": "nova"
+        }
+      ]
+    }
diff --git a/api-guide/source/extra_specs_and_properties.rst b/api-guide/source/extra_specs_and_properties.rst
index 29a9ffe942f..cd5411789a6 100644
--- a/api-guide/source/extra_specs_and_properties.rst
+++ b/api-guide/source/extra_specs_and_properties.rst
@@ -2,16 +2,44 @@
 Flavor Extra Specs and Image Properties
 =======================================
 
-TODO: Generic description about Flavor Extra Specs and Image Properties.
+Flavor extra specs and image properties are used to control certain aspects
+of scheduling behavior for a server.
+
+The flavor of a server can be changed during a
+:nova-doc:`resize <user/resize>` operation.
+
+The image of a server can be changed during a
+:nova-doc:`rebuild <user/rebuild>` operation.
+
+By default, flavor extra specs are controlled by administrators of the cloud.
+If users are authorized to upload their own images to the image service, they
+may be able to specify their own image property requirements.
+
+There are many cases of flavor extra specs and image properties that are for
+the same functionality. In many cases the image property takes precedence over
+the flavor extra spec if both are used in the same server.
 
 Flavor Extra Specs
 ==================
 
-TODO: List the extra specs which we supported at here. The best is the extra
-specs can auto-gen from the nova code.
+Refer to the :nova-doc:`user guide <user/flavors>` for a
+list of official extra specs.
+
+While there are standard extra specs, deployments can define their own extra
+specs to be used with host aggregates and custom scheduler filters as
+necessary. See the
+:nova-doc:`reference guide <reference/scheduler-hints-vs-flavor-extra-specs>`
+for more details.
 
 Image Properties
 ================
 
-TODO: List the properties which affect the server creation. The best is the
-properties can auto-gen from the image properties object.
+Refer to the image service documentation for a list of official
+:glance-doc:`image properties <admin/useful-image-properties>` and
+:glance-doc:`metadata definition concepts <user/metadefs-concepts>`.
+
+Unlike flavor extra specs, image properties are standardized in the compute
+service and thus they must be `registered`_ within the compute service before
+they can be used.
+
+.. _registered: https://opendev.org/openstack/nova/src/branch/master/nova/objects/image_meta.py
diff --git a/api-guide/source/faults.rst b/api-guide/source/faults.rst
index 88d0cca5f9c..529b119a457 100644
--- a/api-guide/source/faults.rst
+++ b/api-guide/source/faults.rst
@@ -67,11 +67,167 @@ Response header example::
 
 Server Actions
 --------------
 
-There is an API for end users to list the outcome of Server Actions,
-referencing the requested action by request id.
+Most `server action APIs`_ are asynchronous. Usually the API service will do
+some minimal work and then send the request off to the ``nova-compute`` service
+to complete the action, and the API will return a 202 response to the client.
+The client will poll the API until the operation completes, which could be a
+status change on the server, but usually means waiting for the server's
+``OS-EXT-STS:task_state`` field to go to ``null``, indicating the action has
+completed either successfully or with an error.
+
+If a server action fails and the server status changes to ``ERROR``, an
+:ref:`instance fault <instance-fault>` will be shown with the server details.
+
+The `os-instance-actions API`_ allows end users to list the outcome of
+server actions, referencing the requested action by request id. This is useful
+when an action fails and the server status does not change to ``ERROR``.
+
+To illustrate, consider a server (vm1) created with flavor ``m1.tiny``:
+
+.. code-block:: console
+
+   $ openstack server create --flavor m1.tiny --image cirros-0.4.0-x86_64-disk --wait vm1
+   +-----------------------------+-----------------------------------------------------------------+
+   | Field                       | Value                                                           |
+   +-----------------------------+-----------------------------------------------------------------+
+   | OS-DCF:diskConfig           | MANUAL                                                          |
+   | OS-EXT-AZ:availability_zone | nova                                                            |
+   | OS-EXT-STS:power_state      | Running                                                         |
+   | OS-EXT-STS:task_state       | None                                                            |
+   | OS-EXT-STS:vm_state         | active                                                          |
+   | OS-SRV-USG:launched_at      | 2019-12-02T19:14:48.000000                                      |
+   | OS-SRV-USG:terminated_at    | None                                                            |
+   | accessIPv4                  |                                                                 |
+   | accessIPv6                  |                                                                 |
+   | addresses                   | private=10.0.0.60, fda0:e0c4:2764:0:f816:3eff:fe03:806          |
+   | adminPass                   | NgascCr3dYo4                                                    |
+   | config_drive                |                                                                 |
+   | created                     | 2019-12-02T19:14:42Z                                            |
+   | flavor                      | m1.tiny (1)                                                     |
+   | hostId                      | 22e88bec09a7e33606348fce0abac0ebbbe091a35e29db1498ec4e14        |
+   | id                          | 344174b8-34fd-4017-ae29-b9084dcf3861                            |
+   | image                       | cirros-0.4.0-x86_64-disk (cce5e6d6-d359-4152-b277-1b4f1871557f) |
+   | key_name                    | None                                                            |
+   | name                        | vm1                                                             |
+   | progress                    | 0                                                               |
+   | project_id                  | b22597ea961545f3bde1b2ede0bd5b91                                |
+   | properties                  |                                                                 |
+   | security_groups             | name='default'                                                  |
+   | status                      | ACTIVE                                                          |
+   | updated                     | 2019-12-02T19:14:49Z                                            |
+   | user_id                     | 046033fb3f824550999752b6525adbac                                |
+   | volumes_attached            |                                                                 |
+   +-----------------------------+-----------------------------------------------------------------+
+
+The owner of the server then tries to resize the server to flavor ``m1.small``,
+which fails because there are no hosts available on which to resize the server:
+
+.. code-block:: console
+
+   $ openstack server resize --flavor m1.small --wait vm1
+   Complete
+
+Despite the openstack command saying the operation completed, the server shows
+the original ``m1.tiny`` flavor and the status is not ``VERIFY_RESIZE``:
+
+.. code-block::
+
+   $ openstack server show vm1 -f value -c status -c flavor
+   m1.tiny (1)
+   ACTIVE
+
+Since the status is not ``ERROR``, there is no ``fault`` field in the server
+details, so we find the details by listing the events for the server:
+
+.. code-block:: console
+
+   $ openstack server event list vm1
+   +------------------------------------------+--------------------------------------+--------+----------------------------+
+   | Request ID                               | Server ID                            | Action | Start Time                 |
+   +------------------------------------------+--------------------------------------+--------+----------------------------+
+   | req-ea1b0dfc-3186-42a9-84ff-c4f4fb130fae | 344174b8-34fd-4017-ae29-b9084dcf3861 | resize | 2019-12-02T19:15:35.000000 |
+   | req-4cdc4c93-0668-4ae6-98c8-a0a5fcc63d39 | 344174b8-34fd-4017-ae29-b9084dcf3861 | create | 2019-12-02T19:14:42.000000 |
+   +------------------------------------------+--------------------------------------+--------+----------------------------+
+
+To see details about the ``resize`` action, we use the Request ID for that
+action:
+
+.. code-block:: console
+
+   $ openstack server event show vm1 req-ea1b0dfc-3186-42a9-84ff-c4f4fb130fae
+   +---------------+------------------------------------------+
+   | Field         | Value                                    |
+   +---------------+------------------------------------------+
+   | action        | resize                                   |
+   | instance_uuid | 344174b8-34fd-4017-ae29-b9084dcf3861     |
+   | message       | Error                                    |
+   | project_id    | b22597ea961545f3bde1b2ede0bd5b91         |
+   | request_id    | req-ea1b0dfc-3186-42a9-84ff-c4f4fb130fae |
+   | start_time    | 2019-12-02T19:15:35.000000               |
+   | user_id       | 046033fb3f824550999752b6525adbac         |
+   +---------------+------------------------------------------+
+
+We see the message is "Error" but are not sure what failed. By default, the
+event details for an action are not shown to users without the admin role, so
+use microversion 2.51 to see the events (the ``events`` field is JSON-formatted
+here for readability):
+
+.. code-block::
+
+   $ openstack --os-compute-api-version 2.51 server event show vm1 req-ea1b0dfc-3186-42a9-84ff-c4f4fb130fae -f json -c events
+   {
+     "events": [
+       {
+         "event": "cold_migrate",
+         "start_time": "2019-12-02T19:15:35.000000",
+         "finish_time": "2019-12-02T19:15:36.000000",
+         "result": "Error"
+       },
+       {
+         "event": "conductor_migrate_server",
+         "start_time": "2019-12-02T19:15:35.000000",
+         "finish_time": "2019-12-02T19:15:36.000000",
+         "result": "Error"
+       }
+     ]
+   }
+
+With the default policy configuration, a user with the admin role can see a
+``traceback`` for each failed event, just like with an instance fault:
+
+.. 
code-block:: -For more details, please see: -https://developer.openstack.org/api-ref/compute/#servers-run-an-action-servers-action + $ source openrc admin admin + $ openstack --os-compute-api-version 2.51 server event show 344174b8-34fd-4017-ae29-b9084dcf3861 req-ea1b0dfc-3186-42a9-84ff-c4f4fb130fae -f json -c events + { + "events": [ + { + "event": "cold_migrate", + "start_time": "2019-12-02T19:15:35.000000", + "finish_time": "2019-12-02T19:15:36.000000", + "result": "Error", + "traceback": " File \"/opt/stack/nova/nova/conductor/manager.py\", + line 301, in migrate_server\n host_list)\n + File \"/opt/stack/nova/nova/conductor/manager.py\", line 367, in + _cold_migrate\n raise exception.NoValidHost(reason=msg)\n" + }, + { + "event": "conductor_migrate_server", + "start_time": "2019-12-02T19:15:35.000000", + "finish_time": "2019-12-02T19:15:36.000000", + "result": "Error", + "traceback": " File \"/opt/stack/nova/nova/compute/utils.py\", + line 1410, in decorated_function\n return function(self, context, + *args, **kwargs)\n File \"/opt/stack/nova/nova/conductor/manager.py\", + line 301, in migrate_server\n host_list)\n + File \"/opt/stack/nova/nova/conductor/manager.py\", line 367, in + _cold_migrate\n raise exception.NoValidHost(reason=msg)\n" + } + ] + } + +.. _server action APIs: https://docs.openstack.org/api-ref/compute/#servers-run-an-action-servers-action +.. _os-instance-actions API: https://docs.openstack.org/api-ref/compute/#servers-actions-servers-os-instance-actions Logs ---- @@ -104,6 +260,8 @@ while neutron is using local request ID The local request IDs are useful to make 'call graphs'. +.. _instance-fault: + Instance Faults --------------- @@ -135,6 +293,7 @@ In many cases there are also notifications emitted that describe the error. This is an administrator focused API, that works best when treated as structured logging. +.. _synchronous_faults: Synchronous Faults ================== @@ -167,7 +326,7 @@ depending on the type of error. The following link contains a list of possible elements along with their associated error codes. For more information on possible error code, please see: -http://specs.openstack.org/openstack/api-wg/guidelines/http.html#http-response-codes +http://specs.openstack.org/openstack/api-wg/guidelines/http/response-codes.html Asynchronous faults =================== @@ -179,7 +338,7 @@ In these cases, the server is usually placed in an ``ERROR`` state. For some operations, like resize, it is possible that the operation fails but the instance gracefully returned to its original state before attempting the operation. In both of these cases, you should be able to find out more from -the Server Actions API described above. +the `Server Actions`_ API described above. When a server is placed into an ``ERROR`` state, a fault is embedded in the offending server. Note that these asynchronous faults follow the same format diff --git a/api-guide/source/general_info.rst b/api-guide/source/general_info.rst index 3dca099d6d4..b0e85749605 100644 --- a/api-guide/source/general_info.rst +++ b/api-guide/source/general_info.rst @@ -42,7 +42,7 @@ several key concepts: - **Flavor Extra Specs** Key and value pairs that can be used to describe the specification of - the server which more than just about CPU, disk and RAM. For example, + the server which is more than just about CPU, disk and RAM. For example, it can be used to indicate that the server created by this flavor has PCI devices, etc. 
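+
+  For example, a minimal sketch of how an operator could attach an extra spec
+  to a flavor with the command line client (the flavor name and the chosen
+  ``hw:cpu_policy`` value here are illustrative):
+
+  .. code-block:: console
+
+     # servers created from this flavor will request dedicated (pinned) CPUs
+     $ openstack flavor set --property hw:cpu_policy=dedicated m1.large.pinned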
@@ -60,7 +60,7 @@ several key concepts:
 - **Image Properties**
 
   Key and value pairs that can help end users to determine the requirements
-  of the guest os in the image.
+  of the guest operating system in the image.
 
 For more details, please see: :doc:`extra_specs_and_properties`
@@ -108,23 +108,27 @@ several key concepts:
 Networking Concepts
 -------------------
 
-In this section we focus on this related to networking.
+Networking is handled by the :neutron-doc:`networking service <>`. When working
+with a server in the compute service, the most important networking resource
+is a *port* which is part of a *network*. Ports can have *security groups*
+applied to control firewall access. Ports can also be linked to *floating IPs*
+for external network access depending on the networking service configuration.
 
-- **Port**
+When creating a server or attaching a network interface to an existing server,
+zero or more networks and/or ports can be specified to attach to the server.
+If nothing is provided, the compute service will by default create a port on
+the single network available to the project making the request. If more than
+one network is available to the project, such as a public external network and
+a private tenant network, an error will occur and the request will have to be
+made with a specific network or port. If a network is specified, the compute
+service will attempt to create a port on the given network on behalf of the
+user. More advanced types of ports, such as
+:neutron-doc:`SR-IOV ports `, must be pre-created and
+provided to the compute service.
 
-  TODO
+Refer to the `network API reference`_ for more details.
 
-- **Floating IPs, Pools and DNS**
-
-  TODO
-
-- **Security Groups**
-
-  TODO
-
-- **Extended Networks**
-
-  TODO
+.. _network API reference: https://docs.openstack.org/api-ref/network/
 
 
 Administrator Concepts
@@ -171,17 +175,12 @@ on compute hosts rather than servers.
     This service runs on every compute node, and communicates with a
     hypervisor for managing compute resources on that node.
 
-  - **nova-network (deprecated)**
-
-    This service handles networking of virtual servers. It is no longer under
-    active development, and is being replaced by Neutron.
-
-  - **nova-consoleauth (deprecated)**
-
-    This service provides authorization for compute instances consoles.
-
 - **Services Actions**
 
+  .. note::
+     The services actions described in this section apply only to
+     **nova-compute** services.
+
   - **enable, disable, disable-log-reason**
 
     The service can be disabled to indicate the service is not available anymore.
@@ -196,20 +195,31 @@ on compute hosts rather than servers.
 
     .. note:: This action is enabled in microversion 2.11.
 
-    This action allows you set the state of service down immediately. Actually
-    Nova only provides the health monitor of service status, there isn't any
-    guarantee about health status of other parts of infrastructure, like the
-    health status of data network, storage network and other components. The
-    more complete health monitor of infrastructure is provided by external
-    system normally. An external health monitor system can mark the service
-    down for notifying the fault.
+    This action allows you to set the state of a service down immediately.
+    Nova only provides a very basic health monitor of service status; there
+    isn't any guarantee about the health status of other parts of the
+    infrastructure, like the health status of the data network, storage
+    network and other components.
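+
+    As a minimal sketch, an external monitoring system (or an administrator)
+    could mark a fenced-off compute service down with the command line client
+    (the host name ``host1`` here is illustrative):
+
+    .. code-block:: console
+
+       # requires microversion 2.11 or later
+       $ openstack --os-compute-api-version 2.11 compute service set \
+           --down host1 nova-compute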
+
+    If you have a more extensive health monitoring system external to Nova,
+    and know that the service in question is dead (and disconnected from the
+    network), this can be used to tell the rest of Nova it can trust that this
+    service is never coming back, and to allow actions such as evacuate.
+
+    .. warning::
+
+       This must *only* be used if you have fully fenced the service in
+       question and ensured that it can never send updates to the rest of the
+       system. This can be done by powering off the node or completely
+       isolating its networking. If you force-down a service that is not
+       fenced you can corrupt the VMs that were running on that host.
 
 - **Hosts**
 
   Hosts are the *physical machines* that provide the resources for the virtual
-  servers created in Nova. They run a ``hypervisor`` (see definition below)
+  servers created in Nova. They run a **hypervisor** (see definition below)
   that handles the actual creation and management of the virtual servers.
-  Hosts also run the ``Nova compute service``, which receives requests from
+  Hosts also run the **Nova compute service**, which receives requests from
   Nova to interact with the virtual servers on that machine. When compute
   service receives a request, it calls the appropriate methods of the driver
   for that hypervisor in order to carry out the request. The driver acts as
@@ -261,30 +271,3 @@ on compute hosts rather than servers.
   Administrators are able to query the records in database for information
   about migrations. For example, they can determine the source and destination
   hosts, type of migration, or changes in the server's flavor.
-
-Relationship with Volume API
-============================
-
-Here we discuss about Cinder's API and how Nova users volume UUIDs.
-
-TODO - add more details.
-
-Relationship with Image API
-===========================
-
-Here we discuss about Glance's API and how Nova uses image UUIDs.
-We also discuss how Nova proxies setting image metadata.
-
-TODO - add more details.
-
-Interactions with neutron and nova-network (deprecated)
-=======================================================
-
-We talk about how networking can be provided be either neutron or
-nova-network (deprecated).
-
-Here we discuss about Neutron's API and how Nova users port UUIDs.
-We also discuss Nova automatically creating ports, proxying security groups,
-and proxying floating IPs. Also talk about the APIs we do not proxy.
-
-TODO - add more details.
diff --git a/api-guide/source/index.rst b/api-guide/source/index.rst
index 4c1b425f74e..2e6ac8042b7 100644
--- a/api-guide/source/index.rst
+++ b/api-guide/source/index.rst
@@ -24,7 +24,7 @@ compute resources might be Virtual Machines, Physical Machines or Containers.
 
 This guide covers the concepts in the OpenStack Compute API.
 For a full reference listing, please see:
-`Compute API Reference `__.
+`Compute API Reference `__.
 
 We welcome feedback, comments, and bug reports at
 `bugs.launchpad.net/nova `__.
@@ -60,7 +60,7 @@ the following endpoints:
 
 * / - list of available versions
 * /v2 - the first version of the Compute API, uses extensions
-  (we call this Compute API v2.0)
+  (we call this Compute API v2.0)
 * /v2.1 - same API, except uses microversions
 
 While this guide concentrates on documenting the v2.1 API,
@@ -79,14 +79,13 @@ Contents
 
    general_info
    server_concepts
   authentication
+   extra_specs_and_properties
   faults
   limits
   links_and_references
   paginated_collections
-   polling_changes-since_parameter
+   polling_changes
   request_and_response_formats
-
-.. 
toctree:: - :hidden: - - extra_specs_and_properties + down_cells + port_with_resource_request + accelerator-support diff --git a/api-guide/source/limits.rst b/api-guide/source/limits.rst index c2ed0af8593..a2f6b49edb6 100644 --- a/api-guide/source/limits.rst +++ b/api-guide/source/limits.rst @@ -9,7 +9,7 @@ operators and may differ from one deployment of the OpenStack Compute service to another. Please contact your provider to determine the limits that apply to your account. Your provider may be able to adjust your account's limits if they are too low. Also see the API Reference for -`Limits `__. +`Limits `__. Absolute limits ~~~~~~~~~~~~~~~ @@ -49,4 +49,4 @@ Determine limits programmatically Applications can programmatically determine current account limits. For information, see -`Limits `__. +`Limits `__. diff --git a/api-guide/source/microversions.rst b/api-guide/source/microversions.rst index 1b202665edf..b1590123cbc 100644 --- a/api-guide/source/microversions.rst +++ b/api-guide/source/microversions.rst @@ -27,24 +27,24 @@ There are multiple cases which you can resolve with microversions: - **Older clients with new cloud** -Before using an old client to talk to a newer cloud, the old client can check -the minimum version of microversions to verify whether the cloud is compatible -with the old API. This prevents the old client from breaking with backwards -incompatible API changes. - -Currently the minimum version of microversions is `2.1`, which is a -microversion compatible with the legacy v2 API. That means the legacy v2 API -user doesn't need to worry that their older client software will be broken when -their cloud is upgraded with new versions. And the cloud operator doesn't need -to worry that upgrading their cloud to newer versions will break any user with -older clients that don't expect these changes. + Before using an old client to talk to a newer cloud, the old client can check + the minimum version of microversions to verify whether the cloud is + compatible with the old API. This prevents the old client from breaking with + backwards incompatible API changes. + + Currently the minimum version of microversions is `2.1`, which is a + microversion compatible with the legacy v2 API. That means the legacy v2 API + user doesn't need to worry that their older client software will be broken + when their cloud is upgraded with new versions. And the cloud operator + doesn't need to worry that upgrading their cloud to newer versions will + break any user with older clients that don't expect these changes. - **User discovery of available features between clouds** -The new features can be discovered by microversions. The user client should -check the microversions firstly, and new features are only enabled when clouds -support. In this way, the user client can work with clouds that have deployed -different microversions simultaneously. + The new features can be discovered by microversions. The user client should + check the microversions firstly, and new features are only enabled when + clouds support. In this way, the user client can work with clouds that have + deployed different microversions simultaneously. Version Discovery ================= @@ -52,7 +52,7 @@ Version Discovery The Version API will return the minimum and maximum microversions. These values are used by the client to discover the API's supported microversion(s). -Requests to '/' will get version info for all endpoints. A response would look +Requests to `/` will get version info for all endpoints. 
A response would look as follows:: { @@ -86,12 +86,12 @@ as follows:: ] } -"version" is the maximum microversion, "min_version" is the minimum +``version`` is the maximum microversion, ``min_version`` is the minimum microversion. If the value is the empty string, it means this endpoint doesn't support microversions; it is a legacy v2 API endpoint -- for example, the endpoint `http://openstack.example.com/v2/` in the above sample. The endpoint `http://openstack.example.com/v2.1/` supports microversions; the maximum -microversion is '2.14', and the minimum microversion is '2.1'. The client +microversion is `2.14`, and the minimum microversion is `2.1`. The client should specify a microversion between (and including) the minimum and maximum microversion to access the endpoint. @@ -117,20 +117,20 @@ following header to specify the microversion:: This acts conceptually like the "Accept" header. Semantically this means: -* If neither `X-OpenStack-Nova-API-Version` nor `OpenStack-API-Version` +* If neither ``X-OpenStack-Nova-API-Version`` nor ``OpenStack-API-Version`` (specifying `compute`) is provided, act as if the minimum supported microversion was specified. -* If both headers are provided, `OpenStack-API-Version` will be preferred. +* If both headers are provided, ``OpenStack-API-Version`` will be preferred. -* If `X-OpenStack-Nova-API-Version` or `OpenStack-API-Version` is provided, +* If ``X-OpenStack-Nova-API-Version`` or ``OpenStack-API-Version`` is provided, respond with the API at that microversion. If that's outside of the range of microversions supported, return 406 Not Acceptable. -* If `X-OpenStack-Nova-API-Version` or `OpenStack-API-Version` has a value - of ``latest`` (special keyword), act as if maximum was specified. +* If ``X-OpenStack-Nova-API-Version`` or ``OpenStack-API-Version`` has a value + of `latest` (special keyword), act as if maximum was specified. -.. warning:: The ``latest`` value is mostly meant for integration testing and +.. warning:: The `latest` value is mostly meant for integration testing and would be dangerous to rely on in client code since microversions are not following semver and therefore backward compatibility is not guaranteed. Clients should always require a specific microversion but limit what is @@ -149,7 +149,7 @@ the response:: The first header specifies the microversion number of the API which was executed. -The `Vary` header is used as a hint to caching proxies that the response +The ``Vary`` header is used as a hint to caching proxies that the response is also dependent on the microversion and not just the body and query parameters. See :rfc:`2616` section 14.44 for details. diff --git a/api-guide/source/paginated_collections.rst b/api-guide/source/paginated_collections.rst index 08f7d137be6..e817642bf2d 100644 --- a/api-guide/source/paginated_collections.rst +++ b/api-guide/source/paginated_collections.rst @@ -4,18 +4,18 @@ Paginated collections To reduce load on the service, list operations return a maximum number of items at a time. The maximum number of items returned is determined -by the compute provider. To navigate the collection, the *``limit``* and -*``marker``* parameters can be set in the URI. For example: +by the compute provider. To navigate the collection, the ``limit`` and +``marker`` parameters can be set in the URI. For example: .. code:: ?limit=100&marker=1234 -The *``marker``* parameter is the ID of the last item in the previous +The ``marker`` parameter is the ID of the last item in the previous list. 
By default, the service sorts items by create time in descending order. When the service cannot identify a create time, it sorts items by ID. The -*``limit``* parameter sets the page size. Both parameters are optional. If the -client requests a *``limit``* beyond one that is supported by the deployment +``limit`` parameter sets the page size. Both parameters are optional. If the +client requests a ``limit`` beyond one that is supported by the deployment an overLimit (413) fault may be thrown. A marker with an invalid ID returns a badRequest (400) fault. @@ -25,11 +25,11 @@ implementation does not contain ``previous`` links. The last page in the list does not contain a link to "next" page. The following examples illustrate three pages in a collection of servers. The first page was retrieved through a **GET** to -``http://servers.api.openstack.org/v2.1/servers?limit=1``. In these +`http://servers.api.openstack.org/v2.1/servers?limit=1`. In these examples, the *``limit``* parameter sets the page size to a single item. Subsequent links honor the initial page size. Thus, a client can follow links to traverse a paginated collection without having to input the -*``marker``* parameter. +``marker`` parameter. **Example: Servers collection: JSON (first page)** diff --git a/api-guide/source/polling_changes-since_parameter.rst b/api-guide/source/polling_changes-since_parameter.rst deleted file mode 100644 index 52ea273af90..00000000000 --- a/api-guide/source/polling_changes-since_parameter.rst +++ /dev/null @@ -1,28 +0,0 @@ -================================================== -Efficient polling with the Changes-Since parameter -================================================== - -The REST API allows you to poll for the status of certain operations by -performing a **GET** on various elements. Rather than re-downloading and -re-parsing the full status at each polling interval, your REST client -may use the *``changes-since``* parameter to check for changes since a -previous request. The *``changes-since``* time is specified as an `ISO -8601 `__ dateTime -(2011-01-24T17:08Z). The form for the timestamp is CCYY-MM-DDThh:mm:ss. -An optional time zone may be written in by appending the form ±hh:mm -which describes the timezone as an offset from UTC. When the timezone is -not specified (2011-01-24T17:08), the UTC timezone is assumed. If -nothing has changed since the *``changes-since``* time, an empty list is -returned. If data has changed, only the items changed since the -specified time are returned in the response. For example, performing a -**GET** against -https://api.servers.openstack.org/v2.1/servers?\ *``changes-since``*\ =2015-01-24T17:08Z -would list all servers that have changed since Mon, 24 Jan 2015 17:08:00 -UTC. - -To allow clients to keep track of changes, the changes-since filter -displays items that have been *recently* deleted. Both images and -servers contain a ``DELETED`` status that indicates that the resource -has been removed. Implementations are not required to keep track of -deleted resources indefinitely, so sending a changes since time in the -distant past may miss deletions. diff --git a/api-guide/source/polling_changes.rst b/api-guide/source/polling_changes.rst new file mode 100644 index 00000000000..671ad894341 --- /dev/null +++ b/api-guide/source/polling_changes.rst @@ -0,0 +1,81 @@ +================= +Efficient polling +================= + +The REST API allows you to poll for the status of certain operations by +performing a **GET** on various elements. 
Rather than re-downloading and
+re-parsing the full status at each polling interval, your REST client may
+use the ``changes-since`` and/or ``changes-before`` parameters to check
+for changes within a specified time.
+
+The ``changes-since`` time or ``changes-before`` time is specified as
+an `ISO 8601 `__ dateTime
+(`2011-01-24T17:08Z`). The form for the timestamp is **CCYY-MM-DDThh:mm:ss**.
+An optional time zone may be written in by appending the form ±hh:mm
+which describes the timezone as an offset from UTC. When the timezone is
+not specified (`2011-01-24T17:08`), the UTC timezone is assumed.
+
+The following situations need to be considered:
+
+* If nothing has changed since the ``changes-since`` time, an empty list is
+  returned. If data has changed, only the items changed since the specified
+  time are returned in the response. For example, performing a
+  **GET** against::
+
+    https://api.servers.openstack.org/v2.1/servers?changes-since=2015-01-24T17:08Z
+
+  would list all servers that have changed since Mon, 24 Jan 2015 17:08:00
+  UTC.
+
+* If nothing has changed earlier than or equal to the ``changes-before``
+  time, an empty list is returned. If data has changed, only the items
+  changed earlier than or equal to the specified time are returned in the
+  response. For example, performing a **GET** against::
+
+    https://api.servers.openstack.org/v2.1/servers?changes-before=2015-01-24T17:08Z
+
+  would list all servers that have changed earlier than or equal to
+  Mon, 24 Jan 2015 17:08:00 UTC.
+
+* If nothing has changed later than or equal to ``changes-since``, or
+  earlier than or equal to ``changes-before``, an empty list is returned.
+  If data has changed, only the items changed between ``changes-since``
+  time and ``changes-before`` time are returned in the response.
+  For example, performing a **GET** against::
+
+    https://api.servers.openstack.org/v2.1/servers?changes-since=2015-01-24T17:08Z&changes-before=2015-01-25T17:08Z
+
+  would list all servers that have changed later than or equal to Mon,
+  24 Jan 2015 17:08:00 UTC, and earlier than or equal to Mon, 25 Jan 2015
+  17:08:00 UTC.
+
+Microversion change history for servers, instance actions and migrations
+regarding ``changes-since`` and ``changes-before``:
+
+* The `2.21 microversion`_ allows reading instance actions for a deleted
+  server resource.
+* The `2.58 microversion`_ allows filtering on ``changes-since`` when listing
+  instance actions for a server.
+* The `2.59 microversion`_ allows filtering on ``changes-since`` when listing
+  migration records.
+* The `2.66 microversion`_ adds the ``changes-before`` filter when listing
+  servers, instance actions and migrations.
+
+Neither the ``changes-since`` filter nor the ``changes-before`` filter
+changes any read-deleted behavior in the os-instance-actions or
+os-migrations APIs. The os-instance-actions API with the 2.21 microversion
+allows retrieving instance actions for a deleted server resource.
+The os-migrations API takes an optional ``instance_uuid`` filter parameter
+but does not support returning deleted migration records.
+
+To allow clients to keep track of changes, the ``changes-since`` filter
+and the ``changes-before`` filter display items that have been *recently*
+deleted. Servers contain a ``DELETED`` status that indicates that the
+resource has been removed. Implementations are not required to keep track
+of deleted resources indefinitely, so sending a ``changes-since`` time or
+a ``changes-before`` time in the distant past may miss deletions.
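+
+As a minimal sketch of a client-side poll (the ``$COMPUTE`` endpoint and
+``$TOKEN`` variables here are illustrative, not part of the API):
+
+.. code-block:: console
+
+   # only servers that changed since the last poll are returned
+   $ curl -s -H "X-Auth-Token: $TOKEN" \
+       "$COMPUTE/servers?changes-since=2015-01-24T17:08Z"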
+
+.. _2.21 microversion: https://docs.openstack.org/nova/latest/reference/api-microversion-history.html#id19
+.. _2.58 microversion: https://docs.openstack.org/nova/latest/reference/api-microversion-history.html#id53
+.. _2.59 microversion: https://docs.openstack.org/nova/latest/reference/api-microversion-history.html#id54
+.. _2.66 microversion: https://docs.openstack.org/nova/latest/reference/api-microversion-history.html#id59
diff --git a/api-guide/source/port_with_resource_request.rst b/api-guide/source/port_with_resource_request.rst
new file mode 100644
index 00000000000..9ca93f2fcff
--- /dev/null
+++ b/api-guide/source/port_with_resource_request.rst
@@ -0,0 +1,53 @@
+=================================
+Using ports with resource request
+=================================
+
+Starting from microversion 2.72, nova supports creating servers with neutron
+ports that have a resource request, visible as an admin-only port attribute
+``resource_request``. For example, a neutron port has a resource request if it
+has a QoS minimum bandwidth rule attached. Deleting such servers or detaching
+such ports has worked since the Stein version of nova without requiring any
+specific microversion.
+
+However, the following API operations are still not supported in nova:
+
+* Creating servers with neutron networks having QoS minimum bandwidth rule is
+  not supported. The user needs to pre-create the port in that neutron network
+  and create the server with the pre-created port.
+
+* Attaching Neutron ports and networks having QoS minimum bandwidth rule is not
+  supported.
+
+Also, the following API operations are not supported in the 19.0.0 (Stein)
+version of nova:
+
+* Moving (resizing, migrating, live-migrating, evacuating, unshelving after
+  shelve offload) servers with ports having resource request is not yet
+  supported.
+
+As of 20.0.0 (Train), nova supports cold migrating and resizing servers with
+neutron ports having resource requests if both the source and destination
+compute services are upgraded to 20.0.0 (Train) and the
+``[upgrade_levels]/compute`` configuration does not prevent the computes from
+using the latest RPC version. However, cross-cell resize and cross-cell migrate
+operations are still not supported with such ports and Nova will fall back to
+same-cell resize if the server has such ports.
+
+As of 21.0.0 (Ussuri), nova supports evacuating, live migrating and unshelving
+servers with neutron ports having resource requests.
+
+As of 23.0.0 (Wallaby), nova supports attaching neutron ports having QoS
+minimum bandwidth rules.
+
+Extended resource request
+~~~~~~~~~~~~~~~~~~~~~~~~~
+
+It is expected that neutron 20.0.0 (Yoga) will implement an extended resource
+request format via the ``port-resource-request-groups`` neutron API
+extension. As of nova 24.0.0 (Xena), nova already supports this extension if
+every nova-compute service is upgraded to the Xena version and the
+``[upgrade_levels]/compute`` configuration does not prevent the computes from
+using the latest RPC version.
+
+See :nova-doc:`the admin guide ` for
+administrative details.
diff --git a/api-guide/source/server_concepts.rst b/api-guide/source/server_concepts.rst
index 320711592e4..62d8331891a 100644
--- a/api-guide/source/server_concepts.rst
+++ b/api-guide/source/server_concepts.rst
@@ -59,9 +59,9 @@ server status is one of the following values:
 
 - ``SHUTOFF``: The server was powered down by the user, either through
   the OpenStack Compute API or from within the server.
For example, the user - issued a ``shutdown -h`` command from within the server. If the OpenStack - Compute manager detects that the VM was powered down, it transitions the - server to the SHUTOFF status. + issued a :command:`shutdown -h` command from within the server. + If the OpenStack Compute manager detects that the VM was powered down, + it transitions the server to the SHUTOFF status. - ``SOFT_DELETED``: The server is marked as deleted but will remain in the cloud for some configurable amount of time. While soft-deleted, an @@ -69,21 +69,23 @@ server status is one of the following values: expires, the server will be deleted permanently. - ``SUSPENDED``: The server is suspended, either by request or - necessity. This status appears for only the following hypervisors: - XenServer/XCP, KVM, and ESXi. Administrative users may suspend a - server if it is infrequently used or to perform system maintenance. - When you suspend a server, its state is stored on disk, all - memory is written to disk, and the server is stopped. - Suspending a server is similar to placing a device in hibernation; - memory and vCPUs become available to create other servers. - -- ``UNKNOWN``: The state of the server is unknown. Contact your cloud - provider. + necessity. See the + :nova-doc:`feature support matrix ` + for supported compute drivers. When you suspend a server, its state is stored + on disk, all memory is written to disk, and the server is stopped. + Suspending a server is similar to placing a device in hibernation and its + occupied resource will not be freed but rather kept for when the server is + resumed. If an instance is infrequently used and the occupied resource needs + to be freed to create other servers, it should be shelved. + +- ``UNKNOWN``: The state of the server is unknown. It could be because a part + of the infrastructure is temporarily down (see :doc:`down_cells` + for more information). Contact your cloud provider. - ``VERIFY_RESIZE``: System is awaiting confirmation that the server is operational after a move or resize. -Server status is caculated from vm_state and task_state, which +Server status is calculated from vm_state and task_state, which are exposed to administrators: - vm_state describes a VM's current stable (not transition) state. That is, if @@ -93,8 +95,8 @@ are exposed to administrators: Refer to :nova-doc:`VM States `. - task_state represents what is happening to the instance at the - current moment. These tasks can be generic, such as 'spawning', or specific, - such as 'block_device_mapping'. These task states allow for a better view into + current moment. These tasks can be generic, such as `spawning`, or specific, + such as `block_device_mapping`. These task states allow for a better view into what a server is doing. Server creation @@ -102,24 +104,34 @@ Server creation Status Transition: -``BUILD`` +- ``BUILD`` + + While the server is building there are several task state transitions that + can occur: + + - ``scheduling``: The request is being scheduled to a compute node. + - ``networking``: Setting up network interfaces asynchronously. + - ``block_device_mapping``: Preparing block devices (local disks, volumes). + - ``spawning``: Creating the guest in the hypervisor. + +- ``ACTIVE`` -``ACTIVE`` + The terminal state for a successfully built and running server. -``ERROR`` (on error) +- ``ERROR`` (on error) -When you create a server, the operation asynchronously provisions a new -server. 
The progress of this operation depends on several factors -including location of the requested image, network I/O, host load, and -the selected flavor. The progress of the request can be checked by -performing a **GET** on /servers/*``id``*, which returns a progress -attribute (from 0% to 100% complete). The full URL to the newly created -server is returned through the ``Location`` header and is available as a -``self`` and ``bookmark`` link in the server representation. Note that -when creating a server, only the server ID, its links, and the -administrative password are guaranteed to be returned in the request. -You can retrieve additional attributes by performing subsequent **GET** -operations on the server. + When you create a server, the operation asynchronously provisions a new + server. The progress of this operation depends on several factors + including location of the requested image, network I/O, host load, and + the selected flavor. The progress of the request can be checked by + performing a **GET** on /servers/*{server_id}*, which returns a progress + attribute (from 0% to 100% complete). The full URL to the newly created + server is returned through the ``Location`` header and is available as a + ``self`` and ``bookmark`` link in the server representation. Note that + when creating a server, only the server ID, its links, and the + administrative password are guaranteed to be returned in the request. + You can retrieve additional attributes by performing subsequent **GET** + operations on the server. Server query ~~~~~~~~~~~~ @@ -131,10 +143,35 @@ by using query options. For different user roles, the user has different query options set: - For general user, there is limited set of attributes of the servers can be - used as query option. ``reservation_id``, ``name``, ``status``, ``image``, - ``flavor``, ``ip``, ``changes-since``, ``ip6``, ``tags``, ``tags-any``, - ``not-tags``, ``not-tags-any`` are supported options to be used. Other - options will be ignored by nova silently. + used as query option. The supported options are: + + - ``changes-since`` + - ``flavor`` + - ``image`` + - ``ip`` + - ``ip6`` (New in version 2.5) + - ``name`` + - ``not-tags`` (New in version 2.26) + - ``not-tags-any`` (New in version 2.26) + - ``reservation_id`` + - ``status`` + - ``tags`` (New in version 2.26) + - ``tags-any`` (New in version 2.26) + - ``changes-before`` (New in version 2.66) + - ``locked`` (New in version 2.73) + - ``availability_zone`` (New in version 2.83) + - ``config_drive`` (New in version 2.83) + - ``key_name`` (New in version 2.83) + - ``created_at`` (New in version 2.83) + - ``launched_at`` (New in version 2.83) + - ``terminated_at`` (New in version 2.83) + - ``power_state`` (New in version 2.83) + - ``task_state`` (New in version 2.83) + - ``vm_state`` (New in version 2.83) + - ``progress`` (New in version 2.83) + - ``user_id`` (New in version 2.83) + + Other options will be ignored by nova silently. - For administrator, most of the server attributes can be used as query options. Before the Ocata release, the fields in the database schema of @@ -144,31 +181,37 @@ For different user roles, the user has different query options set: the query options are different from the attribute naming in the servers API response. -.. code:: - Precondition: - there are 2 servers existing in cloud with following info: +Precondition: there are 2 servers existing in cloud with following info:: - "servers": [ - { - "name": "t1", - "locked": "true", - ... 
- }, - { - "name": "t2", - "locked": "false", - ... - } - ] + { + "servers": [ + { + "name": "t1", + "OS-EXT-SRV-ATTR:host": "devstack1", + ... + }, + { + "name": "t2", + "OS-EXT-SRV-ATTR:host": "devstack2", + ... + } + ] + } + +**Example: General user query server with administrator only options** - **Example: General user query server with administrator only options** +Request with non-administrator context: ``GET /servers/detail?host=devstack1`` - Request with non-administrator context: - GET /servers/detail?locked=1 - Note that 'locked' is not returned through API layer +.. note:: + + The ``host`` query parameter is only for administrator users and + the query parameter is ignored if specified by non-administrator users. + Thus the API returns servers of both ``devstack1`` and ``devstack2`` + in this example. + +Response:: - Response: { "servers": [ { @@ -182,12 +225,12 @@ For different user roles, the user has different query options set: ] } - **Example: Administrator query server with administrator only options** +**Example: Administrator query server with administrator only options** + +Request with administrator context: ``GET /servers/detail?host=devstack1`` - Request with administrator context: - GET /servers/detail?locked=1 +Response:: - Response: { "servers": [ { @@ -197,10 +240,13 @@ For different user roles, the user has different query options set: ] } -There are also some speical query options: +There are also some special query options: - ``changes-since`` returns the servers updated after the given time. - Please see: :doc:`polling_changes-since_parameter` + Please see: :doc:`polling_changes` + +- ``changes-before`` returns the servers updated before the given time. + Please see: :doc:`polling_changes` - ``deleted`` returns (or excludes) deleted servers @@ -210,52 +256,80 @@ There are also some speical query options: - ``all_tenants`` is an administrator query option, which allows the administrator to query the servers in any tenant. -.. code:: - **Example: User query server with special keys changes-since** +**Example: User query server with special keys changes-since or changes-before** + +Request: ``GET /servers/detail`` - Precondition: - GET /servers/detail +Response:: - Response: { "servers": [ { - "name": "t1" - "updated": "2015-12-15T15:55:52Z" + "name": "t1", + "updated": "2015-12-15T15:55:52Z", ... }, { "name": "t2", - "updated": "2015-12-17T15:55:52Z" + "updated": "2015-12-17T15:55:52Z", ... } ] } - GET /servers/detail?changes-since='2015-12-16T15:55:52Z' +Request: ``GET /servers/detail?changes-since='2015-12-16T15:55:52Z'`` + +Response:: - Response: { { "name": "t2", - "updated": "2015-12-17T15:55:52Z" + "updated": "2015-12-17T15:55:52Z", ... } } +Request: ``GET /servers/detail?changes-before='2015-12-16T15:55:52Z'`` + +Response:: + + { + { + "name": "t1", + "updated": "2015-12-15T15:55:52Z", + ... + } + } + +Request: +``GET /servers/detail?changes-since='2015-12-10T15:55:52Z'&changes-before='2015-12-28T15:55:52Z'`` + +Response:: + + { + "servers": [ + { + "name": "t1", + "updated": "2015-12-15T15:55:52Z", + ... + }, + { + "name": "t2", + "updated": "2015-12-17T15:55:52Z", + ... + } + ] + } + There are two kinds of matching in query options: Exact matching and regex matching. -.. 
code:: - - **Example: User query server using exact matching on host** +**Example: User query server using exact matching on host** - Precondition: - Request with administrator context: - GET /servers/detail +Request with administrator context: ``GET /servers/detail`` - Response: +Response:: { "servers": [ @@ -272,10 +346,9 @@ regex matching. ] } - Request with administrator context: - GET /servers/detail?host=devstack +Request with administrator context: ``GET /servers/detail?host=devstack`` - Response: +Response:: { "servers": [ @@ -287,13 +360,11 @@ regex matching. ] } - **Example: Query server using regex matching on name** +**Example: Query server using regex matching on name** - Precondition: - Request with administrator context: - GET /servers/detail +Request with administrator context: ``GET /servers/detail`` - Response: +Response:: { "servers": [ @@ -316,10 +387,9 @@ regex matching. ] } - Request with administrator context: - GET /servers/detail?name=t1 +Request with administrator context: ``GET /servers/detail?name=t1`` - Response: +Response:: { "servers": [ @@ -338,14 +408,12 @@ regex matching. ] } - **Example: User query server using exact matching on host and - regex matching on name** +**Example: User query server using exact matching on host and regex +matching on name** - Precondition: - Request with administrator context: - GET /servers/detail +Request with administrator context: ``GET /servers/detail`` - Response: +Response:: { "servers": [ @@ -367,10 +435,10 @@ regex matching. ] } - Request with administrator context: - GET /servers/detail?host=devstack1&name=test +Request with administrator context: +``GET /servers/detail?host=devstack1&name=test`` - Response: +Response:: { "servers": [ @@ -382,16 +450,10 @@ regex matching. ] } - "name": "t2", - "updated": "2015-12-17T15:55:52Z" - ... - } - ] - } +Request: ``GET /servers/detail?changes-since='2015-12-16T15:55:52Z'`` - GET /servers/detail?changes-since='2015-12-16T15:55:52Z' +Response:: - Response: { { "name": "t2", @@ -436,9 +498,9 @@ Server actions flavor, in essence, scaling the server up or down. The original server is saved for a period of time to allow rollback if there is a problem. All resizes should be tested and explicitly confirmed, at - which time the original server is removed. All resizes are - automatically confirmed after 24 hours if you do not confirm or - revert them. + which time the original server is removed. The resized server may be + automatically confirmed based on the administrator's configuration of + the deployment. Confirm resize action will delete the old server in the virt layer. The spawned server in the virt layer will be used from then on. @@ -446,12 +508,6 @@ Server actions spawned in the virt layer and revert all changes. The original server will be used from then on. - Also, there is a periodic task configured by configuration option - resize_confirm_window(in seconds), if this value is not 0, nova compute - will check whether the server is in resized state longer than - value of resize_confirm_window, it will automatically confirm the resize - of the server. - - **Pause**, **Unpause** You can pause a server by making a pause request. This request stores @@ -535,13 +591,62 @@ Server actions - **Lock**, **Unlock** - Lock a server so no further actions are allowed to the server. This can - be done by either administrator or the server's owner. By default, only owner - or administrator can lock the sever, and administrator can overwrite owner's lock. 
+   Lock a server so that the following actions by non-admin users are not
+   allowed on the server.
+
+   - Delete Server
+   - Change Administrative Password (changePassword Action)
+   - Confirm Resized Server (confirmResize Action)
+   - Force-Delete Server (forceDelete Action)
+   - Pause Server (pause Action)
+   - Reboot Server (reboot Action)
+   - Rebuild Server (rebuild Action)
+   - Rescue Server (rescue Action)
+   - Resize Server (resize Action)
+   - Restore Soft-Deleted Instance (restore Action)
+   - Resume Suspended Server (resume Action)
+   - Revert Resized Server (revertResize Action)
+   - Shelve-Offload (Remove) Server (shelveOffload Action)
+   - Shelve Server (shelve Action)
+   - Start Server (os-start Action)
+   - Stop Server (os-stop Action)
+   - Suspend Server (suspend Action)
+   - Trigger Crash Dump In Server
+   - Unpause Server (unpause Action)
+   - Unrescue Server (unrescue Action)
+   - Unshelve (Restore) Shelved Server (unshelve Action)
+   - Attach a volume to an instance
+   - Update a volume attachment
+   - Detach a volume from an instance
+   - Create Interface
+   - Detach Interface
+   - Create Or Update Metadata Item
+   - Create or Update Metadata Items
+   - Delete Metadata Item
+   - Replace Metadata Items
+   - Add (Associate) Fixed Ip (addFixedIp Action) (DEPRECATED)
+   - Remove (Disassociate) Fixed Ip (removeFixedIp Action) (DEPRECATED)
+
+   ..
+     NOTE(takashin):
+     The following APIs can be performed by administrators only by default.
+     So they are not listed in the above list.
+
+     - Migrate Server (migrate Action)
+     - Live-Migrate Server (os-migrateLive Action)
+     - Force Migration Complete Action (force_complete Action)
+     - Delete (Abort) Migration
+     - Inject Network Information (injectNetworkInfo Action)
+     - Reset Networking On A Server (resetNetwork Action)
+
+   But administrators can perform the actions on the server
+   even though the server is locked. By default, only the owner or
+   administrator can lock the server, and the administrator can overwrite the
+   owner's lock along with the locked_reason if it is specified.
 
    Unlock will unlock a server in locked state so additional
-   operations can be performed on the server. By default, only owner or
-   administrator can unlock the server.
+   operations can be performed on the server by non-admin users.
+   By default, only owner or administrator can unlock the server.
 
 - **Rescue**, **Unrescue**
@@ -603,12 +708,85 @@ limit.
 Block Device Mapping
 ~~~~~~~~~~~~~~~~~~~~
 
-TODO: Add some description about BDM.
+Simply speaking, Block Device Mapping describes how block devices are
+exposed to the server.
+
+For historical reasons, nova has two ways to specify the block device
+mapping in a server creation request body:
+
+- ``block_device_mapping``: This is the legacy way and supports backward
+  compatibility for the EC2 API.
+- ``block_device_mapping_v2``: This is the recommended format to specify
+  Block Device Mapping information in a server creation request body.
+
+Users cannot mix the two formats in the same request.
+
+For more information, refer to `Block Device Mapping
+`_.
+
+For the full list of ``block_device_mapping_v2`` parameters available when
+creating a server, see the `API reference
+`_.
+
+**Example for block_device_mapping_v2**
+
+This will create a 100GB volume-type block device from an image with UUID
+``bb02b1a3-bc77-4d17-ab5b-421d89850fca``. It will be used as the first
+boot device (``boot_index=0``), and this block device will not be deleted after
+we terminate the server.
Note that the ``imageRef`` parameter is not required +in this case since we are creating a volume-backed server. + +.. code-block:: json + + { + "server": { + "name": "volume-backed-server-test", + "flavorRef": "52415800-8b69-11e0-9b19-734f1195ff37", + "block_device_mapping_v2": [ + { + "boot_index": 0, + "uuid": "bb02b1a3-bc77-4d17-ab5b-421d89850fca", + "volume_size": "100", + "source_type": "image", + "destination_type": "volume", + "delete_on_termination": false + } + ] + } + } Scheduler Hints ~~~~~~~~~~~~~~~ -TODO: Add description about how to custom scheduling policy for server booting. +Scheduler hints are a way for the user to influence on which host the scheduler +places a server. They are pre-determined key-value pairs specified as a +dictionary separate from the main ``server`` dictionary in the server create +request. Available scheduler hints vary from cloud to cloud, depending on the +`cloud's configuration`_. + +.. code-block:: json + + { + "server": { + "name": "server-in-group", + "imageRef": "52415800-8b69-11e0-9b19-734f6f006e54", + "flavorRef": "52415800-8b69-11e0-9b19-734f1195ff37" + }, + "os:scheduler_hints": { + "group": "05a81485-010f-4df1-bbec-7821c85686e8" + } + } + + +For more information on how to specify scheduler hints refer to +`the create-server-detail Request section`_ in the Compute API reference. + +For more information on how scheduler hints are different from flavor extra +specs, refer to `this document`_. + +.. _cloud's configuration: https://docs.openstack.org/nova/latest/admin/configuration/schedulers.html +.. _the create-server-detail Request section: https://docs.openstack.org/api-ref/compute/?expanded=create-server-detail#create-server +.. _this document: https://docs.openstack.org/nova/latest/reference/scheduler-hints-vs-flavor-extra-specs.html#scheduler-hints Server Consoles ~~~~~~~~~~~~~~~ @@ -616,12 +794,11 @@ Server Consoles Server Consoles can also be supplied after server launched. There are several server console services available. First, users can get the console output from the specified server and can limit the lines of console text by setting -the length. Second, users can access multiple types of remote consoles. The -user can use novnc, xvpvnc, rdp-html5, spice-html5, serial, and webmks(start -from microversion 2.8) through either the OpenStack dashboard or the command -line. Refer to :nova-doc:`Configure remote console access -`. Specifically for Xenserver, it provides -the ability to create, delete, detail, list specified server vnc consoles. +the length. Secondly, users can access multiple types of remote consoles. The +user can use ``novnc``, ``rdp-html5``, ``spice-html5``, ``serial``, and +``webmks`` (starting from microversion 2.8) through either the OpenStack +dashboard or the command line. Refer to :nova-doc:`Configure remote console +access `. Server networks ~~~~~~~~~~~~~~~ @@ -631,28 +808,6 @@ time. One or more networks can be specified. User can also specify a specific port on the network or the fixed IP address to assign to the server interface. -Considerations -~~~~~~~~~~~~~~ - -- The maximum limit refers to the number of bytes in the decoded data - and not the number of characters in the encoded data. - -- The maximum number of file path/content pairs that you can supply is - also determined by the compute provider and is defined by the - maxPersonality absolute limit. - -- The absolute limit, maxPersonalitySize, is a byte limit that is - guaranteed to apply to all images in the deployment. 
Providers can - set additional per-image personality limits. - -- The file injection might not occur until after the server is built and - booted. - -- After file injection, personality files are accessible by only system - administrators. For example, on Linux, all files have root and the root - group as the owner and group owner, respectively, and allow user and - group read access only (octal 440). - Server access addresses ~~~~~~~~~~~~~~~~~~~~~~~ @@ -672,7 +827,7 @@ assigned at creation time. **Example: Create server with access IP: JSON request** -.. code:: +.. code-block:: json { "server": { @@ -690,7 +845,7 @@ assigned at creation time. **Example: Create server with multiple access IPs: JSON request** -.. code:: +.. code-block:: json { "server": { @@ -769,7 +924,7 @@ a cloud: This process can be repeated until the whole cloud has been updated, usually using a pool of empty hosts instead of just one. -- **Resource Optimization** +- **Resource Optimization** To reduce energy usage, some cloud operators will try and move servers so they fit into the minimum number of hosts, allowing @@ -880,10 +1035,11 @@ Configure Guest OS Metadata API ------------ -Nova provides a metadata api for servers to retrieve server specific metadata. -Neutron ensures this metadata api can be accessed through a predefined ip -address (169.254.169.254). For more details, see :nova-doc:`Metadata Service -`. + +Nova provides a metadata API for servers to retrieve server specific metadata. +Neutron ensures this metadata API can be accessed through a predefined IP +address, ``169.254.169.254``. For more details, refer to the :nova-doc:`user +guide `. Config Drive ------------ @@ -891,20 +1047,19 @@ Config Drive Nova is able to write metadata to a special configuration drive that attaches to the server when it boots. The server can mount this drive and read files from it to get information that is normally available through the metadata -service. For more details, see :nova-doc:`Config Drive -`. +service. For more details, refer to the :nova-doc:`user guide +`. User data --------- + A user data file is a special key in the metadata service that holds a file that cloud-aware applications in the server can access. -Nova has two ways to send user data to the deployed server, one is by -metadata service to let server able to access to its metadata through -a predefined ip address (169.254.169.254), then other way is to use config -drive which will wrap metadata into a iso9660 or vfat format disk so that -the deployed server can consume it by active engines such as cloud-init -during its boot process. +This information can be accessed via the metadata API or a config drive. The +latter allows the deployed server to consume it by active engines such as +cloud-init during its boot process, where network connectivity may not be an +option. Server personality ------------------ @@ -923,3 +1078,24 @@ Follow these guidelines when you inject files: - Encode the file contents as a Base64 string. The maximum size of the file contents is determined by the compute provider and may vary based on the image that is used to create the server. + +Considerations: + +- The maximum limit refers to the number of bytes in the decoded data + and not the number of characters in the encoded data. + +- The maximum number of file path/content pairs that you can supply is + also determined by the compute provider and is defined by the + maxPersonality absolute limit. 
+ +- The absolute limit, maxPersonalitySize, is a byte limit that is + guaranteed to apply to all images in the deployment. Providers can + set additional per-image personality limits. + +- The file injection might not occur until after the server is built and + booted. + +- After file injection, personality files are accessible by only system + administrators. For example, on Linux, all files have root and the root + group as the owner and group owner, respectively, and allow user and + group read access only (octal 440). diff --git a/api-guide/source/users.rst b/api-guide/source/users.rst index 7128a5ac66f..a0b74374a2f 100644 --- a/api-guide/source/users.rst +++ b/api-guide/source/users.rst @@ -27,7 +27,9 @@ Keystone middleware is used to authenticate users and identify their roles. The Compute API uses these roles, along with oslo.policy, to decide what the user is authorized to do. -TODO - link to compute admin guide for details. +Refer to the +:nova-doc:`compute admin guide ` +for details. Personas used in this guide =========================== @@ -47,16 +49,18 @@ cloud administrator permissions, such as a read-only role that is able to view a lists of servers for a specific tenant but is not able to perform any actions on any of them. -Note: this is not attempting to be an exhaustive set of personas that consider -various facets of the different users but instead aims to be a minimal set of -users such that we use a consistent terminology throughout this document. +.. note:: -TODO - could assign names to these users, or similar, to make it more "real". + This is not attempting to be an exhaustive set of personas that consider + various facets of the different users but instead aims to be a minimal set of + users such that we use a consistent terminology throughout this document. Discovering Policy ================== An API to discover what actions you are authorized to perform is still a work -in progress. Currently this reported by a HTTP 403 error. +in progress. Currently this is reported by an HTTP 403 +:ref:`error `. -TODO - link to the doc on errors. +Refer to the :nova-doc:`configuration guide ` for a list +of policy rules along with their default values. diff --git a/api-guide/source/versions.rst b/api-guide/source/versions.rst index ee8ce9fed6c..4019899e812 100644 --- a/api-guide/source/versions.rst +++ b/api-guide/source/versions.rst @@ -4,8 +4,8 @@ Versions The OpenStack Compute API uses both a URI and a MIME type versioning scheme. In the URI scheme, the first element of the path contains the -target version identifier (e.g. https://servers.api.openstack.org/ -v2.1/...). The MIME type versioning scheme uses HTTP content negotiation +target version identifier (e.g. `https://servers.api.openstack.org/ +v2.1/`...). The MIME type versioning scheme uses HTTP content negotiation where the ``Accept`` or ``Content-Type`` headers contains a MIME type that identifies the version (application/vnd.openstack.compute.v2.1+json). A version MIME type is always linked to a base MIME type, such as @@ -37,7 +37,7 @@ Permanent Links The MIME type versioning approach allows for creating of permanent links, because the version scheme is not specified in the URI path: -https://api.servers.openstack.org/224532/servers/123. +`https://api.servers.openstack.org/224532/servers/123`.
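To make the version negotiation above concrete, a versioned endpoint describes
itself when queried directly. The following is only a sketch of such a version
document; the exact fields and values (for example the maximum microversion and
the timestamp) vary by deployment and release:

.. code-block:: json

    {
        "version": {
            "id": "v2.1",
            "status": "CURRENT",
            "min_version": "2.1",
            "version": "2.90",
            "updated": "2013-07-23T11:33:21Z"
        }
    }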
If a request is made without a version specified in the URI or via HTTP headers, then a multiple-choices response (300) follows that provides @@ -99,13 +99,13 @@ everything following that truncated) returned from the authentication system. You can also obtain additional information about a specific version by performing a **GET** on the base version URL (such as, -``https://servers.api.openstack.org/v2.1/``). Version request URLs must -always end with a trailing slash (``/``). If you omit the slash, the +`https://servers.api.openstack.org/v2.1/`). Version request URLs must +always end with a trailing slash (`/`). If you omit the slash, the server might respond with a 302 redirection request. For examples of the list versions and get version details requests and -responses, see `*API versions* -`__. +responses, see `API versions +`__. The detailed version response contains pointers to both a human-readable and a machine-processable description of the API service. diff --git a/api-ref/source/conf.py b/api-ref/source/conf.py index 53c90dd1b88..ddcca926e72 100644 --- a/api-ref/source/conf.py +++ b/api-ref/source/conf.py @@ -22,8 +22,6 @@ # All configuration values have a default; values that are commented out # serve to show the default. -from nova.version import version_info - extensions = [ 'openstackdocstheme', @@ -42,25 +40,15 @@ master_doc = 'index' # General information about the project. -project = u'Compute API Reference' copyright = u'2010-present, OpenStack Foundation' # openstackdocstheme options -repository_name = 'openstack/nova' -bug_project = 'nova' -bug_tag = 'api-ref' - -# The version info for the project you're documenting, acts as replacement for -# |version| and |release|, also used in various other places throughout the -# built documents. -# -# The full version, including alpha/beta/rc tags. -release = version_info.release_string() -# The short X.Y version. -version = version_info.version_string() +openstackdocs_repo_name = 'openstack/nova' +openstackdocs_bug_project = 'nova' +openstackdocs_bug_tag = 'api-ref' # The name of the Pygments (syntax highlighting) style to use. -pygments_style = 'sphinx' +pygments_style = 'native' # -- Options for HTML output -------------------------------------------------- @@ -75,10 +63,6 @@ "sidebar_mode": "toc", } -# If not '', a 'Last updated on:' timestamp is inserted at every page bottom, -# using the given strftime format. -html_last_updated_fmt = '%Y-%m-%d %H:%M' - # -- Options for LaTeX output ------------------------------------------------- # Grouping the document tree into LaTeX files. List of tuples diff --git a/api-ref/source/extensions.inc b/api-ref/source/extensions.inc index 28123a89d05..7621dd3db47 100644 --- a/api-ref/source/extensions.inc +++ b/api-ref/source/extensions.inc @@ -19,9 +19,6 @@ code to interact with every cloud. As such, the entire extensions concept is deprecated, and will be removed in the near future. -For information about extensions, see `Extensions -`__. - List Extensions =============== diff --git a/api-ref/source/flavors.inc b/api-ref/source/flavors.inc index d3d8e4d453a..0216ce29838 100644 --- a/api-ref/source/flavors.inc +++ b/api-ref/source/flavors.inc @@ -108,9 +108,9 @@ Response - extra_specs: extra_specs_2_61 -**Example Create Flavor (v2.61)** +**Example Create Flavor (v2.75)** -.. literalinclude:: ../../doc/api_samples/flavor-manage/v2.61/flavor-create-post-resp.json +.. 
literalinclude:: ../../doc/api_samples/flavor-manage/v2.75/flavor-create-post-resp.json :language: javascript List Flavors With Details @@ -158,9 +158,9 @@ Response - os-flavor-access:is_public: flavor_is_public - extra_specs: extra_specs_2_61 -**Example List Flavors With Details (v2.61)** +**Example List Flavors With Details (v2.75)** -.. literalinclude:: ../../doc/api_samples/flavors/v2.61/flavors-detail-resp.json +.. literalinclude:: ../../doc/api_samples/flavors/v2.75/flavors-detail-resp.json :language: javascript Show Flavor Details @@ -201,9 +201,9 @@ Response - os-flavor-access:is_public: flavor_is_public - extra_specs: extra_specs_2_61 -**Example Show Flavor Details (v2.61)** +**Example Show Flavor Details (v2.75)** -.. literalinclude:: ../../doc/api_samples/flavors/v2.61/flavor-get-resp.json +.. literalinclude:: ../../doc/api_samples/flavors/v2.75/flavor-get-resp.json :language: javascript Update Flavor Description @@ -244,7 +244,7 @@ Response - flavor: flavor - name: flavor_name - - description: flavor_description_resp + - description: flavor_description_resp_no_min - id: flavor_id_body - ram: flavor_ram - disk: flavor_disk @@ -258,9 +258,9 @@ Response - extra_specs: extra_specs_2_61 -**Example Update Flavor Description (v2.61)** +**Example Update Flavor Description (v2.75)** -.. literalinclude:: ../../doc/api_samples/flavor-manage/v2.61/flavor-update-resp.json +.. literalinclude:: ../../doc/api_samples/flavor-manage/v2.75/flavor-update-resp.json :language: javascript Delete Flavor diff --git a/api-ref/source/images.inc b/api-ref/source/images.inc index 9b7bb3a25dc..621641281f7 100644 --- a/api-ref/source/images.inc +++ b/api-ref/source/images.inc @@ -13,7 +13,7 @@ The image metadata APIs will fail with a 404 starting from microversion 2.39. See: `Relevant Image APIs - `__. + `__. Lists, shows details and deletes images. Also sets, lists, shows details, create, update and deletes image metadata. @@ -21,7 +21,7 @@ Also sets, lists, shows details, create, update and deletes image metadata. An image is a collection of files that you use to create and rebuild a server. By default, operators provide pre-built operating system images. You can also create custom images. See: `Create Image Action -`__. +`__. By default, the ``policy.json`` file authorizes all users to view the image size in the ``OS-EXT-IMG-SIZE:size`` extended attribute. @@ -103,7 +103,7 @@ Response - name: image_name - minRam: minRam_body - minDisk: minDisk_body - - metadata: metadata_object + - metadata: metadata_object - created: created - updated: updated - status: image_status @@ -147,7 +147,7 @@ Response - name: image_name - minRam: minRam_body - minDisk: minDisk_body - - metadata: metadata_object + - metadata: metadata_object - created: created - updated: updated - status: image_status @@ -208,7 +208,7 @@ Response .. rest_parameters:: parameters.yaml - - metadata: metadata_object + - metadata: metadata_object **Example List Image Metadata Details: JSON response** @@ -233,7 +233,7 @@ Request .. rest_parameters:: parameters.yaml - image_id: image_id - - metadata: metadata_object + - metadata: metadata_object **Example Create Image Metadata: JSON request** @@ -245,7 +245,7 @@ Response .. rest_parameters:: parameters.yaml - - metadata: metadata_object + - metadata: metadata_object **Example Create Image Metadata: JSON response** @@ -270,7 +270,7 @@ Request .. 
rest_parameters:: parameters.yaml - image_id: image_id - - metadata: metadata_object + - metadata: metadata_object **Example Update Image Metadata: JSON request** @@ -282,7 +282,7 @@ Response .. rest_parameters:: parameters.yaml - - metadata: metadata_object + - metadata: metadata_object **Example Update Image Metadata: JSON response** diff --git a/api-ref/source/index.rst b/api-ref/source/index.rst index e52c84cc526..e0bba1504a5 100644 --- a/api-ref/source/index.rst +++ b/api-ref/source/index.rst @@ -6,7 +6,7 @@ This is a reference for the OpenStack Compute API which is provided by the Nova project. To learn more about the OpenStack Compute API concepts, please refer to -the `API guide `_. +the `API guide `_. .. rest_expand_all:: @@ -16,13 +16,13 @@ the `API guide `_. .. include:: servers.inc .. include:: servers-actions.inc .. include:: servers-action-fixed-ip.inc -.. include:: servers-action-evacuate.inc .. include:: servers-action-deferred-delete.inc .. include:: servers-action-console-output.inc .. include:: servers-action-shelve.inc .. include:: servers-action-crash-dump.inc .. include:: servers-action-remote-consoles.inc .. include:: servers-admin-action.inc +.. include:: servers-action-evacuate.inc .. include:: servers-remote-consoles.inc .. include:: server-security-groups.inc .. include:: diagnostics.inc @@ -37,12 +37,9 @@ the `API guide `_. .. include:: os-flavor-extra-specs.inc .. include:: os-keypairs.inc .. include:: limits.inc -.. include:: os-agents.inc .. include:: os-aggregates.inc .. include:: os-assisted-volume-snapshots.inc .. include:: os-availability-zone.inc -.. include:: os-cells.inc -.. include:: os-consoles.inc .. include:: os-hypervisors.inc .. include:: os-instance-usage-audit-log.inc .. include:: os-migrations.inc @@ -54,6 +51,15 @@ the `API guide `_. .. include:: os-services.inc .. include:: os-simple-tenant-usage.inc .. include:: os-server-external-events.inc +.. include:: server-topology.inc + +=============== +Deprecated APIs +=============== + +This section contains references for APIs which are deprecated and usually +limited to some maximum microversion. + .. include:: extensions.inc .. include:: os-networks.inc .. include:: os-volumes.inc @@ -63,7 +69,6 @@ the `API guide `_. .. include:: os-floating-ip-pools.inc .. include:: os-floating-ips.inc .. include:: os-security-groups.inc -.. include:: os-security-group-default-rules.inc .. include:: os-security-group-rules.inc .. include:: os-hosts.inc @@ -81,3 +86,7 @@ Compute API in the past, but no longer exist. .. include:: os-fixed-ips.inc .. include:: os-floating-ips-bulk.inc .. include:: os-floating-ip-dns.inc +.. include:: os-cells.inc +.. include:: os-consoles.inc +.. include:: os-security-group-default-rules.inc +.. 
include:: os-agents.inc diff --git a/api-ref/source/limits.inc b/api-ref/source/limits.inc index 2fb3030780f..2329c67faaf 100644 --- a/api-ref/source/limits.inc +++ b/api-ref/source/limits.inc @@ -32,25 +32,25 @@ Response - limits: limits - absolute: limits_absolutes - - maxImageMeta: image_metadata_items - - maxPersonality: injected_files - - maxPersonalitySize: injected_file_content_bytes - - maxSecurityGroupRules: security_group_rules_quota - - maxSecurityGroups: security_groups_quota - maxServerGroupMembers: server_group_members - maxServerGroups: server_groups - maxServerMeta: metadata_items - maxTotalCores: cores - - maxTotalFloatingIps: floating_ips - maxTotalInstances: instances - maxTotalKeypairs: key_pairs - maxTotalRAMSize: ram - totalCoresUsed: total_cores_used - - totalFloatingIpsUsed: total_floatingips_used - totalInstancesUsed: total_instances_used - totalRAMUsed: total_ram_used - - totalSecurityGroupsUsed: total_security_groups_used - totalServerGroupsUsed: total_server_groups_used + - maxSecurityGroupRules: security_group_rules_quota + - maxSecurityGroups: security_groups_quota + - maxTotalFloatingIps: floating_ips + - totalFloatingIpsUsed: total_floatingips_used + - totalSecurityGroupsUsed: total_security_groups_used + - maxImageMeta: image_metadata_items + - maxPersonality: injected_files + - maxPersonalitySize: injected_file_content_bytes - rate: limits_rates **Example Show Rate And Absolute Limits: JSON response** diff --git a/api-ref/source/os-agents.inc b/api-ref/source/os-agents.inc index 33bd9fbd3a7..f9ab86a62c3 100644 --- a/api-ref/source/os-agents.inc +++ b/api-ref/source/os-agents.inc @@ -11,6 +11,12 @@ hypervisor-specific extension is currently only for the Xen driver. Use of guest agents is possible only if the underlying service provider uses the Xen driver. +.. warning:: + + These APIs only work with the Xen virt driver, which was deprecated in the + 20.0.0 (Train) release. + They were removed in the 22.0.0 (Victoria) release. + List Agent Builds ================= @@ -20,7 +26,7 @@ Lists agent builds. Normal response codes: 200 -Error response codes: unauthorized(401), forbidden(403) +Error response codes: unauthorized(401), forbidden(403), gone(410) Request ------- @@ -58,7 +64,8 @@ Creates an agent build. Normal response codes: 200 -Error response codes: badRequest(400), unauthorized(401), forbidden(403), conflict(409) +Error response codes: badRequest(400), unauthorized(401), forbidden(403), +conflict(409), gone(410) Request ------- @@ -106,7 +113,8 @@ Updates an agent build. Normal response codes: 200 -Error response codes: badRequest(400), unauthorized(401), forbidden(403), itemNotFound(404) +Error response codes: badRequest(400), unauthorized(401), forbidden(403), +itemNotFound(404), gone(410) Request ------- @@ -150,7 +158,8 @@ Deletes an existing agent build. Normal response codes: 200 -Error response codes: badRequest(400), unauthorized(401), forbidden(403), itemNotFound(404) +Error response codes: badRequest(400), unauthorized(401), forbidden(403), +itemNotFound(404), gone(410) Request ------- diff --git a/api-ref/source/os-aggregates.inc b/api-ref/source/os-aggregates.inc index ecfefe59ac6..6021c8e8607 100644 --- a/api-ref/source/os-aggregates.inc +++ b/api-ref/source/os-aggregates.inc @@ -5,8 +5,12 @@ ================================ Creates and manages host aggregates. An aggregate assigns metadata to -groups of compute nodes. Aggregates are only visible to the cloud -provider. +groups of compute nodes. 
+ +Policy defaults enable only users with the administrative role to perform +operations with aggregates. Cloud providers can change these permissions +through `policy file configuration +`__. List Aggregates =============== @@ -31,7 +35,7 @@ Response - deleted: deleted - hosts: aggregate_host_list - id: aggregate_id_body - - metadata: aggregate_metadata + - metadata: aggregate_metadata_response - name: aggregate_name - updated_at: updated_consider_null - uuid: aggregate_uuid @@ -60,7 +64,7 @@ Request - aggregate: aggregate - name: aggregate_name - - availability_zone: aggregate_az_optional + - availability_zone: aggregate_az_optional_create **Example Create Aggregate: JSON request** @@ -117,7 +121,7 @@ Response - deleted: deleted - hosts: hosts - id: aggregate_id_body - - metadata: aggregate_metadata + - metadata: aggregate_metadata_response - name: aggregate_name - updated_at: updated_consider_null - uuid: aggregate_uuid @@ -149,7 +153,7 @@ Request - aggregate_id: aggregate_id - aggregate: aggregate - name: aggregate_name_optional - - availability_zone: aggregate_az_optional + - availability_zone: aggregate_az_optional_update **Example Update Aggregate: JSON request** @@ -168,7 +172,7 @@ Response - deleted: deleted - hosts: hosts - id: aggregate_id_body - - metadata: aggregate_metadata + - metadata: aggregate_metadata_response - name: aggregate_name - updated_at: updated_consider_null - uuid: aggregate_uuid @@ -241,7 +245,7 @@ Response - deleted: deleted - hosts: hosts - id: aggregate_id_body - - metadata: aggregate_metadata + - metadata: aggregate_metadata_response - name: aggregate_name - updated_at: updated_consider_null - uuid: aggregate_uuid @@ -291,7 +295,7 @@ Response - deleted: deleted - hosts: hosts - id: aggregate_id_body - - metadata: aggregate_metadata + - metadata: aggregate_metadata_response - name: aggregate_name - updated_at: updated_consider_null - uuid: aggregate_uuid @@ -322,7 +326,7 @@ Request - aggregate_id: aggregate_id - set_metadata: set_metadata - - metadata: metadata_object + - metadata: aggregate_metadata_request **Example Create Or Update Aggregate Metadata: JSON request** @@ -341,7 +345,7 @@ Response - deleted: deleted - hosts: hosts - id: aggregate_id_body - - metadata: aggregate_metadata + - metadata: aggregate_metadata_response - name: aggregate_name - updated_at: updated_consider_null - uuid: aggregate_uuid @@ -350,3 +354,36 @@ Response .. literalinclude:: ../../doc/api_samples/os-aggregates/v2.41/aggregates-metadata-post-resp.json :language: javascript + +Request Image Pre-caching for Aggregate +======================================= + +.. rest_method:: POST /os-aggregates/{aggregate_id}/images + +Requests that a set of images be pre-cached on compute nodes within the referenced aggregate. + +This API is available starting with microversion 2.81. + +Normal response codes: 202 + +Error response codes: badRequest(400), unauthorized(401), forbidden(403), +itemNotFound(404) + +Request +------- + +.. rest_parameters:: parameters.yaml + + - aggregate_id: aggregate_id + - cache: cache + - cache.id: image_id_body + +**Example Request Image pre-caching for Aggregate (v2.81): JSON request** + +.. literalinclude:: ../../doc/api_samples/os-aggregates/v2.81/aggregate-images-post-req.json + :language: javascript + +Response +-------- + +The response body is always empty. 
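For reference, the pre-cache request body is a ``cache`` list of objects naming
the images to pre-cache, matching the ``cache`` and ``cache.id`` parameters in
the request table above. A minimal sketch (the image UUID is illustrative
only):

.. code-block:: json

    {
        "cache": [
            {"id": "70a599e0-31e7-49b7-b260-868f441e862b"}
        ]
    }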
diff --git a/api-ref/source/os-availability-zone.inc b/api-ref/source/os-availability-zone.inc index 9869dc596c3..bcd9a3081ba 100644 --- a/api-ref/source/os-availability-zone.inc +++ b/api-ref/source/os-availability-zone.inc @@ -1,5 +1,7 @@ .. -*- rst -*- +.. _os-availability-zone: + =========================================== Availability zones (os-availability-zone) =========================================== diff --git a/api-ref/source/os-baremetal-nodes.inc b/api-ref/source/os-baremetal-nodes.inc index 0208bb453c8..c79b5bd77c3 100644 --- a/api-ref/source/os-baremetal-nodes.inc +++ b/api-ref/source/os-baremetal-nodes.inc @@ -11,7 +11,7 @@ Nova has deprecated all the proxy APIs and users should use the native APIs instead. These will fail with a 404 starting from microversion 2.36. See: `Relevant Bare metal APIs - `__. + `__. Bare metal nodes. diff --git a/api-ref/source/os-cells.inc b/api-ref/source/os-cells.inc index cbc20ad42b4..452f03459d4 100644 --- a/api-ref/source/os-cells.inc +++ b/api-ref/source/os-cells.inc @@ -1,8 +1,4 @@ .. -*- rst -*- -.. needs:parameter_verification -.. needs:example_verification -.. needs:body_verification - ============================== Cells (os-cells, capacities) @@ -11,10 +7,13 @@ Adds neighbor cells, lists neighbor cells, and shows the capabilities of the local cell. By default, only administrators can manage cells. -.. note:: These APIs refer to a Cells v1 deployment which is optional and not - recommended for new deployments of Nova. These are not used with Cells v2 - which is required beginning with the 15.0.0 Ocata release where all Nova - deployments consist of at least one Cells v2 cell. +.. warning:: + + These APIs refer to a Cells v1 deployment which was deprecated in the 16.0.0 + Pike release. These are not used with Cells v2 which is required beginning + with the 15.0.0 Ocata release where all Nova deployments consist of at least + one Cells v2 cell. + They were removed in the 20.0.0 Train release. List Cells ========== @@ -26,7 +25,7 @@ Lists cells. Normal response codes: 200 Error response codes: badRequest(400), unauthorized(401), forbidden(403), -NotImplemented(501) +gone(410), notImplemented(501) Request ------- @@ -37,8 +36,6 @@ Request - limit: limit_simple - offset: offset_simple -.. TODO(cdent): How do we indicate optionality of a URI parameter? - Response -------- @@ -54,14 +51,10 @@ Create Cell Create a new cell. -Normal response code: 200 +Normal response codes: 200 Error response codes: badRequest(400), unauthorized(401), forbidden(403), -NotImplemented(501) - -.. TODO(cdent): need to figure out body stuff for request and response - -.. TODO(cdent): need a sample +gone(410), notImplemented(501) Capacities ========== @@ -70,10 +63,10 @@ Capacities Retrieve capacities. -Error response codes: badRequest(400), unauthorized(401), forbidden(403), -NotImplemented(501) +Normal response codes: 200 -.. TODO(cdent): Need to do more digging, no idea. +Error response codes: badRequest(400), unauthorized(401), forbidden(403), +gone(410), notImplemented(501) List Cells With Details ======================= @@ -85,7 +78,7 @@ Lists cells with details of capabilities. Normal response codes: 200 Error response codes: badRequest(400), unauthorized(401), forbidden(403), -NotImplemented(501) +gone(410), notImplemented(501) Request ------- @@ -95,17 +88,6 @@ Request - limit: limit_simple - offset: offset_simple -Response --------- - -**Example List Cells With Details: JSON response** - -.. 
TODO(cdent): This sample was initially list with an empty list of cells. - The newly listed sample does not yet exist. - -.. TODO(cdent): literal-include: ../../doc/api_samples/os-cells/cells-list-details-resp.json - :language: javascript - Info For This Cell ================== @@ -113,12 +95,10 @@ Info For This Cell Retrieve info about the current cell. -Normal response code: 200 +Normal response codes: 200 Error response codes: badRequest(400), unauthorized(401), forbidden(403), -NotImplemented(501) - -.. TODO(cdent): this is weird, data is structured entirely differently. +gone(410), notImplemented(501) Show Cell Data ============== @@ -130,7 +110,7 @@ Shows data for a cell. Normal response codes: 200 Error response codes: badRequest(400), unauthorized(401), forbidden(403), -itemNotFound(404), NotImplemented(501) +itemNotFound(404), gone(410), notImplemented(501) Request ------- @@ -150,17 +130,22 @@ Response Update a Cell ============= -.. rest_method:: PUT /os-cells/{cell_od} +.. rest_method:: PUT /os-cells/{cell_id} Update an existing cell. -Normal response code: 200 +Normal response codes: 200 Error response codes: badRequest(400), unauthorized(401), forbidden(403), -itemNotFound(404), NotImplemented(501) +itemNotFound(404), gone(410), notImplemented(501) -.. TODO(cdent): Figure out what's going on here. +Request +------- + +.. rest_parameters:: parameters.yaml + + - cell_id: cell_id Delete a Cell ============= @@ -169,10 +154,17 @@ Delete a Cell Remove a cell. -Normal response code: 200 +Normal response codes: 200 Error response codes: badRequest(400), unauthorized(401), forbidden(403), -itemNotFound(404), NotImplemented(501) +itemNotFound(404), gone(410), notImplemented(501) + +Request +------- + +.. rest_parameters:: parameters.yaml + + - cell_id: cell_id Show Cell Capacities ==================== @@ -181,12 +173,10 @@ Show Cell Capacities Shows capacities for a cell. -.. TODO(cdent): What's a capacities. - -Normal response codes: 200,501 +Normal response codes: 200 Error response codes: badRequest(400), unauthorized(401), forbidden(403), -itemNotFound(404), NotImplemented(501) +itemNotFound(404), gone(410), notImplemented(501) Request ------- diff --git a/api-ref/source/os-cloudpipe.inc b/api-ref/source/os-cloudpipe.inc index 584e7fa22e5..6898bb06c55 100644 --- a/api-ref/source/os-cloudpipe.inc +++ b/api-ref/source/os-cloudpipe.inc @@ -33,7 +33,7 @@ Response - created_at: created - instance_id: instance_id_cloudpipe - internal_ip: fixed_ip - - project_id: project_id_instance_action + - project_id: project_id_server - public_ip: vpn_public_ip_resp - public_port: vpn_public_port_resp - state: vpn_state diff --git a/api-ref/source/os-consoles.inc b/api-ref/source/os-consoles.inc index b19bf706659..fe3c6545897 100644 --- a/api-ref/source/os-consoles.inc +++ b/api-ref/source/os-consoles.inc @@ -1,12 +1,15 @@ .. -*- rst -*- -=============================================================== - Server consoles (servers, os-consoles, os-console-auth-tokens) -=============================================================== +================================================== + XenServer VNC Proxy (XVP) consoles (os-consoles) +================================================== -Manages server consoles. +Manages server XVP consoles. -.. note:: This is only used in Xenserver VNC Proxy. +.. warning:: + + These APIs are only applicable when using the XenServer virt driver. + They were removed in the 21.0.0 (Ussuri) release. 
Lists Consoles ============== @@ -17,7 +20,7 @@ Lists all consoles for a server. Normal response codes: 200 -Error response codes: unauthorized(401), forbidden(403) +Error response codes: unauthorized(401), forbidden(403), gone(410) Request ------- @@ -53,7 +56,8 @@ Creates a console for a server. Normal response codes: 200 -Error response codes: unauthorized(401), forbidden(403), itemNotFound(404) +Error response codes: unauthorized(401), forbidden(403), itemNotFound(404), +gone(410) Request ------- @@ -77,7 +81,8 @@ Shows console details for a server. Normal response codes: 200 -Error response codes: unauthorized(401), forbidden(403), itemNotFound(404) +Error response codes: unauthorized(401), forbidden(403), itemNotFound(404), +gone(410) Request ------- @@ -117,7 +122,8 @@ Deletes a console for a server. Normal response codes: 202 -Error response codes: unauthorized(401), forbidden(403), itemNotFound(404) +Error response codes: unauthorized(401), forbidden(403), itemNotFound(404), +gone(410) Request ------- @@ -132,48 +138,3 @@ Response -------- If successful, this method does not return a response body. - - -Show Console Connection Information =================================== - -.. rest_method:: GET /os-console-auth-tokens/{console_token} - -Given the console authentication token for a server, -shows the related connection information. - -This method used to be available only for the ``rdp-html5`` console type before -microversion 2.31. Starting from microversion 2.31 it's available for all -console types. - -Normal response codes: 200 - -Error response codes: badRequest(400), unauthorized(401), forbidden(403), itemNotFound(404) - -Request ------- - -.. rest_parameters:: parameters.yaml - - - - console_token: console_token - -| - -Response -------- - -.. rest_parameters:: parameters.yaml - - - console: console - - instance_uuid: instance_id_body - - host: console_host - - port: port_number - - internal_access_path: internal_access_path - -| - -**Example Show Console Authentication Token** - -.. literalinclude:: ../../doc/api_samples/os-console-auth-tokens/get-console-connect-info-get-resp.json - :language: javascript diff --git a/api-ref/source/os-flavor-extra-specs.inc b/api-ref/source/os-flavor-extra-specs.inc index 4ea220a9f64..8ecbdfa15ee 100644 --- a/api-ref/source/os-flavor-extra-specs.inc +++ b/api-ref/source/os-flavor-extra-specs.inc @@ -7,6 +7,10 @@ Lists, creates, deletes, and updates the extra-specs or keys for a flavor. +Refer to `Compute Flavors +`__ +for available built-in extra specs. + List Extra Specs For A Flavor ============================= @@ -48,11 +52,6 @@ Create Extra Specs For A Flavor Creates extra specs for a flavor, by ID. -.. note:: Please reference: - `Compute Flavors - `__ - for available built-in extra specs under ``Extra Specs`` section. - Normal response codes: 200 Error response codes: unauthorized(401), forbidden(403), itemNotFound(404), diff --git a/api-ref/source/os-floating-ip-dns.inc b/api-ref/source/os-floating-ip-dns.inc index 0cd8c5a14b0..6d62b83264e 100644 --- a/api-ref/source/os-floating-ip-dns.inc +++ b/api-ref/source/os-floating-ip-dns.inc @@ -1,7 +1,6 @@ .. -*- rst -*- -.. needs:parameter_verification -.. needs:example_verification -.. needs:body_verification +.. NOTE(gmann): These APIs are deprecated so do not update this + file even if the body, examples or parameters are not complete. 
============================================= Floating IP DNS records (os-floating-ip-dns) diff --git a/api-ref/source/os-floating-ip-pools.inc b/api-ref/source/os-floating-ip-pools.inc index 4054daf41a0..7860560269c 100644 --- a/api-ref/source/os-floating-ip-pools.inc +++ b/api-ref/source/os-floating-ip-pools.inc @@ -38,7 +38,7 @@ Response .. rest_parameters:: parameters.yaml - floating_ip_pools: floating_ip_pools - - name: floating_ip_pool_name + - name: floating_ip_pool_name_or_id **Example List Floating Ip Pools: JSON response** diff --git a/api-ref/source/os-floating-ips.inc b/api-ref/source/os-floating-ips.inc index ad7588ca9da..a4911dec217 100644 --- a/api-ref/source/os-floating-ips.inc +++ b/api-ref/source/os-floating-ips.inc @@ -10,7 +10,7 @@ deprecated all the proxy APIs and users should use the native APIs instead. These will fail with a 404 starting from microversion 2.36. See: `Relevant Network APIs - `__. + `__. Lists floating IP addresses for a project. Also, creates (allocates) a floating IP address for a project, shows floating IP address details, @@ -20,17 +20,17 @@ The cloud administrator configures a pool of floating IP addresses in OpenStack Compute. The project quota defines the maximum number of floating IP addresses that you can allocate to the project. After you `allocate a floating IP -address `__ +address `__ for a project, you can: - `Add (associate) the floating IP - address `__ with an instance in the project. You can associate only one floating IP address with an instance at a time. - `Remove (disassociate) the floating IP - address `__ from an instance in the project. @@ -62,7 +62,7 @@ Response - id: floating_ip_id_value - instance_id: server_id - ip: floating_ip - - pool: floating_ip_pool_name + - pool: floating_ip_pool_name_or_id **Example List Floating Ip Addresses** @@ -86,7 +86,7 @@ can change these permissions through the ``policy.json`` file. Normal response codes: 200 -Error response codes: badRequest(400), unauthorized(401), forbidden(403), +Error response codes: badRequest(400), unauthorized(401), forbidden(403), itemNotFound(404) Request @@ -94,7 +94,7 @@ Request .. rest_parameters:: parameters.yaml - - pool: floating_ip_pool_name + - pool: floating_ip_pool_name_or_id **Example Create (Allocate) Floating Ip Address** @@ -111,7 +111,7 @@ Response - id: floating_ip_id_value - instance_id: server_id - ip: floating_ip - - pool: floating_ip_pool_name + - pool: floating_ip_pool_name_or_id **Example Create (Allocate) Floating Ip Address: JSON response** @@ -131,7 +131,7 @@ can change these permissions through the ``policy.json`` file. Normal response codes: 200 -Error response codes: badRequest(400), unauthorized(401), forbidden(403), +Error response codes: badRequest(400), unauthorized(401), forbidden(403), itemNotFound(404) Request @@ -151,7 +151,7 @@ Response - id: floating_ip_id_value - instance_id: server_id - ip: floating_ip - - pool: floating_ip_pool_name + - pool: floating_ip_pool_name_or_id **Example Show Floating Ip Address Details: JSON response** @@ -176,7 +176,7 @@ can change these permissions through the ``policy.json`` file. 
Normal response codes: 202 Error response codes: badRequest(400), unauthorized(401), forbidden(403), - itemNotFound(404) +itemNotFound(404) Request ------- diff --git a/api-ref/source/os-hypervisors.inc b/api-ref/source/os-hypervisors.inc index b6e661f8599..6363b409b40 100644 --- a/api-ref/source/os-hypervisors.inc +++ b/api-ref/source/os-hypervisors.inc @@ -12,6 +12,7 @@ for a hypervisor, lists all servers on hypervisors that match the given ``hypervisor_hostname_pattern`` or searches for hypervisors by the given ``hypervisor_hostname_pattern``. + List Hypervisors ================ @@ -64,6 +65,7 @@ Response .. literalinclude:: ../../doc/api_samples/os-hypervisors/v2.53/hypervisors-with-servers-resp.json :language: javascript + List Hypervisors Details ======================== @@ -121,7 +123,8 @@ Response - service.host: host_name_body - service.id: service_id_body_2_52 - service.id: service_id_body_2_53 - - service.disable_reason: service_disable_reason + - service.disabled_reason: service_disable_reason + - uptime: hypervisor_uptime - vcpus: hypervisor_vcpus - vcpus_used: hypervisor_vcpus_used - hypervisor_links: hypervisor_links @@ -136,20 +139,41 @@ Response .. literalinclude:: ../../doc/api_samples/os-hypervisors/v2.53/hypervisors-detail-resp.json :language: javascript -Show Hypervisor Statistics -========================== +**Example List Hypervisors Details (v2.88): JSON response** + +.. literalinclude:: ../../doc/api_samples/os-hypervisors/v2.88/hypervisors-detail-resp.json + :language: javascript + + +Show Hypervisor Statistics (DEPRECATED) +======================================= .. rest_method:: GET /os-hypervisors/statistics + max_version: 2.87 Shows summary statistics for all enabled hypervisors over all compute nodes. +.. warning:: + + This API is deprecated and will fail with HTTP 404 starting with microversion + 2.88. Use placement to get information on resource usage across hypervisors. + Policy defaults enable only users with the administrative role to perform this operation. Cloud providers can change these permissions through the ``policy.json`` file. +.. note:: + + As noted, some of the parameters in the response representing totals do not + take allocation ratios into account. This can result in a disparity between + the totals and the usages. A more accurate representation of state can be + obtained using `placement`__. + + __ https://docs.openstack.org/api-ref/placement/#list-resource-provider-usages + Normal response codes: 200 -Error response codes: unauthorized(401), forbidden(403) +Error response codes: unauthorized(401), forbidden(403), itemNotFound(404) Response -------- @@ -158,7 +182,7 @@ Response - hypervisor_statistics: hypervisor_statistics - count: hypervisor_count - - current_workload: current_workload + - current_workload: current_workload_total - disk_available_least: disk_available_least_total - free_disk_gb: hypervisor_free_disk_gb_total - free_ram_mb: free_ram_mb_total @@ -175,6 +199,7 @@ Response .. literalinclude:: ../../doc/api_samples/os-hypervisors/hypervisors-statistics-resp.json :language: javascript + Show Hypervisor Details ======================= @@ -186,6 +211,15 @@ Policy defaults enable only users with the administrative role to perform this operation. Cloud providers can change these permissions through the ``policy.json`` file. +.. note:: + + As noted, some of the parameters in the response representing totals do not + take allocation ratios into account. This can result in a disparity between + the totals and the usages. 
A more accurate representation of state can be + obtained using `placement`__. + + __ https://docs.openstack.org/api-ref/placement/#show-resource-provider-usages + Normal response codes: 200 Error response codes: badRequest(400), unauthorized(401), forbidden(403), itemNotFound(404) @@ -230,7 +264,8 @@ Response - service.host: host_name_body - service.id: service_id_body_2_52 - service.id: service_id_body_2_53 - - service.disable_reason: service_disable_reason + - service.disabled_reason: service_disable_reason + - uptime: hypervisor_uptime - vcpus: hypervisor_vcpus - vcpus_used: hypervisor_vcpus_used @@ -244,13 +279,26 @@ Response .. literalinclude:: ../../doc/api_samples/os-hypervisors/v2.53/hypervisors-show-with-servers-resp.json :language: javascript -Show Hypervisor Uptime -====================== +**Example Show Hypervisors Details (v2.88): JSON response** + +.. literalinclude:: ../../doc/api_samples/os-hypervisors/v2.88/hypervisors-show-with-servers-resp.json + :language: javascript + + +Show Hypervisor Uptime (DEPRECATED) +=================================== .. rest_method:: GET /os-hypervisors/{hypervisor_id}/uptime + max_version: 2.87 Shows the uptime for a given hypervisor. +.. warning:: + + This API is deprecated and will fail with HTTP 404 starting with + microversion 2.88. Use `Show Hypervisor Details`_ with microversion 2.88 + and later to get this information. + Policy defaults enable only users with the administrative role to perform this operation. Cloud providers can change these permissions through the ``policy.json`` file. @@ -290,8 +338,9 @@ Response .. literalinclude:: ../../doc/api_samples/os-hypervisors/v2.53/hypervisors-uptime-resp.json :language: javascript -Search Hypervisor -================= + +Search Hypervisor (DEPRECATED) +============================== .. rest_method:: GET /os-hypervisors/{hypervisor_hostname_pattern}/search max_version: 2.52 @@ -333,8 +382,9 @@ Response .. literalinclude:: ../../doc/api_samples/os-hypervisors/hypervisors-search-resp.json :language: javascript -List Hypervisor Servers -======================= + +List Hypervisor Servers (DEPRECATED) +==================================== .. 
rest_method:: GET /os-hypervisors/{hypervisor_hostname_pattern}/servers max_version: 2.52 diff --git a/api-ref/source/os-instance-actions.inc b/api-ref/source/os-instance-actions.inc index df961d45e9b..f0c191c67a5 100644 --- a/api-ref/source/os-instance-actions.inc +++ b/api-ref/source/os-instance-actions.inc @@ -34,6 +34,7 @@ Request - limit: instance_action_limit - marker: instance_action_marker - changes-since: changes_since_instance_action + - changes-before: changes_before_instance_action Response -------- @@ -45,10 +46,10 @@ Response - action: action - instance_uuid: instance_id_body - message: message - - project_id: project_id_instance_action + - project_id: project_id_server_action - request_id: request_id_body - start_time: start_time - - user_id: user_id + - user_id: user_id_server_action - updated_at: updated_instance_action - links: instance_actions_next_links @@ -100,10 +101,10 @@ Response - action: action - instance_uuid: instance_id_body - message: message - - project_id: project_id_instance_action + - project_id: project_id_server_action - request_id: request_id_body - start_time: start_time - - user_id: user_id + - user_id: user_id_server_action - events: instance_action_events_2_50 - events: instance_action_events_2_51 - events.event: event @@ -113,6 +114,7 @@ Response - events.traceback: event_traceback - events.hostId: event_hostId - events.host: event_host + - events.details: event_details - updated_at: updated_instance_action **Example Show Server Action Details For Admin (v2.62)** @@ -124,3 +126,8 @@ Response .. literalinclude:: ../../doc/api_samples/os-instance-actions/v2.62/instance-action-get-non-admin-resp.json :language: javascript + +**Example Show Server Action Details For System Reader (v2.84)** + +.. literalinclude:: ../../doc/api_samples/os-instance-actions/v2.84/instance-action-get-resp.json + :language: javascript diff --git a/api-ref/source/os-interface.inc b/api-ref/source/os-interface.inc index 62151cbc3eb..10f3a450e87 100644 --- a/api-ref/source/os-interface.inc +++ b/api-ref/source/os-interface.inc @@ -41,12 +41,18 @@ Response - mac_addr: mac_addr - net_id: net_id_resp - port_id: port_id_resp + - tag: device_tag_nic_attachment_resp **Example List Port Interfaces: JSON response** .. literalinclude:: ../../doc/api_samples/os-attach-interfaces/attach-interfaces-list-resp.json :language: javascript +**Example List Tagged Port Interfaces (v2.70): JSON response** + +.. literalinclude:: ../../doc/api_samples/os-attach-interfaces/v2.70/attach-interfaces-list-resp.json + :language: javascript + Create Interface ================ @@ -103,12 +109,18 @@ Response - net_id: net_id_resp - port_id: port_id_resp - port_state: port_state + - tag: device_tag_nic_attachment_resp **Example Create Interface: JSON response** .. literalinclude:: ../../doc/api_samples/os-attach-interfaces/attach-interfaces-create-resp.json :language: javascript +**Example Create Tagged Interface (v2.70): JSON response** + +.. literalinclude:: ../../doc/api_samples/os-attach-interfaces/v2.70/attach-interfaces-create-resp.json + :language: javascript + Show Port Interface Details =========================== @@ -142,12 +154,18 @@ Response - mac_addr: mac_addr - net_id: net_id_resp - port_id: port_id_resp + - tag: device_tag_nic_attachment_resp **Example Show Port Interface Details: JSON response** .. literalinclude:: ../../doc/api_samples/os-attach-interfaces/attach-interfaces-show-resp.json :language: javascript +**Example Show Tagged Port Interface Details (v2.70): JSON response** + +.. 
literalinclude:: ../../doc/api_samples/os-attach-interfaces/v2.70/attach-interfaces-show-resp.json + :language: javascript + Detach Interface ================ diff --git a/api-ref/source/os-keypairs.inc b/api-ref/source/os-keypairs.inc index e728ef64357..e03e7d91aaf 100644 --- a/api-ref/source/os-keypairs.inc +++ b/api-ref/source/os-keypairs.inc @@ -41,7 +41,7 @@ Response **Example List Keypairs (v2.35): JSON response** -.. literalinclude:: ../../doc/api_samples/keypairs/v2.35/keypairs-list-resp.json +.. literalinclude:: ../../doc/api_samples/os-keypairs/v2.35/keypairs-list-resp.json :language: javascript Create Or Import Keypair @@ -72,7 +72,7 @@ Request **Example Create Or Import Keypair (v2.10): JSON request** -.. literalinclude:: ../../doc/api_samples/keypairs/v2.10/keypairs-import-post-req.json +.. literalinclude:: ../../doc/api_samples/os-keypairs/v2.10/keypairs-import-post-req.json :language: javascript Response @@ -90,7 +90,7 @@ Response **Example Create Or Import Keypair (v2.10): JSON response** -.. literalinclude:: ../../doc/api_samples/keypairs/v2.10/keypairs-import-post-resp.json +.. literalinclude:: ../../doc/api_samples/os-keypairs/v2.10/keypairs-import-post-resp.json :language: javascript Show Keypair Details @@ -131,7 +131,7 @@ Response **Example Show Keypair Details (v2.10): JSON response** -.. literalinclude:: ../../doc/api_samples/keypairs/v2.10/keypairs-get-resp.json +.. literalinclude:: ../../doc/api_samples/os-keypairs/v2.10/keypairs-get-resp.json :language: javascript Delete Keypair diff --git a/api-ref/source/os-migrations.inc b/api-ref/source/os-migrations.inc index c8bc587f83b..2cfd948666f 100644 --- a/api-ref/source/os-migrations.inc +++ b/api-ref/source/os-migrations.inc @@ -17,6 +17,9 @@ Policy defaults enable only users with the administrative role to perform this operation. Cloud providers can change these permissions through the ``policy.json`` file. +Starting from microversion 2.59, the response is sorted by ``created_at`` +and ``id`` in descending order. + Normal response codes: 200 Error response codes: badRequest(400), unauthorized(401), forbidden(403) @@ -35,12 +38,16 @@ Request - limit: migration_limit - marker: migration_marker - changes-since: changes_since_migration + - changes-before: changes_before_migration + - user_id: user_id_query_migrations + - project_id: project_id_query_migrations Response -------- .. rest_parameters:: parameters.yaml + - migrations: migrations - created_at: created - dest_compute: migrate_dest_compute - dest_host: migrate_dest_host @@ -57,19 +64,20 @@ Response - links: migration_links_2_23 - uuid: migration_uuid - migrations_links: migration_next_links_2_59 + - user_id: user_id_migration_2_80 + - project_id: project_id_migration_2_80 **Example List Migrations: JSON response** .. literalinclude:: ../../doc/api_samples/os-migrations/migrations-get.json :language: javascript -**Example List Migrations (v2.59):** +**Example List Migrations (v2.80):** -.. literalinclude:: ../../doc/api_samples/os-migrations/v2.59/migrations-get.json +.. literalinclude:: ../../doc/api_samples/os-migrations/v2.80/migrations-get.json :language: javascript -**Example List Migrations With Paging (v2.59):** +**Example List Migrations With Paging (v2.80):** -.. literalinclude:: ../../doc/api_samples/os-migrations/v2.59/migrations-get-with-limit.json +.. 
literalinclude:: ../../doc/api_samples/os-migrations/v2.59/migrations-get-with-limit.json +.. literalinclude:: ../../doc/api_samples/os-migrations/v2.80/migrations-get-with-limit.json :language: javascript - diff --git a/api-ref/source/os-networks.inc b/api-ref/source/os-networks.inc index 450cd4981ad..a9ee87f69ac 100644 --- a/api-ref/source/os-networks.inc +++ b/api-ref/source/os-networks.inc @@ -1,32 +1,26 @@ .. -*- rst -*- -.. needs:parameter_verification -.. needs:example_verification -.. needs:body_verification -.. NOTE(sdague): for future verification only worry about the non - deprecated methods in this file. Let's not spend a ton of brain - power on the associate/disassociate that's going away. - -===================================== + +====================================== Networks (os-networks) (DEPRECATED) -===================================== +====================================== + +.. warning:: -.. warning:: The networks API was designed to work with - ``nova-network``. Some features are proxied to - ``neutron`` when appropriate, but as with all translation - proxies, this is far from perfect compatibility. These - APIs should be avoided in new applications in favor of - using ``neutron`` directly. These will fail with a 404 - starting from microversion 2.36. - See: `Relevant Network APIs - `__. + This API was designed to work with ``nova-network`` which was deprecated in + the 14.0.0 (Newton) release. Some + features are proxied to the Network service (neutron) when appropriate, but + as with all translation proxies, this is far from perfect compatibility. + These APIs should be avoided in new applications in favor of `using + neutron directly`__. These will fail with a 404 starting from microversion + 2.36. They were removed in the 21.0.0 (Ussuri) release. +__ https://docs.openstack.org/api-ref/network/v2/#networks Creates, lists, shows information for, and deletes networks. Adds network to a project, disassociates a network from a project, and disassociates a project from a network. - Associates host with and disassociates host from a network. List Networks ============= @@ -64,7 +58,8 @@ these permissions through the ``policy.json`` file. Normal response codes: 200 -Error response codes: badRequest(400), unauthorized(401), forbidden(403), conflict(409), NotImplemented(501) +Error response codes: badRequest(400), unauthorized(401), forbidden(403), +conflict(409), gone(410), notImplemented(501) Request ------- @@ -95,7 +90,8 @@ this operation. Cloud providers can change these permissions through the Normal response codes: 202 -Error response codes: badRequest(400), unauthorized(401), forbidden(403), NotImplemented(501) +Error response codes: badRequest(400), unauthorized(401), forbidden(403), +gone(410), notImplemented(501) Request ------- @@ -150,7 +146,8 @@ these permissions through the ``policy.json`` file. Normal response codes: 202 -Error response codes: unauthorized(401), forbidden(403), itemNotFound(404), conflict(409) +Error response codes: unauthorized(401), forbidden(403), itemNotFound(404), +conflict(409), gone(410) Request ------- @@ -164,15 +161,11 @@ Response There is no body content for the response of a successful DELETE query. -Associate Host (DEPRECATED) -=========================== +Associate Host +============== .. rest_method:: POST /os-networks/{network_id}/action -.. warning:: - This API is only available with ``nova-network`` which is - deprecated. It should be avoided in any new applications. - Associates a network with a host. Specify the ``associate_host`` action in the request body. 
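A minimal sketch of such a request body (the host name is illustrative only):

.. code-block:: json

    {
        "associate_host": "testHost"
    }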
@@ -183,7 +176,8 @@ permissions through the ``policy.json`` file. Normal response codes: 202 -Error response codes: unauthorized(401), forbidden(403), itemNotFound(404), NotImplemented(501) +Error response codes: unauthorized(401), forbidden(403), itemNotFound(404), +gone(410), notImplemented(501) Request ------- @@ -203,15 +197,11 @@ Response There is no body content for the response of a successful POST query. -Disassociate Network (DEPRECATED) -================================= +Disassociate Network +==================== .. rest_method:: POST /os-networks/{network_id}/action -.. warning:: - This API is only available with ``nova-network`` which is - deprecated. It should be avoided in any new applications. - Disassociates a network from a project. You can then reuse the network. Specify the ``disassociate`` action in the request body. @@ -222,7 +212,8 @@ these permissions through the ``policy.json`` file. Normal response codes: 202 -Error response codes: unauthorized(401), forbidden(403), itemNotFound(404), NotImplemented(501) +Error response codes: unauthorized(401), forbidden(403), itemNotFound(404), +gone(410), notImplemented(501) Request ------- @@ -241,15 +232,11 @@ Response There is no body content for the response of a successful POST query. -Disassociate Host (DEPRECATED) -============================== +Disassociate Host +================= .. rest_method:: POST /os-networks/{network_id}/action -.. warning:: - This API is only available with ``nova-network`` which is - deprecated. It should be avoided in any new applications. - Disassociates a host from a network. Specify the ``disassociate_host`` action in the request body. @@ -260,7 +247,8 @@ these permissions through the ``policy.json`` file. Normal response codes: 202 -Error response codes: unauthorized(401), forbidden(403), itemNotFound(404), NotImplemented(501) +Error response codes: unauthorized(401), forbidden(403), itemNotFound(404), +gone(410), notImplemented(501) Request ------- @@ -280,15 +268,11 @@ Response There is no body content for the response of a successful POST query. -Disassociate Project (DEPRECATED) -================================= +Disassociate Project +==================== .. rest_method:: POST /os-networks/{network_id}/action -.. warning:: - This API is only available with ``nova-network`` which is - deprecated. It should be avoided in any new applications. - Disassociates a project from a network. Specify the ``disassociate_project`` action in the request body. @@ -299,7 +283,8 @@ these permissions through the ``policy.json`` file. 
Normal response codes: 202 -Error response codes: unauthorized(401), forbidden(403), itemNotFound(404), NotImplemented(501) +Error response codes: unauthorized(401), forbidden(403), itemNotFound(404), +gone(410), notImplemented(501) Request ------- diff --git a/api-ref/source/os-quota-class-sets.inc b/api-ref/source/os-quota-class-sets.inc index d604409664f..ee7aeb4f695 100644 --- a/api-ref/source/os-quota-class-sets.inc +++ b/api-ref/source/os-quota-class-sets.inc @@ -64,21 +64,21 @@ Response - quota_class_set: quota_class_set - cores: cores_quota_class - - fixed_ips: fixed_ips_quota_class - - floating_ips: floating_ips_quota_class - id: quota_class_id_body - - injected_file_content_bytes: injected_file_content_bytes - - injected_file_path_bytes: injected_file_path_bytes - - injected_files: injected_files_quota_class - instances: instances_quota_class - key_pairs: key_pairs_quota_class - metadata_items: metadata_items - ram: ram_quota_class + - fixed_ips: fixed_ips_quota_class + - floating_ips: floating_ips_quota_class + - networks: networks_quota_optional - security_group_rules: security_group_rules_quota_class - security_groups: security_groups_quota_class - server_groups: server_groups_quota_class - server_group_members: server_group_members_quota_class - - networks: networks_quota_optional + - injected_file_content_bytes: injected_file_content_bytes + - injected_file_path_bytes: injected_file_path_bytes + - injected_files: injected_files_quota_class **Example Show A Quota Class: JSON response(2.50)** @@ -108,20 +108,20 @@ Request - id: quota_class_id - quota_class_set: quota_class_set - cores: cores_quota_class_optional - - fixed_ips: fixed_ips_quota_class_optional - - floating_ips: floating_ips_quota_class_optional - - injected_file_content_bytes: injected_file_content_bytes_quota_optional - - injected_file_path_bytes: injected_file_path_bytes_quota_optional - - injected_files: injected_files_quota_class_optional - instances: instances_quota_class_optional - key_pairs: key_pairs_quota_class_optional - metadata_items: metadata_items_quota_optional - ram: ram_quota_class_optional - - security_group_rules: security_group_rules_quota_class_optional - - security_groups: security_groups_quota_class_optional - server_groups: server_groups_quota_class_optional - server_group_members: server_group_members_quota_optional + - fixed_ips: fixed_ips_quota_class_optional + - floating_ips: floating_ips_quota_class_optional - networks: networks_quota_optional + - security_group_rules: security_group_rules_quota_class_optional + - security_groups: security_groups_quota_class_optional + - injected_file_content_bytes: injected_file_content_bytes_quota_optional + - injected_file_path_bytes: injected_file_path_bytes_quota_optional + - injected_files: injected_files_quota_class_optional **Example Update Quotas: JSON request(2.50)** @@ -135,20 +135,20 @@ Response - quota_class_set: quota_class_set - cores: cores_quota_class - - fixed_ips: fixed_ips_quota_class - - floating_ips: floating_ips_quota_class - - injected_file_content_bytes: injected_file_content_bytes - - injected_file_path_bytes: injected_file_path_bytes - - injected_files: injected_files_quota_class - instances: instances_quota_class - key_pairs: key_pairs_quota_class - metadata_items: metadata_items - ram: ram_quota_class + - fixed_ips: fixed_ips_quota_class + - floating_ips: floating_ips_quota_class + - networks: networks_quota_optional - security_group_rules: security_group_rules_quota_class - security_groups: security_groups_quota_class - 
server_groups: server_groups_quota_class - server_group_members: server_group_members_quota_class - - networks: networks_quota_optional + - injected_file_content_bytes: injected_file_content_bytes + - injected_file_path_bytes: injected_file_path_bytes + - injected_files: injected_files_quota_class **Example Update Quotas: JSON response(2.50)** diff --git a/api-ref/source/os-quota-sets.inc b/api-ref/source/os-quota-sets.inc index 30fef320eca..29d4c5e7b6e 100644 --- a/api-ref/source/os-quota-sets.inc +++ b/api-ref/source/os-quota-sets.inc @@ -39,21 +39,21 @@ Response - quota_set: quota_set - cores: cores - - fixed_ips: fixed_ips_quota - - floating_ips: floating_ips - id: quota_tenant_or_user_id_body - - injected_file_content_bytes: injected_file_content_bytes - - injected_file_path_bytes: injected_file_path_bytes - - injected_files: injected_files - instances: instances - key_pairs: key_pairs - metadata_items: metadata_items - ram: ram - - security_group_rules: security_group_rules_quota - - security_groups: security_groups_quota - server_groups: server_groups - server_group_members: server_group_members + - fixed_ips: fixed_ips_quota + - floating_ips: floating_ips - networks: networks_quota_set_optional + - security_group_rules: security_group_rules_quota + - security_groups: security_groups_quota + - injected_file_content_bytes: injected_file_content_bytes + - injected_file_path_bytes: injected_file_path_bytes + - injected_files: injected_files **Example Show A Quota: JSON response** @@ -89,20 +89,20 @@ Request - quota_set: quota_set - force: force - cores: cores_quota_optional - - fixed_ips: fixed_ips_quota_optional - - floating_ips: floating_ips_quota_optional - - injected_file_content_bytes: injected_file_content_bytes_quota_optional - - injected_file_path_bytes: injected_file_path_bytes_quota_optional - - injected_files: injected_files_quota_optional - instances: instances_quota_optional - key_pairs: key_pairs_quota_optional - metadata_items: metadata_items_quota_optional - ram: ram_quota_optional - - security_group_rules: security_group_rules - - security_groups: security_groups_quota_optional - server_groups: server_groups_quota_optional - server_group_members: server_group_members_quota_optional + - fixed_ips: fixed_ips_quota_optional + - floating_ips: floating_ips_quota_optional - networks: networks_quota_set_optional + - security_group_rules: security_group_rules + - security_groups: security_groups_quota_optional + - injected_file_content_bytes: injected_file_content_bytes_quota_optional + - injected_file_path_bytes: injected_file_path_bytes_quota_optional + - injected_files: injected_files_quota_optional **Example Update Quotas: JSON request** @@ -121,20 +121,20 @@ Response - quota_set: quota_set - cores: cores - - fixed_ips: fixed_ips_quota - - floating_ips: floating_ips - - injected_file_content_bytes: injected_file_content_bytes - - injected_file_path_bytes: injected_file_path_bytes - - injected_files: injected_files - instances: instances - key_pairs: key_pairs - metadata_items: metadata_items - ram: ram - - security_group_rules: security_group_rules_quota - - security_groups: security_groups_quota - server_groups: server_groups - server_group_members: server_group_members + - fixed_ips: fixed_ips_quota + - floating_ips: floating_ips - networks: networks_quota_set_optional + - security_group_rules: security_group_rules_quota + - security_groups: security_groups_quota + - injected_file_content_bytes: injected_file_content_bytes + - injected_file_path_bytes: 
injected_file_path_bytes + - injected_files: injected_files **Example Update Quotas: JSON response** @@ -192,21 +192,21 @@ Response - quota_set: quota_set - cores: cores - - fixed_ips: fixed_ips_quota - - floating_ips: floating_ips - id: quota_tenant_or_user_id_body - - injected_file_content_bytes: injected_file_content_bytes - - injected_file_path_bytes: injected_file_path_bytes - - injected_files: injected_files - instances: instances - key_pairs: key_pairs - metadata_items: metadata_items - ram: ram - - security_group_rules: security_group_rules_quota - - security_groups: security_groups_quota - server_groups: server_groups - server_group_members: server_group_members + - fixed_ips: fixed_ips_quota + - floating_ips: floating_ips - networks: networks_quota_set_optional + - security_group_rules: security_group_rules_quota + - security_groups: security_groups_quota + - injected_file_content_bytes: injected_file_content_bytes + - injected_file_path_bytes: injected_file_path_bytes + - injected_files: injected_files **Example List Default Quotas For Tenant: JSON response** @@ -244,21 +244,21 @@ Response - quota_set: quota_set - cores: cores_quota_details - - fixed_ips: fixed_ips_quota_details - - floating_ips: floating_ips_quota_details - id: quota_tenant_or_user_id_body - - injected_file_content_bytes: injected_file_content_bytes_quota_details - - injected_file_path_bytes: injected_file_path_bytes_quota_details - - injected_files: injected_files_quota_details - instances: instances_quota_details - key_pairs: key_pairs_quota_details - metadata_items: metadata_items_quota_details - ram: ram_quota_details - - security_group_rules: security_group_rules_quota_details - - security_groups: security_groups_quota_details - server_groups: server_groups_quota_details - server_group_members: server_group_members_quota_details + - fixed_ips: fixed_ips_quota_details + - floating_ips: floating_ips_quota_details - networks: networks_quota_set_optional + - security_group_rules: security_group_rules_quota_details + - security_groups: security_groups_quota_details + - injected_file_content_bytes: injected_file_content_bytes_quota_details + - injected_file_path_bytes: injected_file_path_bytes_quota_details + - injected_files: injected_files_quota_details **Example Show A Quota: JSON response** diff --git a/api-ref/source/os-security-group-default-rules.inc b/api-ref/source/os-security-group-default-rules.inc index 4ed6f309d2c..9d47f0ad25e 100644 --- a/api-ref/source/os-security-group-default-rules.inc +++ b/api-ref/source/os-security-group-default-rules.inc @@ -1,16 +1,15 @@ .. -*- rst -*- -.. needs:body_verification - -================================================================================ - Rules for default security group (os-security-group-default-rules) (DEPRECATED) -================================================================================ +==================================================================== + Rules for default security group (os-security-group-default-rules) +==================================================================== .. warning:: This API is only available with ``nova-network``, which is deprecated. It should be avoided in any new applications. These will fail with a 404 starting from microversion 2.36. + They were completely removed in the 21.0.0 (Ussuri) release. Lists, shows information for, and creates default security group rules. @@ -23,7 +22,8 @@ Lists default security group rules.
Normal response codes: 200 -Error response codes: unauthorized(401), forbidden(403), itemNotFound(404), notImplemented(501) +Error response codes: unauthorized(401), forbidden(403), itemNotFound(404), +gone(410), notImplemented(501) Response -------- @@ -52,7 +52,8 @@ Shows details for a security group rule. Normal response codes: 200 -Error response codes: badRequest(400), unauthorized(401), forbidden(403), itemNotFound(404), notImplemented(501) +Error response codes: badRequest(400), unauthorized(401), forbidden(403), +itemNotFound(404), gone(410), notImplemented(501) Request ------- @@ -91,7 +92,8 @@ IP protocol ( ``ip_protocol`` ) value. Otherwise, the operation returns the ``Ba Normal response codes: 200 -Error response codes: badRequest(400), unauthorized(401), forbidden(403), conflict(409), notImplemented(501) +Error response codes: badRequest(400), unauthorized(401), forbidden(403), +conflict(409), gone(410), notImplemented(501) Request ------- @@ -136,7 +138,8 @@ Deletes a security group rule. Normal response codes: 204 -Error response codes: badRequest(400), unauthorized(401), forbidden(403), itemNotFound(404), notImplemented(501) +Error response codes: badRequest(400), unauthorized(401), forbidden(403), +itemNotFound(404), gone(410), notImplemented(501) Request ------- diff --git a/api-ref/source/os-security-group-rules.inc b/api-ref/source/os-security-group-rules.inc index 09b9956c210..1a750c60d0d 100644 --- a/api-ref/source/os-security-group-rules.inc +++ b/api-ref/source/os-security-group-rules.inc @@ -10,7 +10,7 @@ deprecated all the proxy APIs and users should use the native APIs instead. These will fail with a 404 starting from microversion 2.36. See: `Relevant Network APIs - `__. + `__. Creates and deletes security group rules. diff --git a/api-ref/source/os-security-groups.inc b/api-ref/source/os-security-groups.inc index 3838d97a006..596df40cc72 100644 --- a/api-ref/source/os-security-groups.inc +++ b/api-ref/source/os-security-groups.inc @@ -1,7 +1,6 @@ .. -*- rst -*- -.. needs:parameter_verification -.. needs:example_verification -.. needs:body_verification +.. NOTE(gmann): These APIs are deprecated so do not update this + file even if the body, examples or parameters are not complete. ================================================== Security groups (os-security-groups) (DEPRECATED) ================================================== @@ -13,7 +12,7 @@ deprecated all the proxy APIs and users should use the native APIs instead. These will fail with a 404 starting from microversion 2.36. See: `Relevant Network APIs - `__. + `__. Lists, shows information for, creates, updates and deletes security groups. diff --git a/api-ref/source/os-server-external-events.inc b/api-ref/source/os-server-external-events.inc index b31c38116f6..d96bc263969 100644 --- a/api-ref/source/os-server-external-events.inc +++ b/api-ref/source/os-server-external-events.inc @@ -7,11 +7,11 @@ .. warning:: This is an ``admin`` level service API only designed to be used by other OpenStack services. The point of this API is to coordinate - between Nova and Neutron, Nova and Cinder (and potentially future - services) on activities they both need to be involved in, + between Nova and Neutron, Nova and Cinder, Nova and Ironic (and potentially + future services) on activities they both need to be involved in, such as network hotplugging. - Unless you are writing Neutron or Cinder code you **should not** + Unless you are writing Neutron, Cinder or Ironic code you **should not** be using this API. Creates one or more external events.
The API dispatches each event to a @@ -32,11 +32,15 @@ updated ``code`` and ``status`` indicating their level of success. Normal response codes: 200, 207 A 200 will be returned if all events succeeded, 207 will be returned -if some events could not be processed. The ``code`` attribute for the +if any events could not be processed. The ``code`` attribute for the event will explain further what went wrong. -Error response codes: badRequest(400), unauthorized(401), forbidden(403), -itemNotFound(404) +Error response codes: badRequest(400), unauthorized(401), forbidden(403) + +.. note:: Prior to the fix for `bug 1855752`_, error response code 404 may be + erroneously returned when all events failed. + +.. _bug 1855752: https://bugs.launchpad.net/nova/+bug/1855752 Request ------- diff --git a/api-ref/source/os-server-tags.inc b/api-ref/source/os-server-tags.inc index 96fbad73fba..9ed62702e80 100644 --- a/api-ref/source/os-server-tags.inc +++ b/api-ref/source/os-server-tags.inc @@ -47,7 +47,7 @@ Response .. rest_parameters:: parameters.yaml - - tags: tags + - tags: tags_no_min **Example List Tags:** @@ -71,7 +71,7 @@ Request .. rest_parameters:: parameters.yaml - server_id: server_id_path - - tags: tags + - tags: tags_no_min **Example Replace Tags:** @@ -83,7 +83,7 @@ Response .. rest_parameters:: parameters.yaml - - tags: tags + - tags: tags_no_min **Example Replace Tags:** diff --git a/api-ref/source/os-services.inc b/api-ref/source/os-services.inc index 0f2c5cbc271..af495b4bac2 100644 --- a/api-ref/source/os-services.inc +++ b/api-ref/source/os-services.inc @@ -21,6 +21,14 @@ Lists all running Compute services. Provides details why any services were disabled. +.. note:: Starting with microversion 2.69 if service details cannot be loaded + due to a transient condition in the deployment like infrastructure failure, + the response body for those unavailable compute services in the down cells + will be missing keys. See `handling down cells + `__ + section of the Compute API guide for more information on the keys that + would be returned in the partial constructs. + Normal response codes: 200 Error response codes: unauthorized(401), forbidden(403) @@ -50,11 +58,20 @@ Response - zone: OS-EXT-AZ:availability_zone - forced_down: forced_down_2_11 -**Example List Compute Services** +**Example List Compute Services (v2.11)** .. literalinclude:: ../../doc/api_samples/os-services/v2.11/services-list-get-resp.json :language: javascript +**Example List Compute Services (v2.69)** + +This is a sample response for the services from the non-responsive part of the +deployment. The responses for the available service records will be normal +without any missing keys. + +.. literalinclude:: ../../doc/api_samples/os-services/v2.69/services-list-get-resp.json + :language: javascript + Disable Scheduling For A Compute Service ======================================== @@ -195,7 +212,16 @@ Update Forced Down .. rest_method:: PUT /os-services/force-down -Set or unset ``forced_down`` flag for the service. +Set or unset ``forced_down`` flag for the service. ``forced_down`` is a manual +override to tell nova that the service in question has been fenced manually by +the operations team (either hard powered off, or network unplugged). That +signals that it is safe to proceed with ``evacuate`` or other operations that +nova has safety checks to prevent for hosts that are up. + +.. warning:: + + Setting a service forced down without completely fencing it will likely + result in the corruption of VMs on that host. 
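A minimal sketch of how an operator might set the flag once a host has actually been fenced, assuming the ``requests`` library plus a placeholder endpoint and token (neither is part of this reference):

.. code-block:: python

    import requests

    NOVA = "http://controller:8774/v2.1"  # assumed endpoint; adjust per deployment
    HEADERS = {
        "X-Auth-Token": "TOKEN",  # placeholder Keystone token
        # the force-down action requires at least microversion 2.11
        "X-OpenStack-Nova-API-Version": "2.11",
    }

    # Mark the fenced host's compute service as forced down so that
    # operations such as evacuate are allowed to proceed.
    payload = {
        "host": "compute-1",  # hypothetical compute host name
        "binary": "nova-compute",
        "forced_down": True,
    }
    resp = requests.put(f"{NOVA}/os-services/force-down",
                        json=payload, headers=HEADERS)
    resp.raise_for_status()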
Action ``force-down`` available as of microversion 2.11. @@ -246,7 +272,8 @@ Update Compute Service Update a compute service to enable or disable scheduling, including recording a reason why a compute service was disabled from scheduling. Set or unset the -``forced_down`` flag for the service. +``forced_down`` flag for the service. This operation is only allowed on +services whose ``binary`` is ``nova-compute``. This API is available starting with microversion 2.53. @@ -322,6 +349,12 @@ Attempts to delete a ``nova-compute`` service which is still hosting instances will result in a 409 HTTPConflict response. The instances will need to be migrated or deleted before a compute service can be deleted. +Similarly, attempts to delete a ``nova-compute`` service which is involved in +in-progress migrations will result in a 409 HTTPConflict response. The +migrations will need to be completed, for example confirming or reverting a +resize, or the instances will need to be deleted before the compute service can +be deleted. + .. important:: Be sure to stop the actual ``nova-compute`` process on the physical host *before* deleting the service with this API. Failing to do so can lead to the running service re-creating diff --git a/api-ref/source/os-simple-tenant-usage.inc b/api-ref/source/os-simple-tenant-usage.inc index 39c4af81513..570666649ea 100644 --- a/api-ref/source/os-simple-tenant-usage.inc +++ b/api-ref/source/os-simple-tenant-usage.inc @@ -8,6 +8,18 @@ Reports usage statistics of compute and storage resources periodically for an individual tenant or all tenants. The usage statistics will include all instances' CPU, memory and local disk during a specific period. +.. warning:: + + The `os-simple-tenant-usage` API reports usage statistics based on the latest + flavor configured for the virtual machine (VM), ignoring stop, + pause, and other events that might have happened to the VM. Therefore, it + uses the time the VM existed in the cloud environment to perform the usage + accounting. + + More information can be found at + http://eavesdrop.openstack.org/meetings/nova/2020/nova.2020-12-03-16.00.log.txt, + and https://review.opendev.org/c/openstack/nova/+/711113 + Microversion 2.40 added pagination (and ``next`` links) to the usage statistics via optional ``limit`` and ``marker`` query parameters. If ``limit`` isn't provided, the configurable ``max_limit`` will be used which diff --git a/api-ref/source/os-tenant-network.inc b/api-ref/source/os-tenant-network.inc index c464bdc33cf..41314fb24cb 100644 --- a/api-ref/source/os-tenant-network.inc +++ b/api-ref/source/os-tenant-network.inc @@ -1,11 +1,8 @@ .. -*- rst -*- -.. needs:parameter_verification -.. needs:example_verification -.. needs:body_verification -=================================================== +==================================================== Project networks (os-tenant-networks) (DEPRECATED) -=================================================== +==================================================== .. warning:: @@ -13,7 +10,7 @@ deprecated all the proxy APIs and users should use the native APIs instead. These will fail with a 404 starting from microversion 2.36. See: `Relevant Network APIs - `__. + `__. Creates, lists, shows information for, and deletes project networks. @@ -58,7 +55,8 @@ through the ``policy.json`` file.
Normal response codes: 200 -Error response codes: badRequest(400), unauthorized(401), forbidden(403), conflict(409), serviceUnavailable(503) +Error response codes: badRequest(400), unauthorized(401), forbidden(403), +conflict(409), gone(410), serviceUnavailable(503) **Example Create Project Network: JSON request** @@ -122,7 +120,8 @@ can change these permissions through the ``policy.json`` file. Normal response codes: 202 -Error response codes: unauthorized(401), forbidden(403), itemNotFound(404), conflict(409) +Error response codes: unauthorized(401), forbidden(403), itemNotFound(404), +conflict(409), gone(410) Request ------- diff --git a/api-ref/source/os-volume-attachments.inc b/api-ref/source/os-volume-attachments.inc index 99a125562a5..803d59dc61b 100644 --- a/api-ref/source/os-volume-attachments.inc +++ b/api-ref/source/os-volume-attachments.inc @@ -34,16 +34,25 @@ Response .. rest_parameters:: parameters.yaml - volumeAttachments: volumeAttachments - - device: device_resp - - id: attachment_id_required + - id: volume_attachment_id_resp - serverId: server_id - volumeId: volumeId_resp + - device: attachment_device_resp + - tag: device_tag_bdm_attachment_resp + - delete_on_termination: delete_on_termination_attachments_resp + - attachment_id: attachment_volume_id_resp + - bdm_uuid: attachment_bdm_id_resp **Example List volume attachments for an instance: JSON response** .. literalinclude:: ../../doc/api_samples/os-volumes/list-volume-attachments-resp.json :language: javascript +**Example List tagged volume attachments for an instance (v2.89): JSON response** + +.. literalinclude:: ../../doc/api_samples/os-volumes/v2.89/list-volume-attachments-resp.json + :language: javascript + Attach a volume to an instance ============================== @@ -63,6 +72,10 @@ Error response codes: badRequest(400), unauthorized(401), forbidden(403), itemNo to actually support a multiattach volume depends on the volume type and compute hosting the instance. +.. note:: This is an asynchronous API; callers should poll the status and list + of attachments of the volume within the volume API to determine when + the attachment has completed successfully. + Request ------- @@ -73,6 +86,7 @@ Request - volumeId: volumeId - device: device - tag: device_tag_bdm_attachment + - delete_on_termination: delete_on_termination_attachments_req **Example Attach a volume to an instance: JSON request** @@ -84,6 +98,11 @@ Request .. literalinclude:: ../../doc/api_samples/os-volumes/v2.49/attach-volume-to-server-req.json :language: javascript +**Example Attach a volume to an instance with "delete_on_termination" (v2.79): JSON request** + +.. literalinclude:: ../../doc/api_samples/os-volumes/v2.79/attach-volume-to-server-req.json :language: javascript + Response -------- @@ -91,15 +110,27 @@ Response - volumeAttachment: volumeAttachment - device: device_resp - - id: attachment_id_required + - id: attachment_id_resp - serverId: server_id - volumeId: volumeId_resp + - tag: device_tag_bdm_attachment_resp + - delete_on_termination: delete_on_termination_attachments_resp **Example Attach a volume to an instance: JSON response** .. literalinclude:: ../../doc/api_samples/os-volumes/attach-volume-to-server-resp.json :language: javascript +**Example Attach a tagged volume to an instance (v2.70): JSON response** + +.. literalinclude:: ../../doc/api_samples/os-volumes/v2.70/attach-volume-to-server-resp.json + :language: javascript + +**Example Attach a volume with "delete_on_termination" (v2.79): JSON response** + +..
literalinclude:: ../../doc/api_samples/os-volumes/v2.79/attach-volume-to-server-resp.json + :language: javascript + Show a detail of a volume attachment ==================================== @@ -125,16 +156,25 @@ Response .. rest_parameters:: parameters.yaml - volumeAttachment: volumeAttachment - - device: device_resp - - id: attachment_id_required + - id: volume_attachment_id_resp - serverId: server_id - volumeId: volumeId_resp + - device: attachment_device_resp + - tag: device_tag_bdm_attachment_resp + - delete_on_termination: delete_on_termination_attachments_resp + - attachment_id: attachment_volume_id_resp + - bdm_uuid: attachment_bdm_id_resp **Example Show a detail of a volume attachment: JSON response** .. literalinclude:: ../../doc/api_samples/os-volumes/volume-attachment-detail-resp.json :language: javascript +**Example Show a detail of a tagged volume attachment (v2.89): JSON response** + +.. literalinclude:: ../../doc/api_samples/os-volumes/v2.89/volume-attachment-detail-resp.json + :language: javascript + Update a volume attachment ========================== @@ -145,9 +185,28 @@ Update a volume attachment. .. note:: This action is only valid when the server is in ACTIVE, PAUSED and RESIZED state, or a conflict(409) error will be returned. -Policy defaults enable only users with the administrative role or -the owner of the server to perform this operation. Cloud providers -can change these permissions through the ``policy.json`` file. +.. warning:: When updating volumeId, this API is typically meant to + only be used as part of a larger orchestrated volume + migration operation initiated in the block storage + service via the ``os-retype`` or ``os-migrate_volume`` + volume actions. Direct usage of this API to update + volumeId is not recommended and may result in needing to + hard reboot the server to update details within the guest + such as block storage serial IDs. Furthermore, updating + volumeId via this API is only implemented by `certain + compute drivers`_. + +.. _certain compute drivers: https://docs.openstack.org/nova/latest/user/support-matrix.html#operation_swap_volume + +Policy default role is 'rule:system_admin_or_owner'; its scope is +[system, project], which allows project members or system admins to +change the fields of an attached volume of a server. Policy defaults +enable only users with the administrative role to change ``volumeId`` +via this operation. Cloud providers can change these permissions +through the ``policy.json`` file. + +Updating, or what is commonly referred to as "swapping", volume attachments +with volumes that have more than one read/write attachment is not supported. Normal response codes: 202 @@ -162,10 +221,19 @@ Request - volume_id: volume_id_swap_src - volumeAttachment: volumeAttachment_put - volumeId: volumeId_swap + - delete_on_termination: delete_on_termination_put_req + - device: attachment_device_put_req + - serverId: attachment_server_id_put_req + - tag: device_tag_bdm_attachment_put_req + - id: attachment_id_put_req -**Example Update a volume attachment: JSON request** +.. note:: Other than ``volumeId``, as of v2.85 only + ``delete_on_termination`` may be changed from the current + value. -.. literalinclude:: ../../doc/api_samples/os-volumes/update-volume-req.json +**Example Update a volume attachment (v2.85): JSON request** + +..
literalinclude:: ../../doc/api_samples/os-volumes/v2.85/update-volume-attachment-delete-flag-req.json :language: javascript Response @@ -187,6 +255,11 @@ Error response codes: badRequest(400), unauthorized(401), forbidden(403), itemNo .. note:: From v2.20, detaching a volume from an instance in SHELVED or SHELVED_OFFLOADED state is allowed. +.. note:: This is an asynchronous API; callers should poll the list + of volume attachments provided by ``GET + /servers/{server_id}/os-volume_attachments`` to determine when the + detachment of the volume has completed successfully. + Request ------- diff --git a/api-ref/source/os-volumes.inc b/api-ref/source/os-volumes.inc index 1b99a80896f..1f711978900 100644 --- a/api-ref/source/os-volumes.inc +++ b/api-ref/source/os-volumes.inc @@ -10,7 +10,7 @@ deprecated all the proxy APIs and users should use the native APIs instead. These will fail with a 404 starting from microversion 2.36. See: `Relevant Volume APIs - `__. + `__. Manages volumes and snapshots for use with the Compute API. Lists, shows details, creates, and deletes volumes and snapshots. @@ -50,7 +50,7 @@ Response - displayDescription: display_description - displayName: display_name - id: volume_id_resp - - metadata: metadata_object + - metadata: metadata_object - size: size - snapshotId: snapshot_id - status: volume_status @@ -110,7 +110,7 @@ Response - displayName: display_name - displayDescription: display_description - id: volume_id_resp - - metadata: metadata_object + - metadata: metadata_object - size: size - snapshotId: snapshot_id - status: volume_status @@ -158,7 +158,7 @@ Response - displayName: display_name - displayDescription: display_description - id: volume_id_resp - - metadata: metadata_object + - metadata: metadata_object - size: size - snapshotId: snapshot_id - status: volume_status @@ -205,7 +205,7 @@ Response - displayName: display_name - displayDescription: display_description - id: volume_id_resp - - metadata: metadata_object + - metadata: metadata_object - size: size - snapshotId: snapshot_id - status: volume_status diff --git a/api-ref/source/parameters.yaml b/api-ref/source/parameters.yaml index 74621f2e226..5ea19faab93 100644 --- a/api-ref/source/parameters.yaml +++ b/api-ref/source/parameters.yaml @@ -25,7 +25,6 @@ tag_location: in: header required: true type: string - min_version: 2.26 x-compute-request-id_resp: description: | The local request ID, which is a unique ID generated automatically @@ -324,7 +323,6 @@ tag: in: path required: true type: string - min_version: 2.26 tenant_id: description: | The UUID of the tenant in a multi-tenancy cloud. @@ -398,7 +396,7 @@ all_tenants_query: description: | Specify the ``all_tenants`` query parameter to list all instances for all projects. By default this is only allowed by administrators. - If the value of this parameter is not specified, it is treated as + If this parameter is specified without a value, the value defaults to ``True``. If the value is specified, ``1``, ``t``, ``true``, ``on``, ``y`` and ``yes`` are treated as ``True``. ``0``, ``f``, ``false``, ``off``, ``n`` and ``no`` are treated as ``False``. @@ -419,8 +417,9 @@ availability_zone_query_server: description: | Filter the server list result by server availability zone. - This parameter is only valid when specified by administrators. - If non-admin users specify this parameter, it is ignored. + This parameter is restricted to administrators until microversion 2.83.
+ If non-admin users specify this parameter on a microversion less than 2.83, + it will be ignored. in: query required: false type: string @@ -453,6 +452,70 @@ changes-since: in: query required: false type: string +changes_before_instance_action: + description: | + Filters the response by a date and time stamp when the instance actions last changed. + Those instances that changed before or equal to the specified date and time stamp + are returned. + + The date and time stamp format is `ISO 8601 `_: + :: + + CCYY-MM-DDThh:mm:ss±hh:mm + + The ``±hh:mm`` value, if included, returns the time zone as an offset from UTC. + For example, ``2015-08-27T09:49:58-05:00``. + If you omit the time zone, the UTC time zone is assumed. + When both ``changes-since`` and ``changes-before`` are specified, + the value of the ``changes-before`` must be later than or equal to + the value of the ``changes-since``; otherwise, the API will return 400. + in: query + required: false + type: string + min_version: 2.66 +changes_before_migration: + description: | + Filters the response by a date and time stamp when the migration last + changed. Those migrations that changed before or equal to the specified date and time + stamp are returned. + + The date and time stamp format is `ISO 8601 `_: + :: + + CCYY-MM-DDThh:mm:ss±hh:mm + + The ``±hh:mm`` value, if included, returns the time zone as an offset from UTC. + For example, ``2015-08-27T09:49:58-05:00``. + If you omit the time zone, the UTC time zone is assumed. + When both ``changes-since`` and ``changes-before`` are specified, + the value of the ``changes-before`` must be later than or equal to + the value of the ``changes-since``; otherwise, the API will return 400. + in: query + required: false + type: string + min_version: 2.66 +changes_before_server: + description: | + Filters the response by a date and time stamp when the server last changed. + Those servers that changed before or equal to the specified date and time stamp + are returned. To help keep track of changes this may also return recently deleted + servers. + + The date and time stamp format is `ISO 8601 `_: + :: + + CCYY-MM-DDThh:mm:ss±hh:mm + + The ``±hh:mm`` value, if included, returns the time zone as an offset from UTC. + For example, ``2015-08-27T09:49:58-05:00``. + If you omit the time zone, the UTC time zone is assumed. + When both ``changes-since`` and ``changes-before`` are specified, + the value of the ``changes-before`` must be later than or equal to + the value of the ``changes-since``; otherwise, the API will return 400. + in: query + required: false + type: string + min_version: 2.66 changes_since_instance_action: description: | Filters the response by a date and time stamp when the instance action last @@ -466,6 +529,9 @@ changes_since_instance_action: The ``±hh:mm`` value, if included, returns the time zone as an offset from UTC. For example, ``2015-08-27T09:49:58-05:00``. If you omit the time zone, the UTC time zone is assumed. + When both ``changes-since`` and ``changes-before`` are specified, + the value of the ``changes-since`` must be earlier than or equal to + the value of the ``changes-before``; otherwise, the API will return 400. in: query required: false type: string @@ -483,6 +549,9 @@ changes_since_migration: The ``±hh:mm`` value, if included, returns the time zone as an offset from UTC. For example, ``2015-08-27T09:49:58-05:00``. If you omit the time zone, the UTC time zone is assumed.
+ When both ``changes-since`` and ``changes-before`` are specified, + the value of the ``changes-since`` must be earlier than or equal to + the value of the ``changes-before``; otherwise, the API will return 400. in: query required: false type: string @@ -501,6 +570,9 @@ changes_since_server: The ``±hh:mm`` value, if included, returns the time zone as an offset from UTC. For example, ``2015-08-27T09:49:58-05:00``. If you omit the time zone, the UTC time zone is assumed. + When both ``changes-since`` and ``changes-before`` are specified, + the value of the ``changes-since`` must be earlier than or equal to + the value of the ``changes-before``; otherwise, the API will return 400. in: query required: false type: string @@ -508,8 +580,9 @@ config_drive_query_server: description: | Filter the server list result by the config drive setting of the server. - This parameter is only valid when specified by administrators. - If non-admin users specify this parameter, it is ignored. + This parameter is restricted to administrators until microversion 2.83. + If non-admin users specify this parameter on a microversion less than 2.83, + it will be ignored. in: query required: false type: string @@ -526,8 +599,9 @@ created_at_query_server: For example, ``2015-08-27T09:49:58-05:00``. If you omit the time zone, the UTC time zone is assumed. - This parameter is only valid when specified by administrators. - If non-admin users specify this parameter, it is ignored. + This parameter is restricted to administrators until microversion 2.83. + If non-admin users specify this parameter on a microversion less than 2.83, + it will be ignored. in: query required: false type: string @@ -626,11 +700,17 @@ exclude: flavor_is_public_query: in: query required: false - type: boolean + type: string description: | - Filters the flavor list by only public flavors. By default ``non - admin`` users only see public flavors, and ``admin`` users can see - additional non public flavors. + This parameter is only applicable to users with the administrative role. + For all other non-admin users, the parameter is ignored and only public + flavors will be returned. Filters the flavor list based on whether the + flavor is public or private. If the value of this parameter is not + specified, it is treated as ``True``. If the value is specified, ``1``, + ``t``, ``true``, ``on``, ``y`` and ``yes`` are treated as ``True``. ``0``, + ``f``, ``false``, ``off``, ``n`` and ``no`` are treated as ``False`` + (they are case-insensitive). If the value is ``None`` (case-insensitive) + both public and private flavors will be listed in a single request. flavor_query: description: | Filters the response by a flavor, as a UUID. A flavor is a combination of memory, @@ -657,8 +737,10 @@ hostname_query_server: description: | Filter the server list result by the host name of server. - This parameter is only valid when specified by administrators. - If non-admin users specify this parameter, it is ignored. + This parameter is only valid when specified by administrators until + microversion 2.90, after which it can be specified by all users. + If non-admin users specify this parameter before microversion 2.90, it is + ignored. in: query required: false type: string @@ -811,8 +893,9 @@ key_name_query_server: description: | Filter the server list result by keypair name. - This parameter is only valid when specified by administrators. - If non-admin users specify this parameter, it is ignored. + This parameter is restricted to administrators until microversion 2.83.
+ If non-admin users specify this parameter on a microversion less than 2.83, + it will be ignored. in: query required: false type: string @@ -865,8 +948,9 @@ launched_at_query_server: For example, ``2015-08-27T09:49:58-05:00``. If you omit the time zone, the UTC time zone is assumed. - This parameter is only valid when specified by administrators. - If non-admin users specify this parameter, it is ignored. + This parameter is restricted to administrators until microversion 2.83. + If non-admin users specify this parameter on a microversion less than 2.83, + it will be ignored. in: query required: false type: string @@ -898,6 +982,18 @@ locked_by_query_server: in: query required: false type: string +locked_query_server: + description: | + Specify the ``locked`` query parameter to list all locked or unlocked + instances. If the value is specified, ``1``, ``t``, ``true``, + ``on``, ``y`` and ``yes`` are treated as ``True``. ``0``, ``f``, + ``false``, ``off``, ``n`` and ``no`` are treated as ``False``. + (They are case-insensitive.) Any other value provided will be considered + invalid. + in: query + required: false + type: boolean + min_version: 2.73 marker: description: | The ID of the last-seen item. Use the ``limit`` parameter to make an initial limited @@ -961,7 +1057,12 @@ migration_status: type: string migration_type: description: | - The type of migration to filter. + The type of migration to filter. Valid values are: + + * ``evacuation`` + * ``live-migration`` + * ``migration`` + * ``resize`` in: query required: false type: string @@ -973,7 +1074,7 @@ minDisk: type: integer minRam: description: | - Filters the response by a minimum RAM, in MB. For example, ``512``. + Filters the response by a minimum RAM, in MiB. For example, ``512``. in: query required: false type: integer @@ -1027,18 +1128,27 @@ power_state_query_server: 6: CRASHED 7: SUSPENDED - This parameter is only valid when specified by administrators. - If non-admin users specify this parameter, it is ignored. + This parameter is restricted to administrators until microversion 2.83. + If non-admin users specify this parameter on a microversion less than 2.83, + it will be ignored. progress_query_server: description: | Filter the server list result by the progress of the server. The value could be from 0 to 100 as integer. - This parameter is only valid when specified by administrators. - If non-admin users specify this parameter, it is ignored. + This parameter is restricted to administrators until microversion 2.83. + If non-admin users specify this parameter on a microversion less than 2.83, + it will be ignored. in: query required: false type: integer +project_id_query_migrations: + description: | + Filter the migrations by the given project ID. + in: query + required: false + type: string + min_version: 2.80 project_id_query_server: description: | Filter the list of servers by the given project ID. @@ -1109,7 +1219,7 @@ server_status_query: description: | Filters the response by a server status, as a string. For example, ``ACTIVE``. - Up to microversion 2.37, an empty list is returnd if an invalid status is + Up to microversion 2.37, an empty list is returned if an invalid status is specified. Starting from microversion 2.38, a 400 error is returned in that case. in: query @@ -1136,7 +1246,7 @@ sort_dir_flavor: Sort direction. A valid value is ``asc`` (ascending) or ``desc`` (descending). Default is ``asc``. You can specify multiple pairs of sort key and sort direction query parameters. 
If you omit the sort direction in a pair, the API uses the natural - sorting direction of the direction of the flavor ``sort_key`` attribute. + sorting direction of the flavor ``sort_key`` attribute. in: query required: false type: string @@ -1145,7 +1255,7 @@ sort_dir_server: Sort direction. A valid value is ``asc`` (ascending) or ``desc`` (descending). Default is ``desc``. You can specify multiple pairs of sort key and sort direction query parameters. If you omit the sort direction in a pair, the API uses the natural - sorting direction of the direction of the server ``sort_key`` attribute. + sorting direction of the server ``sort_key`` attribute. in: query required: false type: string @@ -1154,7 +1264,23 @@ sort_key_flavor: Sorts by a flavor attribute. Default attribute is ``flavorid``. You can specify multiple pairs of sort key and sort direction query parameters. If you omit the sort direction in a pair, the API uses the natural sorting direction of the flavor - ``sort_key`` attribute. + ``sort_key`` attribute. The sort keys are limited to: + + - ``created_at`` + - ``description`` + - ``disabled`` + - ``ephemeral_gb`` + - ``flavorid`` + - ``id`` + - ``is_public`` + - ``memory_mb`` + - ``name`` + - ``root_gb`` + - ``rxtx_factor`` + - ``swap`` + - ``updated_at`` + - ``vcpu_weight`` + - ``vcpus`` in: query required: false type: string @@ -1182,6 +1308,7 @@ sort_key_server: - ``key_name`` - ``launch_index`` - ``launched_at`` + - ``locked`` (New in version 2.73) - ``locked_by`` - ``node`` - ``power_state`` @@ -1253,8 +1380,9 @@ task_state_query_server: description: | Filter the server list result by task state. - This parameter is only valid when specified by administrators. - If non-admin users specify this parameter, it is ignored. + This parameter is restricted to administrators until microversion 2.83. + If non-admin users specify this parameter on a microversion less than 2.83, + it will be ignored. tenant_id_query: description: | Specify the project ID (tenant ID) to show the rate and absolute limits. @@ -1274,8 +1402,9 @@ terminated_at_query_server: For example, ``2015-08-27T09:49:58-05:00``. If you omit the time zone, the UTC time zone is assumed. - This parameter is only valid when specified by administrators. - If non-admin users specify this parameter, it is ignored. + This parameter is restricted to administrators until microversion 2.83. + If non-admin users specify this parameter on a microversion less than 2.83, + it will be ignored. in: query required: false type: string @@ -1298,6 +1427,13 @@ usage_marker: required: false type: string min_version: 2.40 +user_id_query_migrations: + description: | + Filter the migrations by the given user ID. + in: query + required: false + type: string + min_version: 2.80 user_id_query_quota: description: | ID of user to list the quotas for. @@ -1314,8 +1450,9 @@ user_id_query_server: description: | Filter the list of servers by the given user ID. - This parameter is only valid when specified by administrators. - If non-admin users specify this parameter, it is ignored. + This parameter is restricted to administrators until microversion 2.83. + If non-admin users specify this parameter on a microversion less than 2.83, + it will be ignored. in: query required: false type: string @@ -1344,8 +1481,9 @@ vm_state_query_server: - ``STOPPED`` - ``SUSPENDED`` - This parameter is only valid when specified by administrators. - If non-admin users specify this parameter, it is ignored. 
+ This parameter is restricted to administrators until microversion 2.83. + If non-admin users specify this parameter on a microversion less than 2.83, + it will be ignored. in: query required: false type: string @@ -1416,12 +1554,8 @@ address: type: string addresses: description: | - The addresses for the server. Addresses information is hidden for any server - in a state set in the ``hide_server_address_states`` configuration option. - By default, servers in ``building`` state hide their addresses information. - See ``nova.conf`` `configuration options - `_ - for more information. + The addresses for the server. Servers with status ``BUILD`` hide their + addresses information. in: body required: true type: object @@ -1531,12 +1665,26 @@ aggregate_az: in: body required: true type: string -aggregate_az_optional: +aggregate_az_optional_create: + description: | + The availability zone of the host aggregate. You should use a custom + availability zone rather than the default returned by the + os-availability-zone API. The availability zone must not include ':' + in its name. + in: body + required: false + type: string +aggregate_az_optional_update: description: | The availability zone of the host aggregate. You should use a custom availability zone rather than the default returned by the os-availability-zone API. The availability zone must not include ':' in its name. + + .. warning:: You should not change or unset the availability zone of an + aggregate when that aggregate has hosts which contain servers in it + since that may impact the ability for those servers to move to another + host. in: body required: false type: string @@ -1552,9 +1700,26 @@ aggregate_id_body: in: body required: true type: integer -aggregate_metadata: +aggregate_metadata_request: + description: | + Metadata key and value pairs associated with the aggregate. + The maximum size for each metadata key and value pair is 255 bytes. + + New keys will be added to existing aggregate metadata. For existing + keys, if the value is ``null`` the entry is removed, otherwise the + value is updated. Note that the special ``availability_zone`` metadata + entry cannot be unset to ``null``. + + .. warning:: You should not change the availability zone of an + aggregate when that aggregate has hosts which contain servers in it + since that may impact the ability for those servers to move to another + host. + in: body + required: true + type: object +aggregate_metadata_response: description: | - Metadata key and value pairs associate with the aggregate. + Metadata key and value pairs associated with the aggregate. in: body required: true type: object @@ -1613,30 +1778,59 @@ associate_host: in: body required: true type: string +attachment_bdm_id_resp: + description: | + The UUID of the block device mapping record in Nova for the attachment. + in: body + required: true + type: string + min_version: 2.89 +attachment_device_put_req: + description: | + Name of the device in the attachment object, such as, ``/dev/vdb``. + in: body + required: false + type: string + min_version: 2.85 attachment_device_resp: description: | Name of the device in the attachment object, such as, ``/dev/vdb``. in: body required: false type: string -attachment_id_required: +attachment_id_put_req: description: | The UUID of the attachment. in: body - required: true + required: false type: string + min_version: 2.85 attachment_id_resp: description: | The UUID of the attachment. 
in: body required: false type: string +attachment_server_id_put_req: + description: | + The UUID of the server. + in: body + required: false + type: string + min_version: 2.85 attachment_server_id_resp: description: | The UUID of the server. in: body required: false type: string +attachment_volume_id_resp: + description: | + The UUID of the associated volume attachment in Cinder. + in: body + required: true + type: string + min_version: 2.89 attachment_volumeId_resp: description: | The UUID of the attached volume. @@ -1661,6 +1855,15 @@ availability_zone_state: in: body required: true type: object +availability_zone_unshelve: + description: | + The availability zone name. Specifying an availability zone is only + allowed when the server status is ``SHELVED_OFFLOADED`` otherwise a + 409 HTTPConflict response is returned. + in: body + required: false + type: string + min_version: 2.77 available: description: | Returns true if the availability zone is available. @@ -1836,6 +2039,11 @@ boot_index: in: body required: true type: integer +cache: + description: A list of image objects to cache. + in: body + required: true + type: array certificate: description: | The certificate object. @@ -1880,7 +2088,7 @@ code: type: string config_drive: description: | - Indicates whether a configuration drive enables metadata injection. The config_drive + Indicates whether a config drive enables metadata injection. The config_drive setting provides information about a drive that the instance can mount at boot time. The instance reads files from the drive to get information that is normally available through the metadata service. This metadata is different from the user @@ -1904,6 +2112,15 @@ config_drive_resp: in: body required: true type: string +config_drive_resp_update_rebuild: + description: | + Indicates whether or not a config drive was used for this server. + The value is ``True`` or an empty string. An empty string stands for + ``False``. + in: body + required: true + type: string + min_version: 2.75 configure_project_cloudpipe: description: | VPN IP and Port information to configure the cloudpipe instance.. @@ -2019,6 +2236,7 @@ cpu_info: in: body required: true type: object + max_version: 2.87 create_info: description: | Information for snapshot creation. @@ -2073,9 +2291,20 @@ createImage: type: object current_workload: description: | - The current_workload is the number of tasks the hypervisor is responsible for. This will be - equal or greater than the number of active VMs on the system (it can be greater when VMs - are being deleted and the hypervisor is still cleaning up). + The current_workload is the number of tasks the hypervisor is responsible + for. This will be equal or greater than the number of active VMs on the + system (it can be greater when VMs are being deleted and the hypervisor is + still cleaning up). + in: body + required: true + type: integer + max_version: 2.87 +current_workload_total: + description: | + The current_workload is the number of tasks the hypervisors are responsible + for. This will be equal or greater than the number of active VMs on the + systems (it can be greater when VMs are being deleted and a hypervisor is + still cleaning up). in: body required: true type: integer @@ -2092,6 +2321,30 @@ delete_on_termination: in: body required: false type: boolean +delete_on_termination_attachments_req: + description: | + To delete the attached volume when the server is destroyed, specify ``true``. + Otherwise, specify ``false``. 
Default: ``false`` + in: body + required: false + type: boolean + min_version: 2.79 +delete_on_termination_attachments_resp: + description: | + A flag indicating if the attached volume will be deleted when the server is + deleted. + in: body + required: true + type: boolean + min_version: 2.79 +delete_on_termination_put_req: + description: | + A flag indicating if the attached volume will be deleted when the server is + deleted. + in: body + required: false + type: boolean + min_version: 2.85 deleted: description: | A boolean indicates whether this aggregate is deleted or not, if it has @@ -2182,6 +2435,20 @@ device_tag_bdm_attachment: required: false type: string min_version: 2.49 +device_tag_bdm_attachment_put_req: + description: | + The device tag applied to the volume block device or ``null``. + in: body + required: true + type: string + min_version: 2.85 +device_tag_bdm_attachment_resp: + description: | + The device tag applied to the volume block device or ``null``. + in: body + required: true + type: string + min_version: 2.70 device_tag_nic: description: | A device role tag that can be applied to a network interface. The guest OS @@ -2206,12 +2473,36 @@ device_tag_nic_attachment: required: false type: string min_version: 2.49 +device_tag_nic_attachment_resp: + description: | + The device tag applied to the virtual network interface or ``null``. + in: body + required: true + type: string + min_version: 2.70 device_type: description: | The device type. For example, ``disk``, ``cdrom``. in: body required: false type: string +device_volume_type: + description: | + The device ``volume_type``. This can be used to specify the type of volume + which the compute service will create and attach to the server. + If not specified, the block storage service will provide a default volume + type. See the `block storage volume types API `_ + for more details. + There are some restrictions on ``volume_type``: + + - It can be a volume type ID or name. + - It is only supported with ``source_type`` of ``blank``, ``image`` or + ``snapshot``. + - It is only supported with ``destination_type`` of ``volume``. + in: body + required: false + type: string + min_version: 2.67 # Optional input parameter in the body for PUT /os-services/{service_id} added # in microversion 2.53. disabled_reason_2_53_in: @@ -2229,22 +2520,28 @@ disabled_reason_body: type: string disk_available_least: description: | - The actual free disk on this hypervisor(in GB). + The actual free disk on this hypervisor(in GiB). If allocation ratios used + for overcommit are configured, this may be negative. This is intentional as + it provides insight into the amount by which the disk is overcommitted. in: body required: true type: integer + max_version: 2.87 disk_available_least_total: description: | - The actual free disk on all hypervisors(in GB). + The actual free disk on all hypervisors(in GiB). If allocation ratios used + for overcommit are configured, this may be negative. This is intentional as + it provides insight into the amount by which the disk is overcommitted. in: body required: true type: integer disk_bus: description: | Disk bus type, some hypervisors (currently only libvirt) support - specify this parameter. Some example disk_bus values can be: `ide`, - `usb`, `virtio`, `scsi`. This is not an exhaustive list as it depends - on the virtualization driver, and may change as more support is added. + specifying this parameter. 
Some example disk_bus values can be: ``fdc``, + ``ide``, ``sata``, ``scsi``, ``usb``, ``virtio``, ``xen``, ``lxc`` + and ``uml``. Support for each bus type depends on the virtualization driver + and underlying hypervisor. in: body required: false type: string @@ -2374,6 +2671,13 @@ event: in: body required: true type: string +event_details: + min_version: 2.84 + description: | + Details of the event. May be ``null``. + in: body + required: true + type: string event_finish_time: description: | The date and time when the event was finished. The date and time @@ -2414,9 +2718,16 @@ event_hostId: type: string event_name: description: | - The event name. A valid value is ``network-changed``, ``network-vif-plugged``, - ``network-vif-unplugged``, ``network-vif-deleted``, or ``volume-extended``. - The event name ``volume-extended`` is added since microversion ``2.51``. + The event name. A valid value is: + + - ``network-changed`` + - ``network-vif-plugged`` + - ``network-vif-unplugged`` + - ``network-vif-deleted`` + - ``volume-extended`` (since microversion ``2.51``) + - ``power-update`` (since microversion ``2.76``) + - ``accelerator-request-bound`` (since microversion ``2.82``) + in: body required: true type: string @@ -2450,7 +2761,15 @@ event_status: type: string event_tag: description: | - A string value that identifies the event. + A string value that identifies the event. Certain types of events require + specific tags: + + - For the ``accelerator-request-bound`` event, the tag must be + the accelerator request UUID. + - For the ``power-update`` event, the tag must either be ``POWER_ON`` + or ``POWER_OFF``. + - For the ``volume-extended`` event, the tag must be the volume id. + in: body required: false type: string @@ -2466,7 +2785,7 @@ event_traceback: type: string events: description: | - The action. + List of external events to process. in: body required: true type: array @@ -2692,7 +3011,6 @@ flavor_description_required: type: string in: body required: true - min_version: 2.55 description: | A free form description of the flavor. Limited to 65535 characters in length. Only printable characters are allowed. @@ -2703,6 +3021,12 @@ flavor_description_resp: required: true type: string min_version: 2.55 +flavor_description_resp_no_min: + description: | + The description of the flavor. + in: body + required: true + type: string flavor_disabled: in: body required: false @@ -2717,7 +3041,7 @@ flavor_disk: description: | The size of the root disk that will be created in GiB. If 0 the root disk will be set to exactly the size of the image used to - deploy the instance. However, in this case filter scheduler cannot + deploy the instance. However, in this case the scheduler cannot select the compute host based on the virtual image size. Therefore, 0 should only be used for volume booted instances or for testing purposes. Volume-backed instances can be enforced for flavors with @@ -2825,7 +3149,7 @@ flavor_links_2_46: description: | Links to the flavor resource. See `API Guide / Links and References - `_ + `_ for more info. in: body required: true @@ -2896,6 +3220,8 @@ flavor_swap: The size of a dedicated swap disk that will be allocated, in MiB. If 0 (the default), no dedicated swap disk will be created. Currently, the empty string ('') is used to represent 0. + As of microversion 2.75, the default return value of swap is 0 + instead of the empty string.
in: body required: true type: integer @@ -2968,16 +3294,22 @@ floating_ip_obj: type: object floating_ip_pool_name: description: | - The name of the floating ip pool. + The name of the floating IP pool. in: body required: true type: string floating_ip_pool_name_optional: description: | - The name of the floating ip pool + The name of the floating IP pool in: body required: false type: string +floating_ip_pool_name_or_id: + description: | + The name or ID of the floating IP pool. + in: body + required: true + type: string floating_ip_pools: description: | The ``floating_ip_pools`` object. @@ -3048,6 +3380,7 @@ force_evacuate: required: false type: boolean min_version: 2.29 + max_version: 2.67 force_live_migrate: description: | Force a live-migration by not verifying the provided destination host by @@ -3061,13 +3394,13 @@ force_live_migrate: required: false type: boolean min_version: 2.30 + max_version: 2.67 force_migration_complete: description: | The action to force an in-progress live migration to complete. in: body required: true type: none - min_version: 2.22 force_snapshot: description: | Indicates whether to create a snapshot, even if the volume is attached. @@ -3079,8 +3412,9 @@ force_snapshot: forced_down_2_11: description: | Whether or not this service was forced down manually by an - administrator. This value is useful to know that some 3rd party has - verified the service should be marked down. + administrator after the service was fenced. This value is useful + to know that some 3rd party has verified the service should be + marked down. in: body required: true type: boolean @@ -3089,9 +3423,17 @@ forced_down_2_11: # PUT /os-services/{service_id} added in 2.53. forced_down_2_53_in: description: | - Whether or not this service was forced down manually by an - administrator. This value is useful to know that some 3rd party has - verified the service should be marked down. + ``forced_down`` is a manual override to tell nova that the service in + question has been fenced manually by the operations team (either hard + powered off, or network unplugged). That signals that it is safe to proceed + with ``evacuate`` or other operations that nova has safety checks to + prevent for hosts that are up. + + .. warning:: + + Setting a service forced down without completely fencing it will likely + result in the corruption of VMs on that host. + in: body required: false type: boolean @@ -3100,8 +3442,9 @@ forced_down_2_53_in: forced_down_2_53_out: description: | Whether or not this service was forced down manually by an - administrator. This value is useful to know that some 3rd party has - verified the service should be marked down. + administrator after the service was fenced. This value is useful + to know that some 3rd party has verified the service should be + marked down. in: body required: true type: boolean @@ -3113,13 +3456,16 @@ forceDelete: type: none free_ram_mb: description: | - The free RAM in this hypervisor(in MB). + The free RAM in this hypervisor(in MiB). This does not take allocation + ratios used for overcommit into account so this value may be negative. in: body required: true type: integer + max_version: 2.87 free_ram_mb_total: description: | - The free RAM on all hypervisors(in MB). + The free RAM on all hypervisors(in MiB). This does not take allocation + ratios used for overcommit into account so this value may be negative. 
in: body required: true type: integer @@ -3179,7 +3525,7 @@ host_cpu: type: integer host_disk_gb: description: | - The disk size on the host (in GB). + The disk size on the host (in GiB). in: body required: true type: integer @@ -3210,7 +3556,7 @@ host_maintenance_mode_in: type: string host_memory_mb: description: | - The memory size on the host (in MB). + The memory size on the host (in MiB). in: body required: true type: integer @@ -3328,6 +3674,22 @@ host_status_body_in: in: body required: false type: string +host_status_update_rebuild: + description: | + The host status. Values later in the list override earlier ones: + + - ``UP`` if nova-compute up. + - ``UNKNOWN`` if nova-compute not reported by servicegroup driver. + - ``DOWN`` if nova-compute forced down. + - ``MAINTENANCE`` if nova-compute is disabled. + - Empty string indicates there is no host for the server. + + This attribute appears in the response only if the policy permits. + By default, only administrators can get this parameter. + in: body + required: false + type: string + min_version: 2.75 host_zone: description: | The available zone of the host. @@ -3398,20 +3760,25 @@ hypervisor_diagnostics: min_version: 2.48 hypervisor_free_disk_gb: description: | - The free disk remaining on this hypervisor(in GB). + The free disk remaining on this hypervisor(in GiB). This does not take + allocation ratios used for overcommit into account so this value may be + negative. in: body required: true type: integer + max_version: 2.87 hypervisor_free_disk_gb_total: description: | - The free disk remaining on all hypervisors(in GB). + The free disk remaining on all hypervisors(in GiB). This does not take + allocation ratios used for overcommit into account so this value may be + negative. in: body required: true type: integer hypervisor_hostname: description: | - The hypervisor host name provided by the Nova virt driver. For the Ironic driver, - it is the Ironic node uuid. + The hypervisor host name provided by the Nova virt driver. For the Ironic + driver, it is the Ironic node uuid. in: body required: true type: string @@ -3439,7 +3806,7 @@ hypervisor_links: description: | Links to the hypervisors resource. See `API Guide / Links and References - `_ + `_ for more info. in: body type: array @@ -3455,8 +3822,10 @@ hypervisor_os_diagnostics: hypervisor_servers: description: | A list of ``server`` objects. + This field has become mandatory in microversion 2.75. If there are no servers on the + hypervisor, an empty list is returned. in: body - required: false + required: true type: array min_version: 2.53 hypervisor_servers_name: @@ -3509,27 +3878,41 @@ hypervisor_type_body: in: body required: true type: string +hypervisor_uptime: + description: | + The total uptime of the hypervisor and information about average load. Only + reported for active hosts where the virt driver supports this feature. + in: body + required: true + type: string + min_version: 2.88 hypervisor_vcpus: description: | - The number of vcpu in this hypervisor. + The number of vCPU in this hypervisor. This does not take allocation + ratios used for overcommit into account so there may be disparity between + this and the used count. in: body required: true type: integer + max_version: 2.87 hypervisor_vcpus_total: description: | - The number of vcpu on all hypervisors. + The number of vCPU on all hypervisors. This does not take allocation + ratios used for overcommit into account so there may be disparity between + this and the used count.
in: body required: true type: integer hypervisor_vcpus_used: description: | - The number of vcpu used in this hypervisor. + The number of vCPU used in this hypervisor. in: body required: true type: integer + max_version: 2.87 hypervisor_vcpus_used_total: description: | - The number of vcpu used on all hypervisors. + The number of vCPU used on all hypervisors. in: body required: true type: integer @@ -3548,7 +3931,7 @@ hypervisors: image: description: | The UUID and links for the image for your server instance. The ``image`` object - might be an empty string when you boot the server from a volume. + will be an empty string when you boot the server from a volume. in: body required: true type: object @@ -3725,7 +4108,7 @@ injectNetworkInfo: type: none instance_action_events_2_50: description: | - The events which occurred in this action. + The events which occurred in this action in descending order of creation. Policy defaults enable only users with the administrative role to see instance action event information. Cloud providers can change these @@ -3736,7 +4119,7 @@ instance_action_events_2_50: max_version: 2.50 instance_action_events_2_51: description: | - The events which occurred in this action. + The events which occurred in this action in descending order of creation. Policy defaults enable only users with the administrative role or the owner of the server to see instance action event information. Cloud providers can @@ -3749,8 +4132,8 @@ instance_actions_next_links: description: | Links pertaining to the instance action. This parameter is returned when paging and more data is available. - See `API Guide / Links and References - `_ + See `Paginated collections + `__ for more info. in: body required: false @@ -3807,7 +4190,7 @@ instanceAction: type: object instanceActions: description: | - List of the actions for the given instance. + List of the actions for the given instance in descending order of creation. in: body required: true type: array @@ -3941,6 +4324,13 @@ key_name_resp: in: body required: true type: string +key_name_resp_update: + description: | + The name of associated key pair, if any. + in: body + required: true + type: string + min_version: 2.75 key_pairs: &key_pairs description: | The number of allowed key pairs for each user. @@ -3958,6 +4348,11 @@ key_pairs_quota_details: description: | The object of detailed key pairs quota, including in_use, limit and reserved number of key pairs. + + .. note:: ``in_use`` field value for keypair quota details is always + zero. In Nova, key_pairs are a user-level resource, not a project- + level resource, so for legacy reasons, the keypair in-use information + is not counted. in: body required: true type: object @@ -3996,7 +4391,7 @@ keypair_links: description: | Links pertaining to keypair. See `API Guide / Links and References - `_ + `_ for more info. in: body type: array @@ -4113,17 +4508,20 @@ links: description: | Links to the resources in question. See `API Guide / Links and References - `_ + `_ for more info. in: body required: true type: array local_gb: description: | - The disk in this hypervisor(in GB). + The disk in this hypervisor (in GiB). This does not take allocation + ratios used for overcommit into account so there may be disparity between + this and the used count. 
in: body required: true type: integer + max_version: 2.87 local_gb_simple_tenant_usage: description: | The sum of the root disk size of the server and @@ -4140,28 +4538,33 @@ local_gb_simple_tenant_usage_optional: type: integer local_gb_total: description: | - The disk on all hypervisors(in GB). + The disk on all hypervisors (in GiB). This does not take allocation + ratios used for overcommit into account so there may be disparity between + this and the used count. in: body required: true type: integer local_gb_used: description: | - The disk used in this hypervisor(in GB). + The disk used in this hypervisor (in GiB). in: body required: true type: integer + max_version: 2.87 local_gb_used_total: description: | - The disk used on all hypervisors(in GB). + The disk used on all hypervisors (in GiB). in: body required: true type: integer lock: description: | The action to lock a server. + This parameter can be ``null``. + Up to microversion 2.73, this parameter should be ``null``. in: body required: true - type: none + type: object locked: description: | True if the instance is locked otherwise False. @@ -4169,6 +4572,20 @@ locked: required: true type: boolean min_version: 2.9 +locked_reason_req: + description: | + The reason behind locking a server. Limited to 255 characters in length. + in: body + required: false + type: string + min_version: 2.73 +locked_reason_resp: + description: | + The reason behind locking a server. + in: body + required: true + type: string + min_version: 2.73 mac_addr: description: | The MAC address. @@ -4210,47 +4627,53 @@ memory_details_diagnostics: The dictionary with information about VM memory usage. Following fields are presented in the dictionary: - - ``maximum`` - Amount of memory provisioned for the VM in MB (Integer) + - ``maximum`` - Amount of memory provisioned for the VM in MiB (Integer) - ``used`` - Amount of memory that is currently used by the guest operating - system and its applications in MB (Integer) + system and its applications in MiB (Integer) in: body required: true type: array min_version: 2.48 memory_mb: description: | - The memory of this hypervisor(in MB). + The memory of this hypervisor (in MiB). This does not take allocation + ratios used for overcommit into account so there may be disparity between + this and the used count. in: body required: true type: integer + max_version: 2.87 memory_mb_simple_tenant_usage: description: | - The memory size of the server (in MB). + The memory size of the server (in MiB). in: body required: true type: integer memory_mb_simple_tenant_usage_optional: description: | - The memory size of the server (in MB). + The memory size of the server (in MiB). in: body required: false type: integer memory_mb_total: description: | - The memory of all hypervisors(in MB). + The memory of all hypervisors (in MiB). This does not take allocation + ratios used for overcommit into account so there may be disparity between + this and the used count. in: body required: true type: integer memory_mb_used: description: | - The memory used in this hypervisor(in MB). + The memory used in this hypervisor (in MiB). in: body required: true type: integer + max_version: 2.87 memory_mb_used_total: description: | - The memory used on all hypervisors(in MB). + The memory used on all hypervisors(in MiB). in: body required: true type: integer @@ -4411,8 +4834,8 @@ migration_links_2_23: Links to the migration. 
This parameter is returned if the migration type is ``live-migration`` and the migration status is one of ``queued``, ``preparing``, ``running`` - and ``post-migrating``. See `API Guide / Links and References - `_ + and ``post-migrating``. See `Paginated collections + `__ for more info. in: body required: false @@ -4434,8 +4857,8 @@ migration_next_links_2_59: description: | Links pertaining to the migration. This parameter is returned when paging and more data is available. - See `API Guide / Links and References - `_ + See `Paginated collections + `__ for more info. in: body required: false @@ -4480,7 +4903,7 @@ minDisk_body: type: integer minRam_body: description: | - The minimum amount of RAM an image requires to function, in MB. For example, ``512``. + The minimum amount of RAM an image requires to function, in MiB. For example, ``512``. in: body required: true type: integer @@ -4502,6 +4925,13 @@ name_server_group: in: body required: true type: string +name_update_rebuild: + description: | + The security group name. + in: body + required: true + type: string + min_version: 2.75 namespace: description: | A URL pointing to the namespace for this extension. @@ -4691,18 +5121,21 @@ os-availability-zone:availability_zone: want your instance to be built. Typically, an admin user will use availability zones to arrange OpenStack compute hosts into logical groups. + An availability zone provides a form of physical isolation and redundancy from other availability zones. For instance, if some racks in your data center are on a separate power source, you can put servers in those racks in their own availability zone. Availability zones can also help separate different classes of hardware. By segregating resources into availability zones, you can ensure that your application resources are spread across disparate machines to achieve high availability in - the event of hardware or other failure. + the event of hardware or other failure. See + `Availability Zones (AZs) `_ for more information. + You can list the available availability zones by calling the - os-availability-zone API, but you should avoid using the default - availability zone when booting the instance. In general, the - default availability zone is named ``nova``. This AZ is only shown - when listing the availability zones as an admin. + :ref:`os-availability-zone` API, but you should avoid using the `default + availability zone `_ + when creating the server. The default availability zone is named ``nova``. + This AZ is only shown when listing the availability zones as an admin. in: body required: false type: string @@ -4739,6 +5172,13 @@ OS-EXT-AZ:availability_zone_optional: in: body required: false type: string +OS-EXT-AZ:availability_zone_update_rebuild: + description: | + The availability zone name. + in: body + required: true + type: string + min_version: 2.75 OS-EXT-SRV-ATTR:host: description: | The name of the compute host on which this instance is running. @@ -4746,6 +5186,14 @@ OS-EXT-SRV-ATTR:host: in: body required: true type: string +OS-EXT-SRV-ATTR:host_update_rebuild: + description: | + The name of the compute host on which this instance is running. + Appears in the response for administrative users only. + in: body + required: true + type: string + min_version: 2.75 OS-EXT-SRV-ATTR:hypervisor_hostname: description: | The hypervisor host name provided by the Nova virt driver. 
For the Ironic driver, @@ -4753,6 +5201,14 @@ OS-EXT-SRV-ATTR:hypervisor_hostname: in: body required: true type: string +OS-EXT-SRV-ATTR:hypervisor_hostname_update_rebuild: + description: | + The hypervisor host name provided by the Nova virt driver. For the Ironic driver, + it is the Ironic node uuid. Appears in the response for administrative users only. + in: body + required: true + type: string + min_version: 2.75 OS-EXT-SRV-ATTR:instance_name: description: | The instance name. The Compute API generates the instance name from the instance @@ -4760,6 +5216,14 @@ OS-EXT-SRV-ATTR:instance_name: in: body required: true type: string +OS-EXT-SRV-ATTR:instance_name_update_rebuild: + description: | + The instance name. The Compute API generates the instance name from the instance + name template. Appears in the response for administrative users only. + in: body + required: true + type: string + min_version: 2.75 OS-EXT-STS:power_state: description: | The power state of the instance. This is an enum value that is mapped as:: @@ -4773,18 +5237,46 @@ OS-EXT-STS:power_state: in: body required: true type: integer +OS-EXT-STS:power_state_update_rebuild: + description: | + The power state of the instance. This is an enum value that is mapped as:: + + 0: NOSTATE + 1: RUNNING + 3: PAUSED + 4: SHUTDOWN + 6: CRASHED + 7: SUSPENDED + in: body + required: true + type: integer + min_version: 2.75 OS-EXT-STS:task_state: description: | The task state of the instance. in: body required: true type: string +OS-EXT-STS:task_state_update_rebuild: + description: | + The task state of the instance. + in: body + required: true + type: string + min_version: 2.75 OS-EXT-STS:vm_state: description: | The VM state. in: body required: true type: string +OS-EXT-STS:vm_state_update_rebuild: + description: | + The VM state. + in: body + required: true + type: string + min_version: 2.75 os-extended-volumes:volumes_attached: description: | The attached volumes, if any. @@ -4794,19 +5286,39 @@ os-extended-volumes:volumes_attached: os-extended-volumes:volumes_attached.delete_on_termination: description: | A flag indicating if the attached volume will be deleted - when the server is deleted. By default this is False and - can only be set when creating a volume while creating a - server, which is commonly referred to as boot from volume. + when the server is deleted. By default this is False. in: body required: true type: boolean min_version: 2.3 +os-extended-volumes:volumes_attached.delete_on_termination_update_rebuild: + description: | + A flag indicating if the attached volume will be deleted + when the server is deleted. By default this is False. + in: body + required: true + type: boolean + min_version: 2.75 os-extended-volumes:volumes_attached.id: description: | The attached volume ID. in: body required: true type: string +os-extended-volumes:volumes_attached.id_update_rebuild: + description: | + The attached volume ID. + in: body + required: true + type: string + min_version: 2.75 +os-extended-volumes:volumes_attached_update_rebuild: + description: | + The attached volumes, if any. + in: body + required: true + type: array + min_version: 2.75 os-getConsoleOutput: description: | The action to get console output of the server. @@ -4875,7 +5387,7 @@ os-getVNCConsole: type: object os-getVNCConsole-type: description: | - The type of VNC console. The valid values are ``novnc`` and ``xvpvnc``. + The type of VNC console. The only valid value is ``novnc``. 
in: body required: true type: string @@ -4920,6 +5432,24 @@ OS-SRV-USG:launched_at: in: body required: true type: string +OS-SRV-USG:launched_at_update_rebuild: + description: | + The date and time when the server was launched. + + The date and time stamp format is `ISO 8601 `_: + + :: + + CCYY-MM-DDThh:mm:ss±hh:mm + + For example, ``2015-08-27T09:49:58-05:00``. + + The ``±hh:mm`` value, if included, is the time zone as an offset from UTC. + If the ``launched_at`` date and time stamp is not set, its value is ``null``. + in: body + required: true + type: string + min_version: 2.75 OS-SRV-USG:terminated_at: description: | The date and time when the server was deleted. @@ -4936,6 +5466,23 @@ OS-SRV-USG:terminated_at: in: body required: true type: string +OS-SRV-USG:terminated_at_update_rebuild: + description: | + The date and time when the server was deleted. + + The date and time stamp format is `ISO 8601 `_: + + :: + + CCYY-MM-DDThh:mm:ss±hh:mm + + For example, ``2015-08-27T09:49:58-05:00``. + The ``±hh:mm`` value, if included, is the time zone as an offset from UTC. + If the ``deleted_at`` date and time stamp is not set, its value is ``null``. + in: body + required: true + type: string + min_version: 2.75 os-start: description: | The action to start a stopped server. @@ -4993,7 +5540,7 @@ os:scheduler_hints_cidr: os:scheduler_hints_different_cell: description: | A list of cell routes or a cell route (string). - Schedule the server in a cell that is not specifiled. + Schedule the server in a cell that is not specified. It is available when ``DifferentCellFilter`` is available on cloud side that is cell v1 environment. in: body @@ -5023,7 +5570,7 @@ os:scheduler_hints_query: Schedule the server by using a custom filter in JSON format. For example:: - "query": "[>=,$free_ram_mb,1024]" + "query": "[\">=\",\"$free_ram_mb\",1024]" It is available when ``JsonFilter`` is available on cloud side. in: body @@ -5040,7 +5587,7 @@ os:scheduler_hints_same_host: type: array os:scheduler_hints_target_cell: description: | - A target cell name. Schedule the server in a host in the cell specifiled. + A target cell name. Schedule the server in a host in the cell specified. It is available when ``TargetCellFilter`` is available on cloud side that is cell v1 environment. in: body @@ -5157,7 +5704,7 @@ policy_name: instead of resulting in a build failure. in: body required: true - type: object + type: string min_version: 2.64 policy_rules: description: | @@ -5272,9 +5819,23 @@ project_id: in: body required: false type: string -project_id_instance_action: +project_id_migration_2_80: description: | - The UUID of the project that this server belongs to. + The ID of the project which initiated the server migration. The value + may be ``null`` for older migration records. + in: body + required: true + type: string + min_version: 2.80 +project_id_server: + description: | + The ID of the project that this server belongs to. + in: body + required: true + type: string +project_id_server_action: + description: | + The ID of the project which initiated the server action. in: body required: true type: string @@ -5314,14 +5875,14 @@ quota_tenant_or_user_id_body: type: string ram: &ram description: | - The amount of allowed server RAM, in MB, for each tenant. + The amount of allowed server RAM, in MiB, for each tenant. in: body required: true type: integer ram_quota_class: &ram_quota_class <<: *ram description: | - The amount of allowed instance RAM, in MB, for the quota class.
+ The amount of allowed instance RAM, in MiB, for the quota class. ram_quota_class_optional: <<: *ram_quota_class required: false @@ -5334,7 +5895,7 @@ ram_quota_details: type: object ram_quota_optional: description: | - The amount of allowed server RAM, in MB, for each tenant. + The amount of allowed server RAM, in MiB, for each tenant. in: body required: false type: integer @@ -5375,7 +5936,7 @@ remote_console_protocol: type: string remote_console_type: description: | - The type of remote console. The valid values are ``novnc``, ``xvpvnc``, + The type of remote console. The valid values are ``novnc``, ``rdp-html5``, ``spice-html5``, ``serial``, and ``webmks``. The type ``webmks`` is added since Microversion ``2.8``. in: body @@ -5498,13 +6059,14 @@ rules: type: array running_vms: description: | - The number of running vms on this hypervisor. + The number of running VMs on this hypervisor. in: body required: true type: integer + max_version: 2.87 running_vms_total: description: | - The total number of running vms on all hypervisors. + The total number of running VMs on all hypervisors. in: body required: true type: integer @@ -5615,6 +6177,19 @@ security_groups_obj: in: body required: true type: array +security_groups_obj_optional: + description: | + One or more security group objects. + in: body + required: false + type: array +security_groups_obj_update_rebuild: + description: | + One or more security group objects. + in: body + required: false + type: array + min_version: 2.75 security_groups_quota: description: | The number of allowed security groups for each tenant. @@ -5672,7 +6247,7 @@ server_description_resp: min_version: 2.19 server_group: description: | - The server group obejct. + The server group object. in: body required: true type: object @@ -5710,6 +6285,14 @@ server_groups: &server_groups in: body required: true type: integer +server_groups_2_71: + description: | + The UUIDs of the server groups to which the server belongs. Currently + this can contain at most one entry. + in: body + required: true + type: array + min_version: 2.71 server_groups_list: description: | The list of existing server groups. @@ -5739,14 +6322,57 @@ server_groups_quota_optional: in: body required: false type: integer -server_hostname: +# This is the host in a POST (create instance) request body. +server_host_create: + description: | + The name of the compute service host on which the server is to be created. + The API will return 400 if no compute services are found with the given + host name. By default, it can be specified by administrators only. + in: body + required: false + type: string + min_version: 2.74 +server_hostname: &server_hostname in: body required: false type: string description: | - The hostname set on the instance when it is booted. - By default, it appears in the response for administrative users only. + The hostname of the instance reported in the metadata service. + This parameter only appears in responses for administrators until + microversion 2.90, after which it is shown for all users. + + .. note:: + + This information is published via the metadata service and requires + an application such as ``cloud-init`` to propagate it through to the + instance. min_version: 2.3 +server_hostname_req: + in: body + required: false + type: string + description: | + The hostname to configure for the instance in the metadata service. + + .. 
note:: + + This information is published via the metadata service and requires + an application such as ``cloud-init`` to propagate it through to the + instance. + min_version: 2.90 +server_hostname_update_rebuild: + <<: *server_hostname + min_version: 2.75 +# This is the hypervisor_hostname in a POST (create instance) request body. +server_hypervisor_hostname_create: + description: | + The hostname of the hypervisor on which the server is to be created. + The API will return 400 if no hypervisors are found with the given + hostname. By default, it can be specified by administrators only. + in: body + required: false + type: string + min_version: 2.74 server_id: description: | The UUID of the server. @@ -5767,6 +6393,14 @@ server_kernel_id: The UUID of the kernel image when using an AMI. Will be null if not. By default, it appears in the response for administrative users only. min_version: 2.3 +server_kernel_id_update_rebuild: + in: body + required: false + type: string + description: | + The UUID of the kernel image when using an AMI. Will be null if not. + By default, it appears in the response for administrative users only. + min_version: 2.75 server_launch_index: in: body required: false type: integer description: | When servers are launched via multiple create, this is the sequence in which the servers were launched. By default, it appears in the response for administrative users only. min_version: 2.3 +server_launch_index_update_rebuild: + in: body + required: false + type: integer + description: | + When servers are launched via multiple create, this is the + sequence in which the servers were launched. + By default, it appears in the response for administrative users only. + min_version: 2.75 server_links: description: | Links pertaining to the server. See `API Guide / Links and References - `_ + `_ for more info. in: body type: array @@ -5805,6 +6448,14 @@ server_ramdisk_id: in: body required: false type: string description: | The UUID of the ramdisk image when using an AMI. Will be null if not. By default, it appears in the response for administrative users only. min_version: 2.3 +server_ramdisk_id_update_rebuild: + in: body + required: false + type: string + description: | + The UUID of the ramdisk image when using an AMI. Will be null if not. + By default, it appears in the response for administrative users only. + min_version: 2.75 server_reservation_id: in: body required: false type: string description: | The reservation id for the server. This is an id that can be useful in tracking groups of servers created with multiple create, that will all have the same reservation_id. By default, it appears in the response for administrative users only. min_version: 2.3 +server_reservation_id_update_rebuild: + in: body + required: false + type: string + description: | + The reservation id for the server. This is an id that can + be useful in tracking groups of servers created with multiple + create, that will all have the same reservation_id. + By default, it appears in the response for administrative users only. + min_version: 2.75 server_root_device_name: in: body required: false type: string description: | The root device name for the instance. By default, it appears in the response for administrative users only. min_version: 2.3 +server_root_device_name_update_rebuild: + in: body + required: false + type: string + description: | + The root device name for the instance. + By default, it appears in the response for administrative users only. + min_version: 2.75 server_status: description: | The server status.
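
.. note:: As a minimal sketch of the new ``*_update_rebuild`` parameters
   above, a microversion 2.75 update or rebuild response carries the
   admin-only extended attributes alongside the regular server fields;
   all values here are illustrative, not taken from an official sample::

       {
           "server": {
               "OS-EXT-SRV-ATTR:kernel_id": null,
               "OS-EXT-SRV-ATTR:ramdisk_id": null,
               "OS-EXT-SRV-ATTR:launch_index": 0,
               "OS-EXT-SRV-ATTR:reservation_id": "r-00000001",
               "OS-EXT-SRV-ATTR:root_device_name": "/dev/vda"
           }
       }
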
@@ -5849,6 +6518,77 @@ server_tags_create: required: false type: array min_version: 2.52 +server_topology_nodes: + description: | + NUMA node information for a server. + in: body + required: true + type: array +server_topology_nodes_cpu_pinning: + description: | + The mapping of server cores to host physical CPUs. For example:: + + cpu_pinning: { 0: 0, 1: 5} + + This means vcpu 0 is mapped to physical CPU 0, and vcpu 1 is mapped + to physical CPU 5. + + By default the ``cpu_pinning`` field is only visible to users with the + administrative role. You can change the default behavior via the policy + rule:: + + compute:server:topology:host:index + in: body + required: false + type: dict +server_topology_nodes_cpu_siblings: + description: | + A mapping of host CPU thread siblings. For example:: + + siblings: [[0,1],[2,3]] + + This means vcpu 0 and vcpu 1 belong to the same CPU core, while vcpu 2 + and vcpu 3 belong to another CPU core. + + By default the ``siblings`` field is only visible to users with the + administrative role. You can change the default behavior via the policy + rule:: + + compute:server:topology:host:index + in: body + required: false + type: list +server_topology_nodes_host_node: + description: | + The host NUMA node the virtual NUMA node is mapped to. + + By default the ``host_node`` field is only visible to users with the + administrative role. You can change the default behavior via the policy + rule:: + + compute:server:topology:host:index + in: body + required: false + type: integer +server_topology_nodes_memory_mb: + description: | + The amount of memory assigned to this NUMA node in MiB. + in: body + required: false + type: integer +server_topology_nodes_vcpu_set: + description: | + A list of IDs of the virtual CPUs assigned to this NUMA node. + in: body + required: false + type: list +server_topology_pagesize_kb: + description: | + The page size in KiB of a server. This field is ``null`` if the + page size information is not available. + in: body + required: true + type: integer server_trusted_image_certificates_create_req: description: | A list of trusted certificate IDs, which are used during image @@ -5904,6 +6644,14 @@ server_user_data: The user_data the instance was created with. By default, it appears in the response for administrative users only. min_version: 2.3 +server_user_data_update: + in: body + required: false + type: string + description: | + The user_data the instance was created with. + By default, it appears in the response for administrative users only. + min_version: 2.75 server_uuid: description: | The UUID of the server instance to which the API dispatches the event. You must @@ -5922,8 +6670,8 @@ servers_links: description: | Links to the next server. It is available when the number of servers exceeds ``limit`` parameter or ``[api]/max_limit`` in the configuration file. - See `API Guide / Links and References - `_ + See `Paginated collections + `__ for more info. in: body type: array @@ -6215,6 +6963,12 @@ tags: required: true type: array min_version: 2.26 +tags_no_min: + description: | + A list of tags. The maximum count of tags in this list is 50. + in: body + required: true + type: array tenant_id_body: description: | The UUID of the tenant in a multi-tenancy cloud. @@ -6297,7 +7051,7 @@ total_local_gb_usage: type: float total_memory_mb_usage: description: | - Multiplying the server memory size (in MB) by hours the server exists, + Multiplying the server memory size (in MiB) by hours the server exists, and then adding that all together for each server.
in: body required: true @@ -6465,7 +7219,7 @@ usage_links: description: | Links pertaining to usage. See `API Guide / Links and References - `_ + `_ for more info. in: body type: array @@ -6505,6 +7259,20 @@ user_id: in: body required: true type: string +user_id_migration_2_80: + description: | + The ID of the user which initiated the server migration. The value + may be ``null`` for older migration records. + in: body + required: true + type: string + min_version: 2.80 +user_id_server_action: + description: | + The ID of the user which initiated the server action. + in: body + required: true + type: string user_id_server_group: description: | The user ID who owns the server group. @@ -6625,6 +7393,13 @@ volume: in: body required: true type: object +volume_attachment_id_resp: + description: | + The volume ID of the attachment. + in: body + required: true + type: string + max_version: 2.88 volume_id: description: | The source volume ID. @@ -6639,9 +7414,20 @@ volume_id_resp: type: string volume_size: description: | - The size of the volume (in GB). + The size of the volume (in GiB). This is integer value from range 1 to 2147483647 which can be requested as integer and string. + This parameter must be specified in the following cases: + + - An image to volume case + + * ``block_device_mapping_v2.source_type`` is ``image`` + * ``block_device_mapping_v2.destination_type`` is ``volume`` + + - A blank to volume case + + * ``block_device_mapping_v2.source_type`` is ``blank`` + * ``block_device_mapping_v2.destination_type`` is ``volume`` in: body required: false type: integer @@ -6683,7 +7469,8 @@ volumeAttachment_post: volumeAttachment_put: description: | A dictionary representation of a volume attachment containing the field - ``volumeId`` which is the UUID of the replacement volume. + ``volumeId`` which is the UUID of the replacement volume, and other fields + to update in the attachment. in: body required: true type: object diff --git a/api-ref/source/request-ids.inc b/api-ref/source/request-ids.inc index 76c0efafedb..4df4c40b9ca 100644 --- a/api-ref/source/request-ids.inc +++ b/api-ref/source/request-ids.inc @@ -8,7 +8,7 @@ Users can specify the global request ID in the request header. Users can receive the local request ID in the response header. For more details about Request IDs, please reference: `Faults -`_ +`_ **Request** diff --git a/api-ref/source/server-migrations.inc b/api-ref/source/server-migrations.inc index f0d45f2d444..52e413470ff 100644 --- a/api-ref/source/server-migrations.inc +++ b/api-ref/source/server-migrations.inc @@ -53,10 +53,12 @@ Response - status: migrate_status - updated_at: updated - uuid: migration_uuid + - user_id: user_id_migration_2_80 + - project_id: project_id_migration_2_80 -**Example List Migrations (2.59)** +**Example List Migrations (2.80)** -.. literalinclude:: ../../doc/api_samples/server-migrations/v2.59/migrations-index.json +.. literalinclude:: ../../doc/api_samples/server-migrations/v2.80/migrations-index.json :language: javascript Show Migration Details @@ -107,10 +109,12 @@ Response - status: migrate_status - updated_at: updated - uuid: migration_uuid + - user_id: user_id_migration_2_80 + - project_id: project_id_migration_2_80 -**Example Show Migration Details (2.59)** +**Example Show Migration Details (2.80)** -.. literalinclude:: ../../doc/api_samples/server-migrations/v2.59/migrations-get.json +.. 
literalinclude:: ../../doc/api_samples/server-migrations/v2.80/migrations-get.json :language: javascript Force Migration Complete Action (force_complete Action) @@ -124,9 +128,11 @@ Specify the ``force_complete`` action in the request body. .. note:: Microversion 2.22 or greater is required for this API. -.. note:: Not all compute back ends support forcefully completing an +.. note:: Not all `compute back ends`_ support forcefully completing an in-progress live migration. +.. _compute back ends: https://docs.openstack.org/nova/latest/user/support-matrix.html#operation_force_live_migration_to_complete + Policy defaults enable only users with the administrative role to perform this operation. Cloud providers can change these permissions through the ``policy.json`` file. @@ -148,9 +154,11 @@ to determine whether the request succeeded. **Troubleshooting** -If the server status remains ``ACTIVE`` for an inordinate amount of time, the -request may have failed. Ensure you meet the preconditions and run the request -again. If the request fails again, investigate the compute back end. +If the server status remains ``MIGRATING`` for an inordinate amount of time, +the request may have failed. Ensure you meet the preconditions and run the +request again. If the request fails again, investigate the compute back end. +More details can be found in the +`admin guide `_. Normal response codes: 202 @@ -187,9 +195,11 @@ Abort an in-progress live migration. .. note:: With microversion 2.65 or greater, you can abort live migrations also in ``queued`` and ``preparing`` status. -.. note:: Not all compute back ends support aborting an in-progress live +.. note:: Not all `compute back ends`__ support aborting an in-progress live migration. +.. __: https://docs.openstack.org/nova/latest/user/support-matrix.html#operation_abort_in_progress_live_migration + Policy defaults enable only users with the administrative role to perform this operation. Cloud providers can change these permissions through the ``policy.json`` file. @@ -215,7 +225,7 @@ using:: **Troubleshooting** -If the server task_state remains ``migrating`` for an inordinate amount of +If the server status remains ``MIGRATING`` for an inordinate amount of time, the request may have failed. Ensure you meet the preconditions and run the request again. If the request fails again, investigate the compute back end. diff --git a/api-ref/source/server-topology.inc b/api-ref/source/server-topology.inc new file mode 100644 index 00000000000..014f713fa04 --- /dev/null +++ b/api-ref/source/server-topology.inc @@ -0,0 +1,52 @@ +.. -*- rst -*- + +===================================== +Servers Topology (servers, topology) +===================================== + +Shows the NUMA topology information for a server. + +Show Server Topology +==================== + +.. rest_method:: GET /servers/{server_id}/topology +.. versionadded:: 2.78 + +Shows NUMA topology information for a server. + +Policy defaults enable only users with the administrative role or the owners +of the server to perform this operation. Cloud providers can change these +permissions through the ``policy.json`` file. + +Normal response codes: 200 + +Error response codes: unauthorized(401), notfound(404), forbidden(403) + +Request +------- + +.. rest_parameters:: parameters.yaml + + - server_id: server_id_path + +Response +-------- + +All response fields are listed below. If some information is not available or +not allowed by policy, the corresponding key will not exist in the response. 
.. 
rest_parameters:: parameters.yaml + + - nodes: server_topology_nodes + - nodes.cpu_pinning: server_topology_nodes_cpu_pinning + - nodes.vcpu_set: server_topology_nodes_vcpu_set + - nodes.siblings: server_topology_nodes_cpu_siblings + - nodes.memory_mb: server_topology_nodes_memory_mb + - nodes.host_node: server_topology_nodes_host_node + - pagesize_kb: server_topology_pagesize_kb + +**Example Server Topology (2.78)** + +.. literalinclude:: ../../doc/api_samples/os-server-topology/v2.78/servers-topology-resp.json + :language: javascript + diff --git a/api-ref/source/servers-action-evacuate.inc b/api-ref/source/servers-action-evacuate.inc index 35fe67eccbb..8ae3d22093c 100644 --- a/api-ref/source/servers-action-evacuate.inc +++ b/api-ref/source/servers-action-evacuate.inc @@ -11,6 +11,16 @@ Evacuates a server from a failed host to a new host. - In the request body, if ``onSharedStorage`` is set, then do not set ``adminPass``. - The target host should not be the same as the instance host. +**Preconditions** + +- The failed host must be fenced and no longer running the original server. +- The failed host must be reported as down or marked as forced down using + `Update Forced Down`_. + +Starting from API version 2.68, the ``force`` parameter is no longer accepted +as this could not be meaningfully supported by servers with complex resource +allocations. + Normal response codes: 200 Error response codes: badRequest(400), unauthorized(401), forbidden(403), diff --git a/api-ref/source/servers-action-remote-consoles.inc b/api-ref/source/servers-action-remote-consoles.inc index 105a00dd10a..582365c2fce 100644 --- a/api-ref/source/servers-action-remote-consoles.inc +++ b/api-ref/source/servers-action-remote-consoles.inc @@ -11,7 +11,7 @@ Gets an `RDP `__ con .. warning:: This action is deprecated in microversion 2.5 and superseded - by the API `Server Remote Consoles`_ in microversion 2.6. + by the API `Server Consoles`_ in microversion 2.6. The new API offers a unified API for different console types. The only supported connect type is ``rdp-html5``. The ``type`` parameter should @@ -64,7 +64,7 @@ Gets a serial console for a server. .. warning:: This action is deprecated in microversion 2.5 and superseded - by the API `Server Remote Consoles`_ in microversion 2.6. + by the API `Server Consoles`_ in microversion 2.6. The new API offers a unified API for different console types. Specify the ``os-getSerialConsole`` action in the request body. @@ -117,7 +117,7 @@ Gets a SPICE console for a server. .. warning:: This action is deprecated in microversion 2.5 and superseded - by the API `Server Remote Consoles`_ in microversion 2.6. + by the API `Server Consoles`_ in microversion 2.6. The new API offers a unified API for different console types. Specify the ``os-getSPICEConsole`` action in the request body. @@ -170,14 +170,11 @@ Gets a VNC console for a server. .. warning:: This action is deprecated in microversion 2.5 and superseded - by the API `Server Remote Consoles`_ in microversion 2.6. + by the API `Server Consoles`_ in microversion 2.6. The new API offers a unified API for different console types. Specify the ``os-getVNCConsole`` action in the request body. -The supported connection types are ``novnc``, ``xvpvnc``. Such as connect -with ``novnc``, set ``type`` parameter to ``novnc``.
- Normal response codes: 200 Error response codes: badRequest(400), unauthorized(401), forbidden(403), itemNotFound(404), diff --git a/api-ref/source/servers-action-shelve.inc b/api-ref/source/servers-action-shelve.inc index b024031cdfc..08ca65daddb 100644 --- a/api-ref/source/servers-action-shelve.inc +++ b/api-ref/source/servers-action-shelve.inc @@ -138,15 +138,20 @@ If the server status does not change to ``ACTIVE``, the unshelve operation faile Normal response codes: 202 -Error response codes: unauthorized(401), forbidden(403), itemNotFound(404), conflict(409) +Error response codes: badRequest(400), unauthorized(401), forbidden(403), itemNotFound(404), conflict(409) Request ------- +.. note:: Since microversion 2.77, the allowed request body schemas are + {"unshelve": null} or {"unshelve": {"availability_zone": <string>}}. + A request body of {"unshelve": {}} is not allowed. + .. rest_parameters:: parameters.yaml - server_id: server_id_path - unshelve: unshelve + - availability_zone: availability_zone_unshelve | @@ -155,6 +160,11 @@ Request .. literalinclude:: ../../doc/api_samples/os-shelve/os-unshelve.json :language: javascript +**Example Unshelve server (unshelve Action) (v2.77)** + +.. literalinclude:: ../../doc/api_samples/os-shelve/v2.77/os-unshelve.json + :language: javascript + Response -------- diff --git a/api-ref/source/servers-actions.inc b/api-ref/source/servers-actions.inc index 530b8447249..4be66ebafad 100644 --- a/api-ref/source/servers-actions.inc +++ b/api-ref/source/servers-actions.inc @@ -10,11 +10,9 @@ in the request body. You can associate a fixed or floating IP address with a server, or disassociate a fixed or floating IP address from a server. -You can attach a volume to a server. You can create an image from a server, create a backup of a server, -evacuate a server from a failed host to a new host, and force-delete a -server before deferred cleanup. +and force-delete a server before deferred cleanup. You can lock, pause, reboot, rebuild, rescue, resize, resume, confirm the resize of, revert a pending resize for, shelve, shelf-offload, unshelve, start, stop, unlock, unpause, and unrescue a server. You can @@ -24,6 +22,7 @@ into a server since Mitaka release. You can get an RDP, serial, SPICE, or VNC console for a server. + Add (Associate) Floating Ip (addFloatingIp Action) (DEPRECATED) ================================================================ @@ -40,7 +39,7 @@ A pool of floating IP addresses, configured by the cloud administrator, is available in OpenStack Compute. The project quota defines the maximum number of floating IP addresses that you can allocate to the project. After you `create (allocate) a floating IP address -`__ +`__ for a project, you can associate that address with the server. Specify the ``addFloatingIp`` action in the request body. @@ -164,7 +163,7 @@ Specify the ``confirmResize`` action in the request body. After you make this request, you typically must keep polling the server status to determine whether the request succeeded. A successfully confirmed resize operation shows a status of ``ACTIVE`` or ``SHUTOFF`` -and a migration_status of ``confirmed``. You can also see the resized +and a migration status of ``confirmed``. You can also see the resized server in the compute node that OpenStack Compute manages. **Preconditions** @@ -177,9 +176,20 @@ to confirm the server. **Troubleshooting** -If the server status remains ``RESIZED``, the request failed. Ensure you +If the server status remains ``VERIFY_RESIZE``, the request failed. 
Ensure you meet the preconditions and run the request -again, investigate the compute back end or ask your cloud provider. +again, the server status should be ``ERROR`` and a migration status of +``error``. Investigate the compute back end or ask your cloud provider. +There are some options for trying to correct the server status: + +* If the server is running and networking works, a user with proper + authority could reset the status of the server to ``active`` using the + :ref:`os-resetState` API. +* If the server is not running, you can try hard rebooting the server using + the :ref:`reboot` API. + +Note that the cloud provider may still need to clean up any orphaned resources +on the source hypervisor. Normal response codes: 204 @@ -285,9 +295,28 @@ image in the image back end that OpenStack Image service manages. The server must exist. You can only create a new image from the server when its status is ``ACTIVE``, -``SHUTOFF``, ``PAUSED``, or ``SUSPENDED``. +``SHUTOFF``, ``SUSPENDED`` or ``PAUSED`` +(``PAUSED`` is only supported for image-backed servers). + +The project must have sufficient volume snapshot quota in the block storage +service when the server has attached volumes. +If the project does not have sufficient volume snapshot quota, +the API returns a 403 error. + +**Asynchronous Postconditions** -The connection to the Image service is valid. +A snapshot image will be created in the Image service. + +In the image-backed server case, volume snapshots of attached volumes will not +be created. +In the volume-backed server case, +volume snapshots will be created for all volumes attached to the server and +then those will be represented with a ``block_device_mapping`` image property +in the resulting snapshot image in the Image service. +If that snapshot image is used later to create a new server, +it will result in a volume-backed server where the root volume is created +from the snapshot of the original root volume. The volumes created from +the snapshots of the original other volumes will be attached to the server. **Troubleshooting** @@ -349,9 +378,31 @@ Locks a server. Specify the ``lock`` action in the request body. +Most actions by non-admin users are not allowed on the server +after this operation succeeds and the server is locked. +See the "Lock, Unlock" item in `Server actions +`_ +for the restricted actions. +Administrators can still perform actions on the server +even though the server is locked. Note that from microversion 2.73 it is +possible to specify a reason when locking the server. + +The `unlock action +`_ +will unlock a locked server so that additional actions can +be performed on the server by non-admin users. + +You can check whether a server is locked, and the ``locked_reason`` +(if specified, from the 2.73 microversion), via the `List Servers Detailed API +`_ +or +the `Show Server Details API +`_. + Policy defaults enable only users with the administrative role or the owner of the server to perform this operation. Cloud providers can change these permissions through the ``policy.json`` file. +Administrators can override the owner's lock. Normal response codes: 202 @@ -365,12 +416,18 @@ Request - server_id: server_id_path - lock: lock + - locked_reason: locked_reason_req **Example Lock Server (lock Action)** .. literalinclude:: ../../doc/api_samples/os-lock-server/lock-server.json :language: javascript +**Example Lock Server (lock Action) (v2.73)** + .. 
literalinclude:: ../../doc/api_samples/os-lock-server/v2.73/lock-server-with-reason.json + :language: javascript + Response -------- @@ -414,6 +471,8 @@ Response If successful, this method does not return content in the response body. +.. _reboot: + Reboot Server (reboot Action) ============================= @@ -525,12 +584,18 @@ Request - key_name: key_name_rebuild_req - user_data: user_data_rebuild_req - trusted_image_certificates: server_trusted_image_certificates_rebuild_req + - hostname: server_hostname_req **Example Rebuild Server (rebuild Action) (v2.63)** .. literalinclude:: ../../doc/api_samples/servers/v2.63/server-action-rebuild.json :language: javascript +**Example Rebuild Server (rebuild Action) (v2.90)** + +.. literalinclude:: ../../doc/api_samples/servers/v2.90/server-action-rebuild.json + :language: javascript + Response -------- @@ -575,12 +640,37 @@ Response - key_name: key_name_rebuild_resp - user_data: user_data_rebuild_resp - trusted_image_certificates: server_trusted_image_certificates_resp - -**Example Rebuild Server (rebuild Action) (v2.63)** - -.. literalinclude:: ../../doc/api_samples/servers/v2.63/server-action-rebuild-resp.json + - server_groups: server_groups_2_71 + - locked_reason: locked_reason_resp + - config_drive: config_drive_resp_update_rebuild + - OS-EXT-AZ:availability_zone: OS-EXT-AZ:availability_zone_update_rebuild + - OS-EXT-SRV-ATTR:host: OS-EXT-SRV-ATTR:host_update_rebuild + - OS-EXT-SRV-ATTR:hypervisor_hostname: OS-EXT-SRV-ATTR:hypervisor_hostname_update_rebuild + - OS-EXT-SRV-ATTR:instance_name: OS-EXT-SRV-ATTR:instance_name_update_rebuild + - OS-EXT-STS:power_state: OS-EXT-STS:power_state_update_rebuild + - OS-EXT-STS:task_state: OS-EXT-STS:task_state_update_rebuild + - OS-EXT-STS:vm_state: OS-EXT-STS:vm_state_update_rebuild + - OS-EXT-SRV-ATTR:hostname: server_hostname_update_rebuild + - OS-EXT-SRV-ATTR:reservation_id: server_reservation_id_update_rebuild + - OS-EXT-SRV-ATTR:launch_index: server_launch_index_update_rebuild + - OS-EXT-SRV-ATTR:kernel_id: server_kernel_id_update_rebuild + - OS-EXT-SRV-ATTR:ramdisk_id: server_ramdisk_id_update_rebuild + - OS-EXT-SRV-ATTR:root_device_name: server_root_device_name_update_rebuild + - os-extended-volumes:volumes_attached: os-extended-volumes:volumes_attached_update_rebuild + - os-extended-volumes:volumes_attached.id: os-extended-volumes:volumes_attached.id_update_rebuild + - os-extended-volumes:volumes_attached.delete_on_termination: os-extended-volumes:volumes_attached.delete_on_termination_update_rebuild + - OS-SRV-USG:launched_at: OS-SRV-USG:launched_at_update_rebuild + - OS-SRV-USG:terminated_at: OS-SRV-USG:terminated_at_update_rebuild + - security_groups: security_groups_obj_update_rebuild + - security_group.name: name_update_rebuild + - host_status: host_status_update_rebuild + +**Example Rebuild Server (rebuild Action) (v2.75)** + +.. literalinclude:: ../../doc/api_samples/servers/v2.75/server-action-rebuild-resp.json :language: javascript + Remove (Disassociate) Floating Ip (removeFloatingIp Action) (DEPRECATED) ========================================================================= @@ -710,6 +800,7 @@ Response .. literalinclude:: ../../doc/api_samples/os-rescue/server-rescue.json :language: javascript + Resize Server (resize Action) ============================= @@ -719,12 +810,6 @@ Resizes a server. Specify the ``resize`` action in the request body. -A successfully resized server shows a ``VERIFY_RESIZE`` status, -``RESIZED`` VM status, and ``finished`` migration status. 
If you set the -``resize_confirm_window`` option of the Compute service to an integer value, -the Compute service automatically confirms the resize operation after -the set interval in seconds. - **Preconditions** You can only resize a server when its status is ``ACTIVE`` or ``SHUTOFF``. @@ -732,6 +817,18 @@ You can only resize a server when its status is ``ACTIVE`` or ``SHUTOFF``. If the server is locked, you must have administrator privileges to resize the server. +**Asynchronous Postconditions** + +A successfully resized server shows a ``VERIFY_RESIZE`` status and ``finished`` +migration status. If the cloud has configured the `resize_confirm_window`_ +option of the Compute service to a positive value, the Compute service +automatically confirms the resize operation after the configured interval. + +.. _resize_confirm_window: https://docs.openstack.org/nova/latest/configuration/config.html#DEFAULT.resize_confirm_window + +.. note:: There is a `known limitation `__ + that ephemeral disks are not resized. + Normal response codes: 202 Error response codes: badRequest(400), unauthorized(401), forbidden(403), diff --git a/api-ref/source/servers-admin-action.inc b/api-ref/source/servers-admin-action.inc index e86e3e63f16..03a40d38ce5 100644 --- a/api-ref/source/servers-admin-action.inc +++ b/api-ref/source/servers-admin-action.inc @@ -8,7 +8,8 @@ Enables administrators to perform an action on a server. Specify the action in the request body. You can inject network information into, migrate, live-migrate, -reset networking on, and reset the state of a server. +reset networking on, reset the state of a server, +and evacuate a server from a failed host to a new host. Inject Network Information (injectNetworkInfo Action) @@ -67,12 +68,12 @@ this parameter, the scheduler chooses a host. **Asynchronous Postconditions** -The server goes to a ``VERIFY_RESIZE`` status, ``RESIZED`` VM status, -and ``finished`` migration status after a successful cold migration -and then must be confirmed or reverted. If you set the -``resize_confirm_window`` option of the Compute service to a positive integer -value, the Compute service automatically confirms the migrate operation -after the set interval in seconds. +A successfully migrated server shows a ``VERIFY_RESIZE`` status and ``finished`` +migration status. If the cloud has configured the `resize_confirm_window`_ +option of the Compute service to a positive value, the Compute service +automatically confirms the migrate operation after the configured interval. + +.. _resize_confirm_window: https://docs.openstack.org/nova/latest/configuration/config.html#DEFAULT.resize_confirm_window Policy defaults enable only users with the administrative role to perform this operation. Cloud providers can change these permissions @@ -136,6 +137,10 @@ Nova responds immediately, and no pre-live-migration checks are returned. The instance will not immediately change state to ``ERROR``, if a failure of the live-migration checks occurs. +Starting from API version 2.68, the ``force`` parameter is no longer accepted +as this could not be meaningfully supported by servers with complex resource +allocations. + Normal response codes: 202 Error response codes: badRequest(400), unauthorized(401), forbidden(403) @@ -165,17 +170,19 @@ Response If successful, this method does not return content in the response body. 
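
.. note:: As a minimal sketch (not an official sample file), a
   live-migration request that honors the 2.68 removal of ``force`` and
   lets the scheduler pick the destination host could look like this;
   ``"block_migration": "auto"`` is available since microversion 2.25::

       {
           "os-migrateLive": {
               "host": null,
               "block_migration": "auto"
           }
       }
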
-Reset Networking On A Server (resetNetwork Action) -================================================== +Reset Networking On A Server (resetNetwork Action) (DEPRECATED) +=============================================================== .. rest_method:: POST /servers/{server_id}/action Resets networking on a server. -.. note:: +.. warning:: - Only the XenServer driver implements this feature and only if the guest - has the XenAPI agent in the targeted server. + This action was only supported by the XenAPI virt driver, which was + deprecated in the 20.0.0 (Train) release and removed in the 22.0.0 + (Victoria) release. This action should be avoided in new applications. It + was removed in the 23.0.0 (Wallaby) release. Specify the ``resetNetwork`` action in the request body. @@ -186,7 +193,7 @@ through the ``policy.json`` file. Normal response codes: 202 Error response codes: unauthorized(401), forbidden(403), itemNotFound(404), -conflict(409) +conflict(409), gone(410) Request ------- @@ -206,6 +213,7 @@ Response If successful, this method does not return content in the response body. +.. _os-resetState: Reset Server State (os-resetState Action) ========================================= diff --git a/api-ref/source/servers-remote-consoles.inc b/api-ref/source/servers-remote-consoles.inc index 7bcf96b17c0..c8515d3315b 100644 --- a/api-ref/source/servers-remote-consoles.inc +++ b/api-ref/source/servers-remote-consoles.inc @@ -1,13 +1,13 @@ .. -*- rst -*- -====================== -Server Remote Consoles -====================== +================= + Server Consoles +================= -Create server remote console. +Manage server consoles. -Create Remote Console -===================== +Create Console +============== .. rest_method:: POST /servers/{server_id}/remote-consoles @@ -17,9 +17,7 @@ The API provides a unified request for creating a remote console. The user can get a URL to connect the console from this API. The URL includes the token which is used to get permission to access the console. Servers may support different console protocols. To return a remote console using a specific -protocol, such as RDP, set the ``protocol`` parameter to ``rdp``. For the same -protocol, there may be different connection types such as ``vnc protocol and -novnc type`` or ``vnc protocol and xvpvnc type``. +protocol, such as RDP, set the ``protocol`` parameter to ``rdp``. Normal response codes: 200 @@ -56,3 +54,45 @@ Response .. literalinclude:: ../../doc/api_samples/os-remote-consoles/v2.6/create-vnc-console-resp.json :language: javascript + + +Show Console Connection Information +=================================== + +.. rest_method:: GET /os-console-auth-tokens/{console_token} + +Given the console authentication token for a server, shows the related +connection information. + +This method used to be available only for the ``rdp-html5`` console type before +microversion 2.31. Starting from microversion 2.31 it's available for all +console types. + +Normal response codes: 200 + +Error response codes: badRequest(400), unauthorized(401), forbidden(403), +itemNotFound(404) + +Request +------- + +.. rest_parameters:: parameters.yaml + + - console_token: console_token + + +Response +-------- + +.. rest_parameters:: parameters.yaml + + - console: console + - instance_uuid: instance_id_body + - host: console_host + - port: port_number + - internal_access_path: internal_access_path + +**Example Show Console Authentication Token** + +.. 
literalinclude:: ../../doc/api_samples/os-console-auth-tokens/get-console-connect-info-get-resp.json + :language: javascript diff --git a/api-ref/source/servers.inc b/api-ref/source/servers.inc index 8883f9af0ac..547a71e9146 100644 --- a/api-ref/source/servers.inc +++ b/api-ref/source/servers.inc @@ -99,8 +99,10 @@ List Servers .. rest_method:: GET /servers -Lists IDs, names, and links for all servers. +Lists IDs, names, and links for servers. +By default the servers are filtered using the project ID associated +with the authenticated request. Servers contain a status attribute that indicates the current server state. You can filter on the server status when you complete a list @@ -137,13 +139,12 @@ body. The possible server status values are: - ``SOFT_DELETED``. The server is marked as deleted but the disk images are still available to restore. - ``SUSPENDED``. The server is suspended, either by request or - necessity. This status appears for only the XenServer/XCP, KVM, and - ESXi hypervisors. Administrative users can suspend an instance if it - is infrequently used or to perform system maintenance. When you - suspend an instance, its VM state is stored on disk, all memory is - written to disk, and the virtual machine is stopped. Suspending an - instance is similar to placing a device in hibernation; memory and - vCPUs become available to create other instances. + necessity. When you suspend a server, its state is stored + on disk, all memory is written to disk, and the server is stopped. + Suspending a server is similar to placing a device in hibernation and its + occupied resource will not be freed but rather kept for when the server is + resumed. If a server is infrequently used and the occupied resource needs + to be freed to create other servers, it should be shelved. - ``UNKNOWN``. The state of the server is unknown. Contact your cloud provider. - ``VERIFY_RESIZE``. System is awaiting confirmation that the server @@ -154,9 +155,10 @@ There is whitelist for valid filter keys. Any filter key other than from whitelist will be silently ignored. - For non-admin users, whitelist is different from admin users whitelist. - Valid whitelist for non-admin users includes + The valid whitelist can be configured using the + ``os_compute_api:servers:allow_all_filters`` policy rule. By default, + the valid whitelist for non-admin users includes - - ``all_tenants`` - ``changes-since`` - ``flavor`` - ``image`` @@ -169,11 +171,31 @@ whitelist will be silently ignored. - ``status`` - ``tags`` (New in version 2.26) - ``tags-any`` (New in version 2.26) - + - ``changes-before`` (New in version 2.66) + - ``locked`` (New in version 2.73) + - ``availability_zone`` (New in version 2.83) + - ``config_drive`` (New in version 2.83) + - ``key_name`` (New in version 2.83) + - ``created_at`` (New in version 2.83) + - ``launched_at`` (New in version 2.83) + - ``terminated_at`` (New in version 2.83) + - ``power_state`` (New in version 2.83) + - ``task_state`` (New in version 2.83) + - ``vm_state`` (New in version 2.83) + - ``progress`` (New in version 2.83) + - ``user_id`` (New in version 2.83) - For admin user, whitelist includes all filter keys mentioned in :ref:`list-server-request` Section. +.. note:: Starting with microversion 2.69 if server details cannot be loaded + due to a transient condition in the deployment like infrastructure failure, + the response body for those unavailable servers will be missing keys. 
Normal response codes: 200

Error response codes: badRequest(400), unauthorized(401),
@@ -230,6 +252,8 @@ Request
  - not-tags-any: not_tags_any_query
  - tags: tags_query
  - tags-any: tags_any_query
+  - changes-before: changes_before_server
+  - locked: locked_query_server

Response
--------
@@ -247,6 +271,15 @@ Response
.. literalinclude:: ../../doc/api_samples/servers/servers-list-resp.json
   :language: javascript

+**Example List Servers (2.69)**
+
+This is a sample response for the servers from the non-responsive part of the
+deployment. The responses for the available server records will be normal
+without any missing keys.
+
+.. literalinclude:: ../../doc/api_samples/servers/v2.69/servers-list-resp.json
+   :language: javascript
+
Create Server
=============
@@ -271,13 +304,13 @@ When you create a server, the response shows only the server ID, its links,
and the admin password. You can get additional attributes through subsequent
``GET`` requests on the server.

-Include the ``block-device-mapping-v2`` parameter in the create
+Include the ``block_device_mapping_v2`` parameter in the create
request body to boot a server from a volume.

Include the ``key_name`` parameter in the create request body to add a
keypair to the server when you create it. To create a keypair, make a
`create keypair
-<https://developer.openstack.org/api-ref/compute/#create-or-import-keypair>`__
+<https://docs.openstack.org/api-ref/compute/#create-or-import-keypair>`__
request.

.. note:: Starting with microversion 2.37 the ``networks`` field is required.

@@ -335,7 +368,6 @@ Request

.. rest_parameters:: parameters.yaml

-
  - server: server
  - flavorRef: flavorRef
  - name: server_name
@@ -361,6 +393,7 @@ Request
  - block_device_mapping_v2.uuid: block_device_uuid
  - block_device_mapping_v2.volume_size: volume_size
  - block_device_mapping_v2.tag: device_tag_bdm
+  - block_device_mapping_v2.volume_type: device_volume_type
  - config_drive: config_drive
  - imageRef: imageRef
  - key_name: key_name
@@ -370,8 +403,11 @@ Request
  - security_groups: security_groups
  - user_data: user_data
  - description: server_description
+  - hostname: server_hostname_req
  - tags: server_tags_create
  - trusted_image_certificates: server_trusted_image_certificates_create_req
+  - host: server_host_create
+  - hypervisor_hostname: server_hypervisor_hostname_create
  - os:scheduler_hints: os:scheduler_hints
  - os:scheduler_hints.build_near_host_ip: os:scheduler_hints_build_near_host_ip
  - os:scheduler_hints.cidr: os:scheduler_hints_cidr
@@ -402,6 +438,16 @@ Request

.. literalinclude:: ../../doc/api_samples/servers/v2.63/server-create-req.json
   :language: javascript

+**Example Create Server With Host and Hypervisor Hostname (v2.74)**
+
+.. literalinclude:: ../../doc/api_samples/servers/v2.74/server-create-req-with-host-and-node.json
+   :language: javascript
+
+**Example Create Server With Hostname (v2.90)**
+
+.. literalinclude:: ../../doc/api_samples/servers/v2.90/server-create-req.json
+   :language: javascript
+
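As a complement to the boot-from-volume note above, this is a hedged sketch of a create request whose root disk comes from an existing bootable volume; the names, UUIDs and the ``"networks": "auto"`` choice are placeholders, not values from the rendered samples:

.. code-block:: python

   # Hedged sketch: boot a new server from an existing bootable volume.
   import requests

   TOKEN = "..."  # Keystone token (placeholder)
   COMPUTE_URL = "http://openstack.example.com/v2.1"

   body = {
       "server": {
           "name": "bfv-server",
           "flavorRef": "1",
           # Required from microversion 2.37; 'auto' lets Nova pick.
           "networks": "auto",
           # No imageRef: the root disk is supplied by the volume below.
           "block_device_mapping_v2": [{
               "boot_index": 0,
               "source_type": "volume",
               "destination_type": "volume",
               "uuid": "95b2c383-e1a2-4f9e-b7c1-1d6f2fbd3a4b",  # volume
               "delete_on_termination": False,
           }],
       }
   }

   resp = requests.post(
       f"{COMPUTE_URL}/servers",
       json=body,
       headers={"X-Auth-Token": TOKEN,
                "OpenStack-API-Version": "compute 2.37"},
   )
   print(resp.json()["server"]["id"])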
Response
--------
@@ -483,7 +529,7 @@ List Servers Detailed

.. rest_method:: GET /servers/detail

-For each server, shows server details including configuration drive,
+For each server, shows server details including config drive,
extended status, and server usage information.

The extended status information appears in the OS-EXT-STS:vm_state,
@@ -492,12 +538,16 @@ OS-EXT-STS:power_state, and OS-EXT-STS:task_state attributes.

The server usage information appears in the OS-SRV-USG:launched_at and
OS-SRV-USG:terminated_at attributes.

-To hide addresses information for instances in a certain state, set
-the osapi_hide_server_address_states configuration option. Set this
-option to a valid VM state in the nova.conf configuration file.
-
HostId is unique per account and is not globally unique.

+.. note:: Starting with microversion 2.69 if server details cannot be loaded
+  due to a transient condition in the deployment like infrastructure failure,
+  the response body for those unavailable servers will be missing keys. See
+  `handling down cells
+  <https://docs.openstack.org/api-guide/compute/down_cells.html>`__
+  section of the Compute API guide for more information on the keys that
+  would be returned in the partial constructs.
+
Normal response codes: 200

Error response codes: badRequest(400), unauthorized(401),
@@ -552,6 +602,8 @@ Request
  - not-tags-any: not_tags_any_query
  - tags: tags_query
  - tags-any: tags_any_query
+  - changes-before: changes_before_server
+  - locked: locked_query_server

Response
--------
@@ -586,8 +638,15 @@ Response
  - OS-DCF:diskConfig: disk_config
  - OS-EXT-AZ:availability_zone: OS-EXT-AZ:availability_zone
  - OS-EXT-SRV-ATTR:host: OS-EXT-SRV-ATTR:host
+  - OS-EXT-SRV-ATTR:hostname: server_hostname
  - OS-EXT-SRV-ATTR:hypervisor_hostname: OS-EXT-SRV-ATTR:hypervisor_hostname
  - OS-EXT-SRV-ATTR:instance_name: OS-EXT-SRV-ATTR:instance_name
+  - OS-EXT-SRV-ATTR:kernel_id: server_kernel_id
+  - OS-EXT-SRV-ATTR:launch_index: server_launch_index
+  - OS-EXT-SRV-ATTR:ramdisk_id: server_ramdisk_id
+  - OS-EXT-SRV-ATTR:reservation_id: server_reservation_id
+  - OS-EXT-SRV-ATTR:root_device_name: server_root_device_name
+  - OS-EXT-SRV-ATTR:user_data: server_user_data
  - OS-EXT-STS:power_state: OS-EXT-STS:power_state
  - OS-EXT-STS:task_state: OS-EXT-STS:task_state
  - OS-EXT-STS:vm_state: OS-EXT-STS:vm_state
@@ -596,8 +655,6 @@ Response
  - os-extended-volumes:volumes_attached.delete_on_termination: os-extended-volumes:volumes_attached.delete_on_termination
  - OS-SRV-USG:launched_at: OS-SRV-USG:launched_at
  - OS-SRV-USG:terminated_at: OS-SRV-USG:terminated_at
-  - security_groups: security_groups_obj
-  - security_group.name: name
  - status: server_status
  - tenant_id: tenant_id_body
  - updated: updated
@@ -608,23 +665,28 @@ Response
  - fault.message: fault_message
  - fault.details: fault_details
  - progress: progress
+  - security_groups: security_groups_obj_optional
+  - security_group.name: name
  - servers_links: servers_links
-  - OS-EXT-SRV-ATTR:hostname: server_hostname
-  - OS-EXT-SRV-ATTR:reservation_id: server_reservation_id
-  - OS-EXT-SRV-ATTR:launch_index: server_launch_index
-  - OS-EXT-SRV-ATTR:kernel_id: server_kernel_id
-  - OS-EXT-SRV-ATTR:ramdisk_id: server_ramdisk_id
-  - OS-EXT-SRV-ATTR:root_device_name: server_root_device_name
-  - OS-EXT-SRV-ATTR:user_data: server_user_data
  - locked: locked
  - host_status: host_status
  - description: server_description_resp
  - tags: tags
  - trusted_image_certificates: server_trusted_image_certificates_resp
+  - locked_reason: locked_reason_resp

-**Example List Servers Detailed (2.63)**
-
-.. literalinclude:: /../../doc/api_samples/servers/v2.63/servers-details-resp.json
+**Example List Servers Detailed (2.73)**
+
+.. literalinclude:: /../../doc/api_samples/servers/v2.73/servers-details-resp.json
+   :language: javascript
+
+**Example List Servers Detailed (2.69)**
+
+This is a sample response for the servers from the non-responsive part of the
+deployment. The responses for the available server records will be normal
+without any missing keys.
+
+.. literalinclude:: ../../doc/api_samples/servers/v2.69/servers-details-resp.json
   :language: javascript
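Because a 2.69 partial construct can omit most of the keys shown above, a robust client should not index into every field unconditionally. A minimal defensive sketch (placeholder names again):

.. code-block:: python

   # Hedged sketch: tolerate partial server records from down cells (2.69+).
   import requests

   TOKEN = "..."  # Keystone token (placeholder)
   COMPUTE_URL = "http://openstack.example.com/v2.1"

   resp = requests.get(
       f"{COMPUTE_URL}/servers/detail",
       headers={"X-Auth-Token": TOKEN,
                "OpenStack-API-Version": "compute 2.69"},
   )
   for server in resp.json()["servers"]:
       # Records from unreachable cells are missing most keys, so fall
       # back to defaults instead of assuming every field is present.
       name = server.get("name", "<unavailable>")
       status = server.get("status", "UNKNOWN")
       print(server["id"], name, status)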
@@ -641,14 +703,20 @@ The extended status information appears in the ``OS-EXT-STS:vm_state``, ``OS-EXT
The server usage information appears in the ``OS-SRV-USG:launched_at`` and
``OS-SRV-USG:terminated_at`` attributes.

-To hide ``addresses`` information for instances in a certain state, set the ``osapi_hide_server_address_states`` configuration option. Set this option to a valid VM state in the ``nova.conf`` configuration file.
-
HostId is unique per account and is not globally unique.

**Preconditions**

The server must exist.

+.. note:: Starting with microversion 2.69 if the server detail cannot be loaded
+  due to a transient condition in the deployment like infrastructure failure,
+  the response body for the unavailable server will be missing keys. See
+  `handling down cells
+  <https://docs.openstack.org/api-guide/compute/down_cells.html>`__
+  section of the Compute API guide for more information on the keys that
+  would be returned in the partial constructs.
+
Normal response codes: 200

Error response codes: unauthorized(401), forbidden(403),
@@ -694,8 +762,15 @@ Response
  - OS-DCF:diskConfig: disk_config
  - OS-EXT-AZ:availability_zone: OS-EXT-AZ:availability_zone
  - OS-EXT-SRV-ATTR:host: OS-EXT-SRV-ATTR:host
+  - OS-EXT-SRV-ATTR:hostname: server_hostname
  - OS-EXT-SRV-ATTR:hypervisor_hostname: OS-EXT-SRV-ATTR:hypervisor_hostname
  - OS-EXT-SRV-ATTR:instance_name: OS-EXT-SRV-ATTR:instance_name
+  - OS-EXT-SRV-ATTR:kernel_id: server_kernel_id
+  - OS-EXT-SRV-ATTR:launch_index: server_launch_index
+  - OS-EXT-SRV-ATTR:ramdisk_id: server_ramdisk_id
+  - OS-EXT-SRV-ATTR:reservation_id: server_reservation_id
+  - OS-EXT-SRV-ATTR:root_device_name: server_root_device_name
+  - OS-EXT-SRV-ATTR:user_data: server_user_data
  - OS-EXT-STS:power_state: OS-EXT-STS:power_state
  - OS-EXT-STS:task_state: OS-EXT-STS:task_state
  - OS-EXT-STS:vm_state: OS-EXT-STS:vm_state
@@ -704,8 +779,6 @@ Response
  - os-extended-volumes:volumes_attached.delete_on_termination: os-extended-volumes:volumes_attached.delete_on_termination
  - OS-SRV-USG:launched_at: OS-SRV-USG:launched_at
  - OS-SRV-USG:terminated_at: OS-SRV-USG:terminated_at
-  - security_groups: security_groups_obj
-  - security_group.name: name
  - status: server_status
  - tenant_id: tenant_id_body
  - updated: updated
@@ -716,22 +789,28 @@ Response
  - fault.message: fault_message
  - fault.details: fault_details
  - progress: progress
-  - OS-EXT-SRV-ATTR:hostname: server_hostname
-  - OS-EXT-SRV-ATTR:reservation_id: server_reservation_id
-  - OS-EXT-SRV-ATTR:launch_index: server_launch_index
-  - OS-EXT-SRV-ATTR:kernel_id: server_kernel_id
-  - OS-EXT-SRV-ATTR:ramdisk_id: server_ramdisk_id
-  - OS-EXT-SRV-ATTR:root_device_name: server_root_device_name
-  - OS-EXT-SRV-ATTR:user_data: server_user_data
+  - security_groups: security_groups_obj_optional
+  - security_group.name: name
  - locked: locked
  - host_status: host_status
  - description: server_description_resp
  - tags: tags
  - trusted_image_certificates: server_trusted_image_certificates_resp
+  - server_groups: server_groups_2_71
+  - locked_reason: locked_reason_resp

-**Example Show Server Details (2.63)**
-
-.. literalinclude:: ../../doc/api_samples/servers/v2.63/server-get-resp.json
+**Example Show Server Details (2.73)**
+
+.. literalinclude:: ../../doc/api_samples/servers/v2.73/server-get-resp.json
+   :language: javascript
+
+**Example Show Server Details (2.69)**
+
+This is a sample response for a server from the non-responsive part of the
+deployment. The responses for available server records will be normal
+without any missing keys.
+
+.. literalinclude:: ../../doc/api_samples/servers/v2.69/server-get-resp.json
   :language: javascript

Update Server
@@ -756,11 +835,14 @@ Request

  - accessIPv4: accessIPv4_in
  - accessIPv6: accessIPv6_in
  - name: server_name_optional
+  - hostname: server_hostname_req
  - OS-DCF:diskConfig: OS-DCF:diskConfig
  - description: server_description

-.. note:: You can specify parameters to update independently.
-    e.g. ``name`` only, ``description`` only, ``name`` and ``description``, etc.
+.. note::
+
+    You can specify parameters to update independently.
+    e.g. ``name`` only, ``description`` only, ``name`` and ``description``, etc.
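The note's point about independent updates is easy to demonstrate. This minimal sketch (placeholder endpoint, token and UUID) changes only the description and leaves the name untouched; server descriptions are available starting with microversion 2.19:

.. code-block:: python

   # Hedged sketch: update only the description of an existing server.
   import requests

   TOKEN = "..."  # Keystone token (placeholder)
   COMPUTE_URL = "http://openstack.example.com/v2.1"
   SERVER_ID = "9168b536-cd40-4630-b43f-b259807c6e87"  # placeholder UUID

   resp = requests.put(
       f"{COMPUTE_URL}/servers/{SERVER_ID}",
       json={"server": {"description": "demo database node"}},
       headers={"X-Auth-Token": TOKEN,
                # Descriptions were introduced in microversion 2.19.
                "OpenStack-API-Version": "compute 2.19"},
   )
   # Only 'description' changed; 'name' and the rest are untouched.
   print(resp.json()["server"]["name"])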
**Example Update Server (2.63)**

@@ -810,10 +892,36 @@ Response
  - description: server_description_resp
  - tags: tags
  - trusted_image_certificates: server_trusted_image_certificates_resp
-
-**Example Update Server (2.63)**
-
-.. literalinclude:: ../../doc/api_samples/servers/v2.63/server-update-resp.json
+  - server_groups: server_groups_2_71
+  - locked_reason: locked_reason_resp
+  - config_drive: config_drive_resp_update_rebuild
+  - OS-EXT-AZ:availability_zone: OS-EXT-AZ:availability_zone_update_rebuild
+  - OS-EXT-SRV-ATTR:host: OS-EXT-SRV-ATTR:host_update_rebuild
+  - OS-EXT-SRV-ATTR:hostname: server_hostname_update_rebuild
+  - OS-EXT-SRV-ATTR:hypervisor_hostname: OS-EXT-SRV-ATTR:hypervisor_hostname_update_rebuild
+  - OS-EXT-SRV-ATTR:instance_name: OS-EXT-SRV-ATTR:instance_name_update_rebuild
+  - OS-EXT-SRV-ATTR:kernel_id: server_kernel_id_update_rebuild
+  - OS-EXT-SRV-ATTR:launch_index: server_launch_index_update_rebuild
+  - OS-EXT-SRV-ATTR:ramdisk_id: server_ramdisk_id_update_rebuild
+  - OS-EXT-SRV-ATTR:reservation_id: server_reservation_id_update_rebuild
+  - OS-EXT-SRV-ATTR:root_device_name: server_root_device_name_update_rebuild
+  - OS-EXT-SRV-ATTR:user_data: server_user_data_update
+  - OS-EXT-STS:power_state: OS-EXT-STS:power_state_update_rebuild
+  - OS-EXT-STS:task_state: OS-EXT-STS:task_state_update_rebuild
+  - OS-EXT-STS:vm_state: OS-EXT-STS:vm_state_update_rebuild
+  - os-extended-volumes:volumes_attached: os-extended-volumes:volumes_attached_update_rebuild
+  - os-extended-volumes:volumes_attached.id: os-extended-volumes:volumes_attached.id_update_rebuild
+  - os-extended-volumes:volumes_attached.delete_on_termination: os-extended-volumes:volumes_attached.delete_on_termination_update_rebuild
+  - OS-SRV-USG:launched_at: OS-SRV-USG:launched_at_update_rebuild
+  - OS-SRV-USG:terminated_at: OS-SRV-USG:terminated_at_update_rebuild
+  - security_groups: security_groups_obj_update_rebuild
+  - security_group.name: name_update_rebuild
+  - host_status: host_status_update_rebuild
+  - key_name: key_name_resp_update
+
+**Example Update Server (2.75)**
+
+.. literalinclude:: ../../doc/api_samples/servers/v2.75/server-update-resp.json
   :language: javascript

Delete Server
diff --git a/api-ref/source/versions.inc b/api-ref/source/versions.inc
index 7af60604f25..f8636d3b40c 100644
--- a/api-ref/source/versions.inc
+++ b/api-ref/source/versions.inc
@@ -14,7 +14,7 @@ supports versioning. There are two kinds of versions in Nova.

For more details about Microversions, please reference:
`Microversions
-<https://developer.openstack.org/api-guide/compute/microversions.html>`_
+<https://docs.openstack.org/api-guide/compute/microversions.html>`_

.. note:: The maximum microversion supported by each release varies.
  Please reference:
@@ -66,7 +66,7 @@ v2.1 API is lower than listed below.

Show Details of Specific API Version
====================================

-.. rest_method:: GET /{api_version}
+.. rest_method:: GET /{api_version}/
This gets the details of a specific API at its root. Nearly all this
information exists at the API root, so this is mostly a redundant
@@ -102,7 +102,7 @@ Response

Response Example
----------------

-This is an example of a ``GET /v2.1`` on a relatively current server.
+This is an example of a ``GET /v2.1/`` on a relatively current server.

.. literalinclude:: /../../doc/api_samples/versions/v21-version-get-resp.json
   :language: javascript
diff --git a/babel.cfg b/babel.cfg
deleted file mode 100644
index 15cd6cb76b9..00000000000
--- a/babel.cfg
+++ /dev/null
@@ -1,2 +0,0 @@
-[python: **.py]
-
diff --git a/bindep.txt b/bindep.txt
index 4621c7fad69..3a4d7bef806 100644
--- a/bindep.txt
+++ b/bindep.txt
@@ -2,13 +2,19 @@
# see https://docs.openstack.org/infra/bindep/ for additional information.

build-essential [platform:dpkg test]
+# fonts-freefont-otf is needed for pdf docs builds with the 'xelatex' engine
+fonts-freefont-otf [pdf-docs]
gcc [platform:rpm test]
# gettext and graphviz are needed by doc builds only. For transition,
# have them in both doc and test.
# TODO(jaegerandi): Remove test once infra scripts are updated.
gettext [doc test]
graphviz [doc test]
+# librsvg2 is needed for sphinxcontrib-svg2pdfconverter in docs builds.
+librsvg2-tools [doc platform:rpm]
+librsvg2-bin [doc platform:dpkg]
language-pack-en [platform:ubuntu]
+latexmk [pdf-docs]
libffi-dev [platform:dpkg test]
libffi-devel [platform:rpm test]
libmysqlclient-dev [platform:dpkg]
@@ -18,10 +24,15 @@ libxml2-dev [platform:dpkg test]
libxslt-devel [platform:rpm test]
libxslt1-dev [platform:dpkg test]
locales [platform:debian]
-mysql [platform:rpm]
+mysql [platform:rpm !platform:redhat]
mysql-client [platform:dpkg]
-mysql-devel [platform:rpm test]
-mysql-server
+mysql-devel [platform:rpm !platform:redhat test]
+mysql-server [!platform:redhat]
+mariadb-devel [platform:rpm platform:redhat test]
+mariadb-server [platform:rpm platform:redhat]
+openssh-client [platform:dpkg]
+openssh-clients [platform:rpm]
+openssl
pkg-config [platform:dpkg test]
pkgconfig [platform:rpm test]
postgresql
@@ -29,11 +40,18 @@ postgresql-client [platform:dpkg]
postgresql-devel [platform:rpm test]
postgresql-server [platform:rpm]
python-dev [platform:dpkg test]
-python-devel [platform:rpm test]
-python3-all [platform:dpkg !platform:ubuntu-precise]
-python3-all-dev [platform:dpkg !platform:ubuntu-precise]
-python3-devel [platform:fedora]
-python34-devel [platform:centos]
+python3-all [platform:dpkg]
+python3-all-dev [platform:dpkg]
+python3 [platform:rpm test]
+python3-devel [platform:rpm test]
sqlite-devel [platform:rpm test]
+texlive [pdf-docs]
+texlive-latex-recommended [pdf-docs]
+texlive-xetex [pdf-docs]
libpcre3-dev [platform:dpkg test]
pcre-devel [platform:rpm test]
+# Nova uses lsscsi via os-brick. Due to bindep usage in devstack and
+# elsewhere, we add it here to make sure it is picked up and available at
+# runtime and in unit tests. The net result is that lsscsi will be
+# installed for any nova installation.
+lsscsi
diff --git a/concourse_unit_test_task b/concourse_unit_test_task
new file mode 100644
index 00000000000..1ee9526f87b
--- /dev/null
+++ b/concourse_unit_test_task
@@ -0,0 +1,8 @@
+export DEBIAN_FRONTEND=noninteractive && \
+apt-get update && \
+apt-get install -y build-essential python3-pip python3-dev git libpcre++-dev gettext libpq-dev && \
+pip install -U pip && \
+pip install tox && \
+cd source && \
+export UPPER_CONSTRAINTS_FILE=https://raw.githubusercontent.com/sapcc/requirements/stable/xena-m3/upper-constraints.txt && \
+tox -e pep8,py
diff --git a/contrib/profile_caching_scheduler.sh b/contrib/profile_caching_scheduler.sh
deleted file mode 100755
index df38ab12bfe..00000000000
--- a/contrib/profile_caching_scheduler.sh
+++ /dev/null
@@ -1,40 +0,0 @@
-#!/bin/bash
-# Copyright (c) 2014 Rackspace Hosting
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-#
-# This runs a unit test that uses pycallgraph
-# to profile the select_destinations call
-# in the CachingScheduler
-#
-# For this script to work please run:
-# python setup.py develop
-# pip install -r requirements.txt
-# pip install -r test-requirements.txt
-# pip install pycallgraph
-# export EVENTLET_NO_GREENDNS='yes'
-#
-BASEDIR=$(dirname $0)
-TEST=$BASEDIR/../nova/tests/scheduler/test_caching_scheduler.py
-echo
-echo "Running this unit test file as a python script:"
-echo $TEST
-
-python $TEST
-
-RESULTDIR=$(pwd)
-echo
-echo "For profiler result see: "
-echo $RESULTDIR/scheduler.png
-echo
diff --git a/custom-requirements.txt b/custom-requirements.txt
new file mode 100644
index 00000000000..c3732972506
--- /dev/null
+++ b/custom-requirements.txt
@@ -0,0 +1,11 @@
+dumb-init
+python-memcached
+pymemcache
+mitmproxy
+python-ironicclient
+
+-e git+https://github.com/sapcc/raven-python.git@ccloud#egg=raven
+-e git+https://github.com/sapcc/openstack-watcher-middleware.git#egg=watcher-middleware
+-e git+https://github.com/sapcc/openstack-audit-middleware.git#egg=audit-middleware
+-e git+https://github.com/sapcc/python-agentliveness.git#egg=agentliveness
+-e git+https://github.com/sapcc/oslo.vmware.git@stable/xena-m3#egg=oslo.vmware
diff --git a/devstack/nova-multi-cell-exclude-list.txt b/devstack/nova-multi-cell-exclude-list.txt
new file mode 100644
index 00000000000..a61229c9064
--- /dev/null
+++ b/devstack/nova-multi-cell-exclude-list.txt
@@ -0,0 +1,12 @@
+# --exclude-list contents for the nova-multi-cell job defined in .zuul.yaml
+# See: https://stestr.readthedocs.io/en/latest/MANUAL.html#test-selection
+
+# Exclude tempest.scenario.test_network tests since they are slow and
+# only test advanced neutron features, unrelated to multi-cell testing.
+^tempest.scenario.test_network
+
+# Also exclude resize and migrate tests with qos ports as qos is currently
+# not supported in the cross cell resize case. 
See +# https://bugs.launchpad.net/nova/+bug/1907511 for details +test_migrate_with_qos_min_bw_allocation +test_resize_with_qos_min_bw_allocation diff --git a/devstack/tempest-dsvm-caching-scheduler-rc b/devstack/tempest-dsvm-caching-scheduler-rc deleted file mode 100644 index cc09af6b82d..00000000000 --- a/devstack/tempest-dsvm-caching-scheduler-rc +++ /dev/null @@ -1,30 +0,0 @@ -# -# This script is executed in the OpenStack CI nova-caching-scheduler job. -# It's used to configure which tempest tests actually get run. You can find -# the CI job configuration under playbooks/legacy/nova-caching-scheduler/. -# - -# Construct a regex to use when limiting scope of tempest -# to avoid features unsupported by Nova's CachingScheduler support. - -# When adding entries to the regex, add a comment explaining why -# since this list should not grow. - -r="^(?!.*" -# exclude the slow tag -r="$r(?:.*\[.*\bslow\b.*\])" - -# NOTE(mriedem): ServersAdminTestJSON.test_create_server_with_scheduling_hint -# is skipped because it relies on the SameHostFilter which relies on the -# HostState object which might be stale when that filter runs. -# tempest.api.compute.admin.test_servers.ServersAdminTestJSON.test_create_server_with_scheduling_hint -r="$r|(?:.*id\-fdcd9b33\-0903\-4e00\-a1f7\-b5f6543068d6.*)" -# NOTE(mriedem): AggregatesAdminTestJSON.test_aggregate_add_host_create_server_with_az -# is skipped because it creates an aggregate and adds a host to it, then -# creates a server in that aggregate but fails to schedule because the caching -# scheduler hasn't updated the host's aggregates view yet. -# tempest.api.compute.admin.test_aggregates.AggregatesAdminTestJSON.test_aggregate_add_host_create_server_with_az -r="$r|(?:.*id\-96be03c7\-570d\-409c\-90f8\-e4db3c646996.*)" -r="$r).*$" - -export DEVSTACK_GATE_TEMPEST_REGEX="$r" diff --git a/devstack/tempest-dsvm-cells-rc b/devstack/tempest-dsvm-cells-rc deleted file mode 100644 index fadcc621118..00000000000 --- a/devstack/tempest-dsvm-cells-rc +++ /dev/null @@ -1,120 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -# -# This script is executed in the OpenStack CI *tempest-dsvm-cells job. -# It's used to configure which tempest tests actually get run. You can find -# the CI job configuration here: -# -# http://git.openstack.org/cgit/openstack-infra/project-config/tree/jenkins/jobs/devstack-gate.yaml -# -# NOTE(sdague): tempest (because of testr) only supports and additive -# regex for specifying test selection. As such this is a series of -# negative assertions ?: for strings. -# -# Being a regex, an unescaped '.' matches any character, so those -# should be escaped. There is no need to specify .* at the end of a -# pattern, as it's handled by the final match. - -# Test idempotent ids are used for specific tests because -# these are unchanged if the test name changes. - -# Construct a regex to use when limiting scope of tempest -# to avoid features unsupported by Nova Cells. 
-r="^(?!.*" - -# skip security group tests -r="$r(?:tempest\.api\.compute\.security_groups)" - -# skip aggregates tests -r="$r|(?:tempest\.api\.compute\.admin\.test_aggregates)" -r="$r|(?:tempest\.scenario\.test_aggregates_basic_ops)" - -# skip availability zone tests -r="$r|(?:(tempest\.api\.compute\.)(servers\.|admin\.)(test_availability_zone*))" - -# exclude the slow tag -r="$r|(?:.*\[.*\bslow\b.*\])" - -# skip current regressions; when adding new entries to this list, add the bug -# reference with it since this list should shrink - -# NOTE(mriedem): Resize tests are skipped in devstack until custom flavors -# in devstack used in Tempest runs are synced to the cells database. -# NOTE(mriedem): Rescue tests are skipped in devstack. They rely on floating -# IPs and security groups, and rescue might not work with cells v1 anyway due -# to synchronization issues. - -# tempest.api.compute.admin.test_networks.NetworksTest.test_get_network)" -r="$r|(?:.*id\-d206d211\-8912\-486f\-86e2\-a9d090d1f416.*)" -# tempest.api.compute.admin.test_networks.NetworksTest.test_list_all_networks)" -r="$r|(?:.*id\-df3d1046\-6fa5\-4b2c\-ad0c\-cfa46a351cb9.*)" -# tempest.api.compute.servers.test_create_server.ServersTestJSON.test_create_server_with_scheduler_hint_group -r="$r|(?:.*id\-ed20d3fb\-9d1f\-4329\-b160\-543fbd5d9811.*)" -# tempest.api.compute.servers.test_virtual_interfaces.VirtualInterfacesTestJSON.test_list_virtual_interfaces -r="$r|(?:.*id\-96c4e2ef\-5e4d\-4d7f\-87f5\-fed6dca18016.*)" -# tempest.api.compute.test_networks.ComputeNetworksTest.test_list_networks -r="$r|(?:.*id\-3fe07175\-312e\-49a5\-a623\-5f52eeada4c2.*)" -# tempest.scenario.test_minimum_basic.TestMinimumBasicScenario.test_minimum_basic_scenario -r="$r|(?:.*id\-bdbb5441\-9204\-419d\-a225\-b4fdbfb1a1a8.*)" -# tempest.scenario.test_encrypted_cinder_volumes.TestEncryptedCinderVolumes.test_encrypted_cinder_volumes_cryptsetup -r="$r|(?:.*id\-cbc752ed\-b716\-4717\-910f\-956cce965722.*)" -# tempest.scenario.test_encrypted_cinder_volumes.TestEncryptedCinderVolumes.test_encrypted_cinder_volumes_luks -r="$r|(?:.*id\-79165fb4\-5534\-4b9d\-8429\-97ccffb8f86e.*)" -# tempest.scenario.test_server_basic_ops.TestServerBasicOps.test_server_basicops -r="$r|(?:.*id\-7fff3fb3\-91d8\-4fd0\-bd7d\-0204f1f180ba.*)" -# tempest.scenario.test_snapshot_pattern.TestSnapshotPattern.test_snapshot_pattern -r="$r|(?:.*id\-608e604b\-1d63\-4a82\-8e3e\-91bc665c90b4.*)" -# tempest.api.compute.admin.test_hosts.HostsAdminTestJSON.test_show_host_detail -r="$r|(?:.*id\-38adbb12\-aee2\-4498\-8aec\-329c72423aa4.*)" -# tempest.api.compute.test_tenant_networks.ComputeTenantNetworksTest.test_list_show_tenant_networks -r="$r|(?:.*id\-edfea98e\-bbe3\-4c7a\-9739\-87b986baff26.*)" -# https://bugs.launchpad.net/nova/+bug/1489581 -r="$r|(?:tempest\.scenario\.test_volume_boot_pattern\.)" -# https://bugs.launchpad.net/nova/+bug/1466696 - Cells: Race between instance 'unlock' and 'stop' can cause 'stop' to fail -# tempest.api.compute.servers.test_server_actions.ServerActionsTestJSON.test_lock_unlock_server -r="$r|(?:.*id\-80a8094c\-211e\-440a\-ab88\-9e59d556c7ee.*)" -# scheduler hints apparently don't work in devstack cells -# tempest.scenario.test_server_multinode.TestServerMultinode.test_schedule_to_all_nodes -r="$r|(?:.*id\-9cecbe35\-b9d4\-48da\-a37e\-7ce70aa43d30.*)" -# test_stamp_pattern uses security groups which aren't supported in cells v1 -# tempest.scenario.test_stamp_pattern.TestStampPattern.test_stamp_pattern -r="$r|(?:.*id\-10fd234a\-515c\-41e5\-b092\-8323060598c5.*)" -# Bug 
1709985: rebuild randomly times out, probably due to sync issues -# tempest.api.compute.admin.test_servers.ServersAdminTestJSON.test_rebuild_server_in_error_state -r="$r|(?:.*id\-682cb127\-e5bb\-4f53\-87ce\-cb9003604442.*)" -# tempest.api.compute.servers.test_disk_config.ServerDiskConfigTestJSON.test_rebuild_server_with_auto_disk_config -r="$r|(?:.*id\-9c9fae77\-4feb\-402f\-8450\-bf1c8b609713.*)" -# tempest.api.compute.servers.test_disk_config.ServerDiskConfigTestJSON.test_rebuild_server_with_manual_disk_config -r="$r|(?:.*id\-bef56b09\-2e8c\-4883\-a370\-4950812f430e.*)" -# tempest.api.compute.servers.test_server_actions.ServerActionsTestJSON.test_rebuild_server -r="$r|(?:.*id\-aaa6cdf3\-55a7\-461a\-add9\-1c8596b9a07c.*)" -# tempest.api.compute.servers.test_servers.ServerShowV247Test.test_update_rebuild_list_server -r="$r|(?:.*id\-8de397c2\-57d0\-4b90\-aa30\-e5d668f21a8b.*)" -# tempest.api.compute.servers.test_servers_microversions.ServerShowV254Test.test_rebuild_server -r="$r|(?:.*id\-09170a98\-4940\-4637\-add7\-1a35121f1a5a.*)" -# tempest.api.compute.servers.test_servers_microversions.ServerShowV257Test.test_rebuild_server -r="$r|(?:.*id\-803df848\-080a\-4261\-8f11\-b020cd9b6f60.*)" -# tempest.api.compute.servers.test_server_actions.ServerActionsTestJSON.test_rebuild_server_in_stop_state -r="$r|(?:.*id\-30449a88\-5aff\-4f9b\-9866\-6ee9b17f906d.*)" -# tempest.api.compute.servers.test_servers.ServerShowV263Test.test_show_update_rebuild_list_server -r="$r|(?:.*id\-71b8e3d5\-11d2\-494f\-b917\-b094a4afed3c.*)" -# NOTE(mriedem): cells v1 api doesn't route os-server-external-events -# tempest.api.volume.test_volumes_extend.VolumesExtendAttachedTest.test_extend_attached_volume -r="$r|(?:.*id\-301f5a30\-1c6f\-4ea0\-be1a\-91fd28d44354.*)" -r="$r).*$" - -export DEVSTACK_GATE_TEMPEST_REGEX="$r" - -# Don't run the cells v1 job with ssh validation since it uses floating IPs -# by default which cells v1 doesn't support. -export DEVSTACK_LOCAL_CONFIG="TEMPEST_RUN_VALIDATION=False" diff --git a/devstack/tempest-dsvm-lvm-rc b/devstack/tempest-dsvm-lvm-rc deleted file mode 100644 index b4c5643b15b..00000000000 --- a/devstack/tempest-dsvm-lvm-rc +++ /dev/null @@ -1,62 +0,0 @@ -# Copyright 2016 IBM Corp. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -# -# This script is executed in the OpenStack CI *tempest-dsvm-lvm job. -# It's used to configure which tempest tests actually get run. You can find -# the CI job configuration here: -# -# http://git.openstack.org/cgit/openstack-infra/project-config/tree/jenkins/jobs/lvm.yaml -# - -# Construct a regex to use when limiting scope of tempest -# to avoid features unsupported by Nova's LVM support. - -# Note that several tests are disabled by the use of tempest -# feature toggles in devstack/lib/tempest for an lvm config, -# so this regex is not entirely representative of what's excluded. - -# When adding entries to the regex, add a comment explaining why -# since this list should not grow. - -r="^(?!.*" -r="$r(?:.*\[.*\bslow\b.*\])" - -# Only run compute API tests. 
The ! here looks confusing but it's to negate -# the ! at the beginning of the regex since the rest of this is meant to be -# a backlist. -r="$r|(?!.*api.compute.*)" - -# NOTE(mriedem): resize of non-volume-backed lvm instances does not yet work -# tempest.api.compute.admin.test_migrations.MigrationsAdminTest.test_list_migrations_in_flavor_resize_situation -r="$r|(?:.*id\-1b512062\-8093\-438e\-b47a\-37d2f597cd64.*)" -# tempest.api.compute.admin.test_migrations.MigrationsAdminTest.test_resize_server_revert_deleted_flavor -r="$r|(?:.*id\-33f1fec3\-ba18\-4470\-8e4e\-1d888e7c3593.*)" -# tempest.api.compute.servers.test_delete_server.DeleteServersTestJSON.test_delete_server_while_in_verify_resize_state -r="$r|(?:.*id\-ab0c38b4\-cdd8\-49d3\-9b92\-0cb898723c01.*)" -# tempest.api.compute.servers.test_disk_config.ServerDiskConfigTestJSON.test_resize_server_from_auto_to_manual -r="$r|(?:.*id\-693d16f3\-556c\-489a\-8bac\-3d0ca2490bad.*)" -# tempest.api.compute.servers.test_disk_config.ServerDiskConfigTestJSON.test_resize_server_from_manual_to_auto -r="$r|(?:.*id\-414e7e93\-45b5\-44bc\-8e03\-55159c6bfc97.*)" -# tempest.api.compute.servers.test_server_actions.ServerActionsTestJSON.test_resize_server_confirm -r="$r|(?:.*id\-1499262a\-9328\-4eda\-9068\-db1ac57498d2.*)" -# tempest.api.compute.servers.test_server_actions.ServerActionsTestJSON.test_resize_server_confirm_from_stopped -r="$r|(?:.*id\-138b131d\-66df\-48c9\-a171\-64f45eb92962.*)" -# tempest.api.compute.servers.test_server_actions.ServerActionsTestJSON.test_resize_server_revert -r="$r|(?:.*id\-c03aab19\-adb1\-44f5\-917d\-c419577e9e68.*)" -# tempest.api.compute.servers.test_server_actions.ServerActionsTestJSON.test_resize_server_revert_with_volume_attached -r="$r|(?:.*id\-fbbf075f\-a812\-4022\-bc5c\-ccb8047eef12.*)" -r="$r).*$" - -export DEVSTACK_GATE_TEMPEST_REGEX="$r" diff --git a/devstack/tempest-dsvm-tempest-xen-rc b/devstack/tempest-dsvm-tempest-xen-rc deleted file mode 100644 index 669c7054674..00000000000 --- a/devstack/tempest-dsvm-tempest-xen-rc +++ /dev/null @@ -1,40 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -# -# This script is executed in the Xen Project OpenStack CI dsvm-tempest-xen job. -# It's used to configure which tempest tests actually get run. You can find -# the CI job configuration here: -# -# https://xenbits.xen.org/gitweb/?p=openstack/ci-loop-config.git;a=blob;f=jenkins/jobs/jobs.yaml;hb=HEAD -# - -# When adding entries to the regex, add a comment explaining why -# since this list should not grow. 
- -# exclude the slow tag -r="\[.*\bslow\b.*\]" - -# volume_swap fail -# https://bugs.launchpad.net/nova/+bug/1676499 -r="$r|tempest\.api\.compute\.admin\.test_volume_swap\.TestVolumeSwap\.test_volume_swap" - -# Because paused guest can not be snapshot -# https://bugs.launchpad.net/nova/+bug/1675787 -r="$r|tempest\.api\.compute\.images\.test_images\.ImagesTestJSON\.test_create_image_from_paused_server" - -# Cannot boot from encrypted volume -# https://bugs.launchpad.net/nova/+bug/1702897 -r="$r|tempest\.scenario\.test_volume_boot_pattern\.TestVolumeBootPattern\.test_boot_server_from_encrypted_volume_luks" - -r="^(?!.*(?:$r))(?:^tempest\.(?:api|scenario|thirdparty))" -export DEVSTACK_GATE_TEMPEST_REGEX="$r" diff --git a/doc/README.rst b/doc/README.rst index 88b6c1d6ba1..fbb88995d18 100644 --- a/doc/README.rst +++ b/doc/README.rst @@ -8,7 +8,7 @@ Contributor developer docs are built to: https://docs.openstack.org/nova/latest/ API guide docs are built to: -https://developer.openstack.org/api-guide/compute/ +https://docs.openstack.org/api-guide/compute/ For more details, see the "Building the Documentation" section of doc/source/contributor/development-environment.rst. diff --git a/doc/api_samples/flavor-extra-specs/flavor-extra-specs-create-req.json b/doc/api_samples/flavor-extra-specs/flavor-extra-specs-create-req.json index 63fc8738b03..856fc38c01f 100644 --- a/doc/api_samples/flavor-extra-specs/flavor-extra-specs-create-req.json +++ b/doc/api_samples/flavor-extra-specs/flavor-extra-specs-create-req.json @@ -1,6 +1,6 @@ { "extra_specs": { - "key1": "value1", - "key2": "value2" + "hw:cpu_policy": "shared", + "hw:numa_nodes": "1" } -} \ No newline at end of file +} diff --git a/doc/api_samples/flavor-extra-specs/flavor-extra-specs-create-resp.json b/doc/api_samples/flavor-extra-specs/flavor-extra-specs-create-resp.json index 63fc8738b03..856fc38c01f 100644 --- a/doc/api_samples/flavor-extra-specs/flavor-extra-specs-create-resp.json +++ b/doc/api_samples/flavor-extra-specs/flavor-extra-specs-create-resp.json @@ -1,6 +1,6 @@ { "extra_specs": { - "key1": "value1", - "key2": "value2" + "hw:cpu_policy": "shared", + "hw:numa_nodes": "1" } -} \ No newline at end of file +} diff --git a/doc/api_samples/flavor-extra-specs/flavor-extra-specs-get-resp.json b/doc/api_samples/flavor-extra-specs/flavor-extra-specs-get-resp.json index e71755fe675..02284618e52 100644 --- a/doc/api_samples/flavor-extra-specs/flavor-extra-specs-get-resp.json +++ b/doc/api_samples/flavor-extra-specs/flavor-extra-specs-get-resp.json @@ -1,3 +1,3 @@ { - "key1": "value1" -} \ No newline at end of file + "hw:numa_nodes": "1" +} diff --git a/doc/api_samples/flavor-extra-specs/flavor-extra-specs-list-resp.json b/doc/api_samples/flavor-extra-specs/flavor-extra-specs-list-resp.json index 63fc8738b03..856fc38c01f 100644 --- a/doc/api_samples/flavor-extra-specs/flavor-extra-specs-list-resp.json +++ b/doc/api_samples/flavor-extra-specs/flavor-extra-specs-list-resp.json @@ -1,6 +1,6 @@ { "extra_specs": { - "key1": "value1", - "key2": "value2" + "hw:cpu_policy": "shared", + "hw:numa_nodes": "1" } -} \ No newline at end of file +} diff --git a/doc/api_samples/flavor-extra-specs/flavor-extra-specs-update-req.json b/doc/api_samples/flavor-extra-specs/flavor-extra-specs-update-req.json index a40d79e320c..eca615335a8 100644 --- a/doc/api_samples/flavor-extra-specs/flavor-extra-specs-update-req.json +++ b/doc/api_samples/flavor-extra-specs/flavor-extra-specs-update-req.json @@ -1,3 +1,3 @@ { - "key1": "new_value1" -} \ No newline at end of file + 
"hw:numa_nodes": "2" +} diff --git a/doc/api_samples/flavor-extra-specs/flavor-extra-specs-update-resp.json b/doc/api_samples/flavor-extra-specs/flavor-extra-specs-update-resp.json index a40d79e320c..eca615335a8 100644 --- a/doc/api_samples/flavor-extra-specs/flavor-extra-specs-update-resp.json +++ b/doc/api_samples/flavor-extra-specs/flavor-extra-specs-update-resp.json @@ -1,3 +1,3 @@ { - "key1": "new_value1" -} \ No newline at end of file + "hw:numa_nodes": "2" +} diff --git a/doc/api_samples/flavor-manage/v2.75/flavor-create-post-req.json b/doc/api_samples/flavor-manage/v2.75/flavor-create-post-req.json new file mode 100644 index 00000000000..0d9926d7202 --- /dev/null +++ b/doc/api_samples/flavor-manage/v2.75/flavor-create-post-req.json @@ -0,0 +1,11 @@ +{ + "flavor": { + "name": "test_flavor", + "ram": 1024, + "vcpus": 2, + "disk": 10, + "id": "10", + "rxtx_factor": 2.0, + "description": "test description" + } +} diff --git a/doc/api_samples/flavor-manage/v2.75/flavor-create-post-resp.json b/doc/api_samples/flavor-manage/v2.75/flavor-create-post-resp.json new file mode 100644 index 00000000000..49dfd0c082a --- /dev/null +++ b/doc/api_samples/flavor-manage/v2.75/flavor-create-post-resp.json @@ -0,0 +1,26 @@ +{ + "flavor": { + "OS-FLV-DISABLED:disabled": false, + "disk": 10, + "OS-FLV-EXT-DATA:ephemeral": 0, + "os-flavor-access:is_public": true, + "id": "10", + "links": [ + { + "href": "http://openstack.example.com/v2/6f70656e737461636b20342065766572/flavors/10", + "rel": "self" + }, + { + "href": "http://openstack.example.com/6f70656e737461636b20342065766572/flavors/10", + "rel": "bookmark" + } + ], + "name": "test_flavor", + "ram": 1024, + "swap": 0, + "rxtx_factor": 2.0, + "vcpus": 2, + "description": "test description", + "extra_specs": {} + } +} diff --git a/doc/api_samples/flavor-manage/v2.75/flavor-update-req.json b/doc/api_samples/flavor-manage/v2.75/flavor-update-req.json new file mode 100644 index 00000000000..93c8e1e8ab2 --- /dev/null +++ b/doc/api_samples/flavor-manage/v2.75/flavor-update-req.json @@ -0,0 +1,5 @@ +{ + "flavor": { + "description": "updated description" + } +} diff --git a/doc/api_samples/flavor-manage/v2.75/flavor-update-resp.json b/doc/api_samples/flavor-manage/v2.75/flavor-update-resp.json new file mode 100644 index 00000000000..4e92b10582c --- /dev/null +++ b/doc/api_samples/flavor-manage/v2.75/flavor-update-resp.json @@ -0,0 +1,26 @@ +{ + "flavor": { + "OS-FLV-DISABLED:disabled": false, + "disk": 1, + "OS-FLV-EXT-DATA:ephemeral": 0, + "os-flavor-access:is_public": true, + "id": "1", + "links": [ + { + "href": "http://openstack.example.com/v2.1/6f70656e737461636b20342065766572/flavors/1", + "rel": "self" + }, + { + "href": "http://openstack.example.com/6f70656e737461636b20342065766572/flavors/1", + "rel": "bookmark" + } + ], + "name": "m1.tiny", + "ram": 512, + "swap": 0, + "vcpus": 1, + "rxtx_factor": 1.0, + "description": "updated description", + "extra_specs": {} + } +} diff --git a/doc/api_samples/flavors/v2.61/flavor-get-resp.json b/doc/api_samples/flavors/v2.61/flavor-get-resp.json index 124110adb7b..324b7711b86 100644 --- a/doc/api_samples/flavors/v2.61/flavor-get-resp.json +++ b/doc/api_samples/flavors/v2.61/flavor-get-resp.json @@ -22,8 +22,8 @@ "rxtx_factor": 1.0, "description": "test description", "extra_specs": { - "key1": "value1", - "key2": "value2" + "hw:cpu_policy": "shared", + "hw:numa_nodes": "1" } } } diff --git a/doc/api_samples/flavors/v2.61/flavors-detail-resp.json b/doc/api_samples/flavors/v2.61/flavors-detail-resp.json index 
f615998043f..d4efe491f25 100644 --- a/doc/api_samples/flavors/v2.61/flavors-detail-resp.json +++ b/doc/api_samples/flavors/v2.61/flavors-detail-resp.json @@ -143,8 +143,7 @@ "rxtx_factor": 1.0, "description": null, "extra_specs": { - "hw:mem_page_size": "2048", - "hw:cpu_policy": "dedicated" + "hw:numa_nodes": "1" } }, { @@ -170,8 +169,8 @@ "rxtx_factor": 1.0, "description": "test description", "extra_specs": { - "key1": "value1", - "key2": "value2" + "hw:cpu_policy": "shared", + "hw:numa_nodes": "1" } } ] diff --git a/doc/api_samples/flavors/v2.75/flavor-get-resp.json b/doc/api_samples/flavors/v2.75/flavor-get-resp.json new file mode 100644 index 00000000000..1d3c709b722 --- /dev/null +++ b/doc/api_samples/flavors/v2.75/flavor-get-resp.json @@ -0,0 +1,29 @@ +{ + "flavor": { + "OS-FLV-DISABLED:disabled": false, + "disk": 20, + "OS-FLV-EXT-DATA:ephemeral": 0, + "os-flavor-access:is_public": true, + "id": "7", + "links": [ + { + "href": "http://openstack.example.com/v2/6f70656e737461636b20342065766572/flavors/7", + "rel": "self" + }, + { + "href": "http://openstack.example.com/6f70656e737461636b20342065766572/flavors/7", + "rel": "bookmark" + } + ], + "name": "m1.small.description", + "ram": 2048, + "swap": 0, + "vcpus": 1, + "rxtx_factor": 1.0, + "description": "test description", + "extra_specs": { + "hw:cpu_policy": "shared", + "hw:numa_nodes": "1" + } + } +} diff --git a/doc/api_samples/flavors/v2.75/flavors-detail-resp.json b/doc/api_samples/flavors/v2.75/flavors-detail-resp.json new file mode 100644 index 00000000000..35eac681e76 --- /dev/null +++ b/doc/api_samples/flavors/v2.75/flavors-detail-resp.json @@ -0,0 +1,177 @@ +{ + "flavors": [ + { + "OS-FLV-DISABLED:disabled": false, + "disk": 1, + "OS-FLV-EXT-DATA:ephemeral": 0, + "os-flavor-access:is_public": true, + "id": "1", + "links": [ + { + "href": "http://openstack.example.com/v2/6f70656e737461636b20342065766572/flavors/1", + "rel": "self" + }, + { + "href": "http://openstack.example.com/6f70656e737461636b20342065766572/flavors/1", + "rel": "bookmark" + } + ], + "name": "m1.tiny", + "ram": 512, + "swap": 0, + "vcpus": 1, + "rxtx_factor": 1.0, + "description": null, + "extra_specs": {} + }, + { + "OS-FLV-DISABLED:disabled": false, + "disk": 20, + "OS-FLV-EXT-DATA:ephemeral": 0, + "os-flavor-access:is_public": true, + "id": "2", + "links": [ + { + "href": "http://openstack.example.com/v2/6f70656e737461636b20342065766572/flavors/2", + "rel": "self" + }, + { + "href": "http://openstack.example.com/6f70656e737461636b20342065766572/flavors/2", + "rel": "bookmark" + } + ], + "name": "m1.small", + "ram": 2048, + "swap": 0, + "vcpus": 1, + "rxtx_factor": 1.0, + "description": null, + "extra_specs": {} + }, + { + "OS-FLV-DISABLED:disabled": false, + "disk": 40, + "OS-FLV-EXT-DATA:ephemeral": 0, + "os-flavor-access:is_public": true, + "id": "3", + "links": [ + { + "href": "http://openstack.example.com/v2/6f70656e737461636b20342065766572/flavors/3", + "rel": "self" + }, + { + "href": "http://openstack.example.com/6f70656e737461636b20342065766572/flavors/3", + "rel": "bookmark" + } + ], + "name": "m1.medium", + "ram": 4096, + "swap": 0, + "vcpus": 2, + "rxtx_factor": 1.0, + "description": null, + "extra_specs": {} + }, + { + "OS-FLV-DISABLED:disabled": false, + "disk": 80, + "OS-FLV-EXT-DATA:ephemeral": 0, + "os-flavor-access:is_public": true, + "id": "4", + "links": [ + { + "href": "http://openstack.example.com/v2/6f70656e737461636b20342065766572/flavors/4", + "rel": "self" + }, + { + "href": 
"http://openstack.example.com/6f70656e737461636b20342065766572/flavors/4", + "rel": "bookmark" + } + ], + "name": "m1.large", + "ram": 8192, + "swap": 0, + "vcpus": 4, + "rxtx_factor": 1.0, + "description": null, + "extra_specs": {} + }, + { + "OS-FLV-DISABLED:disabled": false, + "disk": 160, + "OS-FLV-EXT-DATA:ephemeral": 0, + "os-flavor-access:is_public": true, + "id": "5", + "links": [ + { + "href": "http://openstack.example.com/v2/6f70656e737461636b20342065766572/flavors/5", + "rel": "self" + }, + { + "href": "http://openstack.example.com/6f70656e737461636b20342065766572/flavors/5", + "rel": "bookmark" + } + ], + "name": "m1.xlarge", + "ram": 16384, + "swap": 0, + "vcpus": 8, + "rxtx_factor": 1.0, + "description": null, + "extra_specs": {} + }, + { + "OS-FLV-DISABLED:disabled": false, + "disk": 1, + "OS-FLV-EXT-DATA:ephemeral": 0, + "os-flavor-access:is_public": true, + "id": "6", + "links": [ + { + "href": "http://openstack.example.com/v2/6f70656e737461636b20342065766572/flavors/6", + "rel": "self" + }, + { + "href": "http://openstack.example.com/6f70656e737461636b20342065766572/flavors/6", + "rel": "bookmark" + } + ], + "name": "m1.tiny.specs", + "ram": 512, + "swap": 0, + "vcpus": 1, + "rxtx_factor": 1.0, + "description": null, + "extra_specs": { + "hw:numa_nodes": "1" + } + }, + { + "OS-FLV-DISABLED:disabled": false, + "disk": 20, + "OS-FLV-EXT-DATA:ephemeral": 0, + "os-flavor-access:is_public": true, + "id": "7", + "links": [ + { + "href": "http://openstack.example.com/v2/6f70656e737461636b20342065766572/flavors/7", + "rel": "self" + }, + { + "href": "http://openstack.example.com/6f70656e737461636b20342065766572/flavors/7", + "rel": "bookmark" + } + ], + "name": "m1.small.description", + "ram": 2048, + "swap": 0, + "vcpus": 1, + "rxtx_factor": 1.0, + "description": "test description", + "extra_specs": { + "hw:cpu_policy": "shared", + "hw:numa_nodes": "1" + } + } + ] +} diff --git a/doc/api_samples/flavors/v2.75/flavors-list-resp.json b/doc/api_samples/flavors/v2.75/flavors-list-resp.json new file mode 100644 index 00000000000..f368ed5c66f --- /dev/null +++ b/doc/api_samples/flavors/v2.75/flavors-list-resp.json @@ -0,0 +1,109 @@ +{ + "flavors": [ + { + "id": "1", + "links": [ + { + "href": "http://openstack.example.com/v2/6f70656e737461636b20342065766572/flavors/1", + "rel": "self" + }, + { + "href": "http://openstack.example.com/6f70656e737461636b20342065766572/flavors/1", + "rel": "bookmark" + } + ], + "name": "m1.tiny", + "description": null + }, + { + "id": "2", + "links": [ + { + "href": "http://openstack.example.com/v2/6f70656e737461636b20342065766572/flavors/2", + "rel": "self" + }, + { + "href": "http://openstack.example.com/6f70656e737461636b20342065766572/flavors/2", + "rel": "bookmark" + } + ], + "name": "m1.small", + "description": null + }, + { + "id": "3", + "links": [ + { + "href": "http://openstack.example.com/v2/6f70656e737461636b20342065766572/flavors/3", + "rel": "self" + }, + { + "href": "http://openstack.example.com/6f70656e737461636b20342065766572/flavors/3", + "rel": "bookmark" + } + ], + "name": "m1.medium", + "description": null + }, + { + "id": "4", + "links": [ + { + "href": "http://openstack.example.com/v2/6f70656e737461636b20342065766572/flavors/4", + "rel": "self" + }, + { + "href": "http://openstack.example.com/6f70656e737461636b20342065766572/flavors/4", + "rel": "bookmark" + } + ], + "name": "m1.large", + "description": null + }, + { + "id": "5", + "links": [ + { + "href": "http://openstack.example.com/v2/6f70656e737461636b20342065766572/flavors/5", 
+ "rel": "self" + }, + { + "href": "http://openstack.example.com/6f70656e737461636b20342065766572/flavors/5", + "rel": "bookmark" + } + ], + "name": "m1.xlarge", + "description": null + }, + { + "id": "6", + "links": [ + { + "href": "http://openstack.example.com/v2/6f70656e737461636b20342065766572/flavors/6", + "rel": "self" + }, + { + "href": "http://openstack.example.com/6f70656e737461636b20342065766572/flavors/6", + "rel": "bookmark" + } + ], + "name": "m1.tiny.specs", + "description": null + }, + { + "id": "7", + "links": [ + { + "href": "http://openstack.example.com/v2/6f70656e737461636b20342065766572/flavors/7", + "rel": "self" + }, + { + "href": "http://openstack.example.com/6f70656e737461636b20342065766572/flavors/7", + "rel": "bookmark" + } + ], + "name": "m1.small.description", + "description": "test description" + } + ] +} diff --git a/doc/api_samples/images/images-details-get-resp.json b/doc/api_samples/images/images-details-get-resp.json index dfe6ca16faa..034c35f0c08 100644 --- a/doc/api_samples/images/images-details-get-resp.json +++ b/doc/api_samples/images/images-details-get-resp.json @@ -207,6 +207,7 @@ } ], "metadata": { + "architecture": "x86_64", "kernel_id": "nokernel", "ramdisk_id": "nokernel" }, diff --git a/doc/api_samples/limits/limit-get-resp.json b/doc/api_samples/limits/limit-get-resp.json index 28309af04c6..f97939d2216 100644 --- a/doc/api_samples/limits/limit-get-resp.json +++ b/doc/api_samples/limits/limit-get-resp.json @@ -4,11 +4,11 @@ "maxImageMeta": 128, "maxPersonality": 5, "maxPersonalitySize": 10240, - "maxSecurityGroupRules": 20, - "maxSecurityGroups": 10, + "maxSecurityGroupRules": -1, + "maxSecurityGroups": -1, "maxServerMeta": 128, "maxTotalCores": 20, - "maxTotalFloatingIps": 10, + "maxTotalFloatingIps": -1, "maxTotalInstances": 10, "maxTotalKeypairs": 100, "maxTotalRAMSize": 51200, diff --git a/doc/api_samples/os-aggregates/v2.81/aggregate-add-host-post-req.json b/doc/api_samples/os-aggregates/v2.81/aggregate-add-host-post-req.json new file mode 100644 index 00000000000..4e6bdfef3f4 --- /dev/null +++ b/doc/api_samples/os-aggregates/v2.81/aggregate-add-host-post-req.json @@ -0,0 +1,5 @@ +{ + "add_host": { + "host": "compute" + } +} \ No newline at end of file diff --git a/doc/api_samples/os-aggregates/v2.81/aggregate-images-post-req.json b/doc/api_samples/os-aggregates/v2.81/aggregate-images-post-req.json new file mode 100644 index 00000000000..8894e97f069 --- /dev/null +++ b/doc/api_samples/os-aggregates/v2.81/aggregate-images-post-req.json @@ -0,0 +1,6 @@ +{ + "cache": + [ + {"id": "70a599e0-31e7-49b7-b260-868f441e862b"} + ] +} \ No newline at end of file diff --git a/doc/api_samples/os-aggregates/v2.81/aggregate-metadata-post-req.json b/doc/api_samples/os-aggregates/v2.81/aggregate-metadata-post-req.json new file mode 100644 index 00000000000..7331e06a8c0 --- /dev/null +++ b/doc/api_samples/os-aggregates/v2.81/aggregate-metadata-post-req.json @@ -0,0 +1,9 @@ +{ + "set_metadata": + { + "metadata": + { + "key": "value" + } + } +} \ No newline at end of file diff --git a/doc/api_samples/os-aggregates/v2.81/aggregate-post-req.json b/doc/api_samples/os-aggregates/v2.81/aggregate-post-req.json new file mode 100644 index 00000000000..624fe0c6291 --- /dev/null +++ b/doc/api_samples/os-aggregates/v2.81/aggregate-post-req.json @@ -0,0 +1,7 @@ +{ + "aggregate": + { + "name": "name", + "availability_zone": "london" + } +} \ No newline at end of file diff --git a/doc/api_samples/os-aggregates/v2.81/aggregate-post-resp.json 
b/doc/api_samples/os-aggregates/v2.81/aggregate-post-resp.json new file mode 100644 index 00000000000..2e399d9c6c4 --- /dev/null +++ b/doc/api_samples/os-aggregates/v2.81/aggregate-post-resp.json @@ -0,0 +1,12 @@ +{ + "aggregate": { + "availability_zone": "london", + "created_at": "2019-10-08T15:15:27.988513", + "deleted": false, + "deleted_at": null, + "id": 1, + "name": "name", + "updated_at": null, + "uuid": "a25e34a2-4fc1-4876-82d0-cf930fa04b82" + } +} \ No newline at end of file diff --git a/doc/api_samples/os-aggregates/v2.81/aggregate-remove-host-post-req.json b/doc/api_samples/os-aggregates/v2.81/aggregate-remove-host-post-req.json new file mode 100644 index 00000000000..e42b053009e --- /dev/null +++ b/doc/api_samples/os-aggregates/v2.81/aggregate-remove-host-post-req.json @@ -0,0 +1,5 @@ +{ + "remove_host": { + "host": "compute" + } +} \ No newline at end of file diff --git a/doc/api_samples/os-aggregates/v2.81/aggregate-update-post-req.json b/doc/api_samples/os-aggregates/v2.81/aggregate-update-post-req.json new file mode 100644 index 00000000000..0af1a37a4d9 --- /dev/null +++ b/doc/api_samples/os-aggregates/v2.81/aggregate-update-post-req.json @@ -0,0 +1,7 @@ +{ + "aggregate": + { + "name": "newname", + "availability_zone": "nova2" + } +} \ No newline at end of file diff --git a/doc/api_samples/os-aggregates/v2.81/aggregate-update-post-resp.json b/doc/api_samples/os-aggregates/v2.81/aggregate-update-post-resp.json new file mode 100644 index 00000000000..350128a1a55 --- /dev/null +++ b/doc/api_samples/os-aggregates/v2.81/aggregate-update-post-resp.json @@ -0,0 +1,16 @@ +{ + "aggregate": { + "availability_zone": "nova2", + "created_at": "2019-10-11T14:19:00.718841", + "deleted": false, + "deleted_at": null, + "hosts": [], + "id": 1, + "metadata": { + "availability_zone": "nova2" + }, + "name": "newname", + "updated_at": "2019-10-11T14:19:00.785838", + "uuid": "4e7fa22f-f6cf-4e81-a5c7-6dc485815f81" + } +} \ No newline at end of file diff --git a/doc/api_samples/os-aggregates/v2.81/aggregates-add-host-post-resp.json b/doc/api_samples/os-aggregates/v2.81/aggregates-add-host-post-resp.json new file mode 100644 index 00000000000..decbc8d365d --- /dev/null +++ b/doc/api_samples/os-aggregates/v2.81/aggregates-add-host-post-resp.json @@ -0,0 +1,18 @@ +{ + "aggregate": { + "availability_zone": "london", + "created_at": "2019-10-11T14:19:05.250053", + "deleted": false, + "deleted_at": null, + "hosts": [ + "compute" + ], + "id": 1, + "metadata": { + "availability_zone": "london" + }, + "name": "name", + "updated_at": null, + "uuid": "47832b50-a192-4900-affe-8f7fdf2d7f22" + } +} \ No newline at end of file diff --git a/doc/api_samples/os-aggregates/v2.81/aggregates-get-resp.json b/doc/api_samples/os-aggregates/v2.81/aggregates-get-resp.json new file mode 100644 index 00000000000..7d978bdf275 --- /dev/null +++ b/doc/api_samples/os-aggregates/v2.81/aggregates-get-resp.json @@ -0,0 +1,16 @@ +{ + "aggregate": { + "availability_zone": "london", + "created_at": "2019-10-11T14:19:07.366577", + "deleted": false, + "deleted_at": null, + "hosts": [], + "id": 1, + "metadata": { + "availability_zone": "london" + }, + "name": "name", + "updated_at": null, + "uuid": "7c5ff84a-c901-4733-adf8-06875e265080" + } +} \ No newline at end of file diff --git a/doc/api_samples/os-aggregates/v2.81/aggregates-list-get-resp.json b/doc/api_samples/os-aggregates/v2.81/aggregates-list-get-resp.json new file mode 100644 index 00000000000..e1b5f11539a --- /dev/null +++ 
b/doc/api_samples/os-aggregates/v2.81/aggregates-list-get-resp.json @@ -0,0 +1,20 @@ +{ + "aggregates": [ + { + "availability_zone": "london", + "created_at": "2019-10-11T14:19:07.386637", + "deleted": false, + "deleted_at": null, + "hosts": [ + "compute" + ], + "id": 1, + "metadata": { + "availability_zone": "london" + }, + "name": "name", + "updated_at": null, + "uuid": "070cb72c-f463-4f72-9c61-2c0556eb8c07" + } + ] +} \ No newline at end of file diff --git a/doc/api_samples/os-aggregates/v2.81/aggregates-metadata-post-resp.json b/doc/api_samples/os-aggregates/v2.81/aggregates-metadata-post-resp.json new file mode 100644 index 00000000000..f0860dad8ec --- /dev/null +++ b/doc/api_samples/os-aggregates/v2.81/aggregates-metadata-post-resp.json @@ -0,0 +1,17 @@ +{ + "aggregate": { + "availability_zone": "london", + "created_at": "2019-10-11T14:19:03.103465", + "deleted": false, + "deleted_at": null, + "hosts": [], + "id": 1, + "metadata": { + "availability_zone": "london", + "key": "value" + }, + "name": "name", + "updated_at": "2019-10-11T14:19:03.169058", + "uuid": "0843db7c-f161-446d-84c8-d936320da2e8" + } +} \ No newline at end of file diff --git a/doc/api_samples/os-aggregates/v2.81/aggregates-remove-host-post-resp.json b/doc/api_samples/os-aggregates/v2.81/aggregates-remove-host-post-resp.json new file mode 100644 index 00000000000..b9b5bdefcde --- /dev/null +++ b/doc/api_samples/os-aggregates/v2.81/aggregates-remove-host-post-resp.json @@ -0,0 +1,16 @@ +{ + "aggregate": { + "availability_zone": "london", + "created_at": "2019-10-11T14:19:05.250053", + "deleted": false, + "deleted_at": null, + "hosts": [], + "id": 1, + "metadata": { + "availability_zone": "london" + }, + "name": "name", + "updated_at": null, + "uuid": "47832b50-a192-4900-affe-8f7fdf2d7f22" + } +} \ No newline at end of file diff --git a/doc/api_samples/os-attach-interfaces/v2.70/attach-interfaces-create-net_id-req.json b/doc/api_samples/os-attach-interfaces/v2.70/attach-interfaces-create-net_id-req.json new file mode 100644 index 00000000000..d64d7fbc3e0 --- /dev/null +++ b/doc/api_samples/os-attach-interfaces/v2.70/attach-interfaces-create-net_id-req.json @@ -0,0 +1,11 @@ +{ + "interfaceAttachment": { + "fixed_ips": [ + { + "ip_address": "192.168.1.3" + } + ], + "net_id": "3cb9bc59-5699-4588-a4b1-b87f96708bc6", + "tag": "public" + } +} \ No newline at end of file diff --git a/doc/api_samples/os-attach-interfaces/v2.70/attach-interfaces-create-req.json b/doc/api_samples/os-attach-interfaces/v2.70/attach-interfaces-create-req.json new file mode 100644 index 00000000000..4e7285d0e98 --- /dev/null +++ b/doc/api_samples/os-attach-interfaces/v2.70/attach-interfaces-create-req.json @@ -0,0 +1,6 @@ +{ + "interfaceAttachment": { + "port_id": "ce531f90-199f-48c0-816c-13e38010b442", + "tag": "public" + } +} \ No newline at end of file diff --git a/doc/api_samples/os-attach-interfaces/v2.70/attach-interfaces-create-resp.json b/doc/api_samples/os-attach-interfaces/v2.70/attach-interfaces-create-resp.json new file mode 100644 index 00000000000..0b0cf34a912 --- /dev/null +++ b/doc/api_samples/os-attach-interfaces/v2.70/attach-interfaces-create-resp.json @@ -0,0 +1,15 @@ +{ + "interfaceAttachment": { + "fixed_ips": [ + { + "ip_address": "192.168.1.3", + "subnet_id": "f8a6e8f8-c2ec-497c-9f23-da9616de54ef" + } + ], + "mac_addr": "fa:16:3e:4c:2c:30", + "net_id": "3cb9bc59-5699-4588-a4b1-b87f96708bc6", + "port_id": "ce531f90-199f-48c0-816c-13e38010b442", + "port_state": "ACTIVE", + "tag": "public" + } +} \ No newline at end of file diff 
diff --git a/doc/api_samples/os-attach-interfaces/v2.70/attach-interfaces-list-resp.json b/doc/api_samples/os-attach-interfaces/v2.70/attach-interfaces-list-resp.json
new file mode 100644
index 00000000000..61de503fb4c
--- /dev/null
+++ b/doc/api_samples/os-attach-interfaces/v2.70/attach-interfaces-list-resp.json
@@ -0,0 +1,17 @@
+{
+    "interfaceAttachments": [
+        {
+            "fixed_ips": [
+                {
+                    "ip_address": "192.168.1.3",
+                    "subnet_id": "f8a6e8f8-c2ec-497c-9f23-da9616de54ef"
+                }
+            ],
+            "mac_addr": "fa:16:3e:4c:2c:30",
+            "net_id": "3cb9bc59-5699-4588-a4b1-b87f96708bc6",
+            "port_id": "ce531f90-199f-48c0-816c-13e38010b442",
+            "port_state": "ACTIVE",
+            "tag": "public"
+        }
+    ]
+}
\ No newline at end of file
diff --git a/doc/api_samples/os-attach-interfaces/v2.70/attach-interfaces-show-resp.json b/doc/api_samples/os-attach-interfaces/v2.70/attach-interfaces-show-resp.json
new file mode 100644
index 00000000000..0b0cf34a912
--- /dev/null
+++ b/doc/api_samples/os-attach-interfaces/v2.70/attach-interfaces-show-resp.json
@@ -0,0 +1,15 @@
+{
+    "interfaceAttachment": {
+        "fixed_ips": [
+            {
+                "ip_address": "192.168.1.3",
+                "subnet_id": "f8a6e8f8-c2ec-497c-9f23-da9616de54ef"
+            }
+        ],
+        "mac_addr": "fa:16:3e:4c:2c:30",
+        "net_id": "3cb9bc59-5699-4588-a4b1-b87f96708bc6",
+        "port_id": "ce531f90-199f-48c0-816c-13e38010b442",
+        "port_state": "ACTIVE",
+        "tag": "public"
+    }
+}
\ No newline at end of file
diff --git a/doc/api_samples/os-availability-zone/availability-zone-detail-resp.json b/doc/api_samples/os-availability-zone/availability-zone-detail-resp.json
index 5612a310b34..22181387783 100644
--- a/doc/api_samples/os-availability-zone/availability-zone-detail-resp.json
+++ b/doc/api_samples/os-availability-zone/availability-zone-detail-resp.json
@@ -9,20 +9,6 @@
                         "updated_at": null
                     }
                 },
-                "consoleauth": {
-                    "nova-consoleauth": {
-                        "active": true,
-                        "available": true,
-                        "updated_at": null
-                    }
-                },
-                "network": {
-                    "nova-network": {
-                        "active": true,
-                        "available": true,
-                        "updated_at": null
-                    }
-                },
                 "scheduler": {
                     "nova-scheduler": {
                         "active": true,
diff --git a/doc/api_samples/os-availability-zone/availability-zone-post-req.json b/doc/api_samples/os-availability-zone/availability-zone-post-req.json
deleted file mode 100644
index e19960f6be3..00000000000
--- a/doc/api_samples/os-availability-zone/availability-zone-post-req.json
+++ /dev/null
@@ -1,17 +0,0 @@
-{
-    "server" : {
-        "name" : "new-server-test",
-        "imageRef" : "70a599e0-31e7-49b7-b260-868f441e862b",
-        "flavorRef" : "1",
-        "metadata" : {
-            "My Server Name" : "Apache1"
-        },
-        "availability_zone": "nova",
-        "personality" : [
-            {
-                "path" : "/etc/banner.txt",
-                "contents" : "ICAgICAgDQoiQSBjbG91ZCBkb2VzIG5vdCBrbm93IHdoeSBpdCBtb3ZlcyBpbiBqdXN0IHN1Y2ggYSBkaXJlY3Rpb24gYW5kIGF0IHN1Y2ggYSBzcGVlZC4uLkl0IGZlZWxzIGFuIGltcHVsc2lvbi4uLnRoaXMgaXMgdGhlIHBsYWNlIHRvIGdvIG5vdy4gQnV0IHRoZSBza3kga25vd3MgdGhlIHJlYXNvbnMgYW5kIHRoZSBwYXR0ZXJucyBiZWhpbmQgYWxsIGNsb3VkcywgYW5kIHlvdSB3aWxsIGtub3csIHRvbywgd2hlbiB5b3UgbGlmdCB5b3Vyc2VsZiBoaWdoIGVub3VnaCB0byBzZWUgYmV5b25kIGhvcml6b25zLiINCg0KLVJpY2hhcmQgQmFjaA=="
-            }
-        ]
-    }
-}
diff --git a/doc/api_samples/os-availability-zone/availability-zone-post-resp.json b/doc/api_samples/os-availability-zone/availability-zone-post-resp.json
deleted file mode 100644
index a13b8b9a5b3..00000000000
--- a/doc/api_samples/os-availability-zone/availability-zone-post-resp.json
+++ /dev/null
@@ -1,22 +0,0 @@
-{
-    "server": {
-        "adminPass": "k4pKvTfcA4gY",
-        "id": "3e45fa2a-5204-466f-a684-c2a8e1c82d7f",
-        "links": [
-            {
"http://openstack.example.com/v2/6f70656e737461636b20342065766572/servers/3e45fa2a-5204-466f-a684-c2a8e1c82d7f", - "rel": "self" - }, - { - "href": "http://openstack.example.com/6f70656e737461636b20342065766572/servers/3e45fa2a-5204-466f-a684-c2a8e1c82d7f", - "rel": "bookmark" - } - ], - "OS-DCF:diskConfig": "AUTO", - "security_groups": [ - { - "name": "default" - } - ] - } -} \ No newline at end of file diff --git a/doc/api_samples/os-cells/cells-list-empty-resp.json b/doc/api_samples/os-cells/cells-list-empty-resp.json deleted file mode 100644 index 5325a4e855e..00000000000 --- a/doc/api_samples/os-cells/cells-list-empty-resp.json +++ /dev/null @@ -1,3 +0,0 @@ -{ - "cells": [] -} \ No newline at end of file diff --git a/doc/api_samples/os-cloudpipe/cloud-pipe-get-resp.json b/doc/api_samples/os-cloudpipe/cloud-pipe-get-resp.json index caaa22774ad..e6bc92a6268 100644 --- a/doc/api_samples/os-cloudpipe/cloud-pipe-get-resp.json +++ b/doc/api_samples/os-cloudpipe/cloud-pipe-get-resp.json @@ -3,7 +3,7 @@ { "created_at": "2012-11-27T17:18:01Z", "instance_id": "27deecdb-baa3-4a26-9c82-32994b815b01", - "internal_ip": "192.168.0.3", + "internal_ip": "192.168.1.30", "project_id": "fa1765bd-a352-49c7-a6b7-8ee108a3cb0c", "public_ip": "127.0.0.1", "public_port": 22, diff --git a/doc/api_samples/os-evacuate/v2.68/server-evacuate-find-host-req.json b/doc/api_samples/os-evacuate/v2.68/server-evacuate-find-host-req.json new file mode 100644 index 00000000000..bb3a11d9799 --- /dev/null +++ b/doc/api_samples/os-evacuate/v2.68/server-evacuate-find-host-req.json @@ -0,0 +1,5 @@ +{ + "evacuate": { + "adminPass": "MySecretPass" + } +} \ No newline at end of file diff --git a/doc/api_samples/os-evacuate/v2.68/server-evacuate-req.json b/doc/api_samples/os-evacuate/v2.68/server-evacuate-req.json new file mode 100644 index 00000000000..f67555075aa --- /dev/null +++ b/doc/api_samples/os-evacuate/v2.68/server-evacuate-req.json @@ -0,0 +1,6 @@ +{ + "evacuate": { + "host": "testHost", + "adminPass": "MySecretPass" + } +} \ No newline at end of file diff --git a/doc/api_samples/os-floating-ips/floating-ips-create-req.json b/doc/api_samples/os-floating-ips/floating-ips-create-req.json index 511b009bede..4bba875a5c4 100644 --- a/doc/api_samples/os-floating-ips/floating-ips-create-req.json +++ b/doc/api_samples/os-floating-ips/floating-ips-create-req.json @@ -1,3 +1,3 @@ { - "pool": "nova" -} \ No newline at end of file + "pool": "public" +} diff --git a/doc/api_samples/os-floating-ips/floating-ips-create-resp.json b/doc/api_samples/os-floating-ips/floating-ips-create-resp.json index fe161a7dd12..33c2c350b86 100644 --- a/doc/api_samples/os-floating-ips/floating-ips-create-resp.json +++ b/doc/api_samples/os-floating-ips/floating-ips-create-resp.json @@ -1,9 +1,9 @@ { "floating_ip": { "fixed_ip": null, - "id": 1, + "id": "8baeddb4-45e2-4c36-8cb7-d79439a5f67c", "instance_id": null, - "ip": "10.10.10.1", - "pool": "nova" + "ip": "172.24.4.17", + "pool": "public" } -} \ No newline at end of file +} diff --git a/doc/api_samples/os-floating-ips/floating-ips-get-resp.json b/doc/api_samples/os-floating-ips/floating-ips-get-resp.json index fe161a7dd12..33c2c350b86 100644 --- a/doc/api_samples/os-floating-ips/floating-ips-get-resp.json +++ b/doc/api_samples/os-floating-ips/floating-ips-get-resp.json @@ -1,9 +1,9 @@ { "floating_ip": { "fixed_ip": null, - "id": 1, + "id": "8baeddb4-45e2-4c36-8cb7-d79439a5f67c", "instance_id": null, - "ip": "10.10.10.1", - "pool": "nova" + "ip": "172.24.4.17", + "pool": "public" } -} \ No newline at end 
\ No newline at end of file
+}
diff --git a/doc/api_samples/os-floating-ips/floating-ips-list-empty-resp.json b/doc/api_samples/os-floating-ips/floating-ips-list-empty-resp.json
index 121dbd084e8..12f118da50d 100644
--- a/doc/api_samples/os-floating-ips/floating-ips-list-empty-resp.json
+++ b/doc/api_samples/os-floating-ips/floating-ips-list-empty-resp.json
@@ -1,3 +1,3 @@
 {
     "floating_ips": []
-}
\ No newline at end of file
+}
diff --git a/doc/api_samples/os-floating-ips/floating-ips-list-resp.json b/doc/api_samples/os-floating-ips/floating-ips-list-resp.json
index 4d58e0676a9..8585c4c7f9d 100644
--- a/doc/api_samples/os-floating-ips/floating-ips-list-resp.json
+++ b/doc/api_samples/os-floating-ips/floating-ips-list-resp.json
@@ -2,17 +2,17 @@
     "floating_ips": [
         {
             "fixed_ip": null,
-            "id": 1,
+            "id": "8baeddb4-45e2-4c36-8cb7-d79439a5f67c",
             "instance_id": null,
-            "ip": "10.10.10.1",
-            "pool": "nova"
+            "ip": "172.24.4.17",
+            "pool": "public"
         },
         {
             "fixed_ip": null,
-            "id": 2,
+            "id": "05ef7490-745a-4af9-98e5-610dc97493c4",
             "instance_id": null,
-            "ip": "10.10.10.2",
-            "pool": "nova"
+            "ip": "172.24.4.78",
+            "pool": "public"
         }
     ]
-}
\ No newline at end of file
+}
diff --git a/doc/api_samples/os-hide-server-addresses/server-get-resp.json b/doc/api_samples/os-hide-server-addresses/server-get-resp.json
deleted file mode 100644
index e72c78649df..00000000000
--- a/doc/api_samples/os-hide-server-addresses/server-get-resp.json
+++ /dev/null
@@ -1,68 +0,0 @@
-{
-    "server": {
-        "accessIPv4": "1.2.3.4",
-        "accessIPv6": "80fe::",
-        "addresses": {},
-        "created": "2013-09-24T14:39:00Z",
-        "flavor": {
-            "id": "1",
-            "links": [
-                {
-                    "href": "http://openstack.example.com/6f70656e737461636b20342065766572/flavors/1",
-                    "rel": "bookmark"
-                }
-            ]
-        },
-        "hostId": "d0635823e9162b22b90ff103f0c30f129bacf6ffb72f4d6fde87e738",
-        "id": "4bdee8c7-507f-40f2-8429-d301edd3791b",
-        "image": {
-            "id": "70a599e0-31e7-49b7-b260-868f441e862b",
-            "links": [
-                {
-                    "href": "http://openstack.example.com/6f70656e737461636b20342065766572/images/70a599e0-31e7-49b7-b260-868f441e862b",
-                    "rel": "bookmark"
-                }
-            ]
-        },
-        "key_name": null,
-        "links": [
-            {
-                "href": "http://openstack.example.com/v2/6f70656e737461636b20342065766572/servers/4bdee8c7-507f-40f2-8429-d301edd3791b",
-                "rel": "self"
-            },
-            {
-                "href": "http://openstack.example.com/6f70656e737461636b20342065766572/servers/4bdee8c7-507f-40f2-8429-d301edd3791b",
-                "rel": "bookmark"
-            }
-        ],
-        "metadata": {
-            "My Server Name": "Apache1"
-        },
-        "name": "new-server-test",
-        "config_drive": "",
-        "OS-DCF:diskConfig": "AUTO",
-        "OS-EXT-AZ:availability_zone": "nova",
-        "OS-EXT-SRV-ATTR:host": "b8b357f7100d4391828f2177c922ef93",
-        "OS-EXT-SRV-ATTR:hypervisor_hostname": "fake-mini",
-        "OS-EXT-SRV-ATTR:instance_name": "instance-00000001",
-        "OS-EXT-STS:power_state": 1,
-        "OS-EXT-STS:task_state": null,
-        "OS-EXT-STS:vm_state": "active",
-        "os-extended-volumes:volumes_attached": [
-            {"id": "volume_id1"},
-            {"id": "volume_id2"}
-        ],
-        "OS-SRV-USG:launched_at": "2013-09-23T13:37:00.880302",
-        "OS-SRV-USG:terminated_at": null,
-        "progress": 0,
-        "security_groups": [
-            {
-                "name": "default"
-            }
-        ],
-        "status": "ACTIVE",
-        "tenant_id": "6f70656e737461636b20342065766572",
-        "updated": "2013-09-24T14:39:01Z",
-        "user_id": "fake"
-    }
-}
diff --git a/doc/api_samples/os-hide-server-addresses/servers-details-resp.json b/doc/api_samples/os-hide-server-addresses/servers-details-resp.json
deleted file mode 100644
index cfbf90ee893..00000000000
--- a/doc/api_samples/os-hide-server-addresses/servers-details-resp.json
+++ /dev/null
@@ -1,76 +0,0 @@
-{
-    "servers": [
-        {
-            "accessIPv4": "1.2.3.4",
-            "accessIPv6": "80fe::",
-            "addresses": {},
-            "created": "2013-09-24T14:44:01Z",
-            "flavor": {
-                "id": "1",
-                "links": [
-                    {
-                        "href": "http://openstack.example.com/6f70656e737461636b20342065766572/flavors/1",
-                        "rel": "bookmark"
-                    }
-                ]
-            },
-            "hostId": "a4fa72ae8741e5e18fb062c15657b8f689b8da2837b734c61fc9eedd",
-            "id": "a747eac1-e3ed-446c-935a-c2a2853f919c",
-            "image": {
-                "id": "70a599e0-31e7-49b7-b260-868f441e862b",
-                "links": [
-                    {
-                        "href": "http://openstack.example.com/6f70656e737461636b20342065766572/images/70a599e0-31e7-49b7-b260-868f441e862b",
-                        "rel": "bookmark"
-                    }
-                ]
-            },
-            "key_name": null,
-            "links": [
-                {
-                    "href": "http://openstack.example.com/v2/6f70656e737461636b20342065766572/servers/a747eac1-e3ed-446c-935a-c2a2853f919c",
-                    "rel": "self"
-                },
-                {
-                    "href": "http://openstack.example.com/6f70656e737461636b20342065766572/servers/a747eac1-e3ed-446c-935a-c2a2853f919c",
-                    "rel": "bookmark"
-                }
-            ],
-            "metadata": {
-                "My Server Name": "Apache1"
-            },
-            "name": "new-server-test",
-            "config_drive": "",
-            "OS-DCF:diskConfig": "AUTO",
-            "OS-EXT-AZ:availability_zone": "nova",
-            "OS-EXT-SRV-ATTR:host": "c3f14e9812ad496baf92ccfb3c61e15f",
-            "OS-EXT-SRV-ATTR:hypervisor_hostname": "fake-mini",
-            "OS-EXT-SRV-ATTR:instance_name": "instance-00000001",
-            "OS-EXT-STS:power_state": 1,
-            "OS-EXT-STS:task_state": null,
-            "OS-EXT-STS:vm_state": "active",
-            "os-extended-volumes:volumes_attached": [
-                {"id": "volume_id1"},
-                {"id": "volume_id2"}
-            ],
-            "OS-SRV-USG:launched_at": "2013-09-23T13:53:12.774549",
-            "OS-SRV-USG:terminated_at": null,
-            "progress": 0,
-            "security_groups": [
-                {
-                    "name": "default"
-                }
-            ],
-            "status": "ACTIVE",
-            "tenant_id": "6f70656e737461636b20342065766572",
-            "updated": "2013-09-24T14:44:01Z",
-            "user_id": "fake"
-        }
-    ],
-    "servers_links": [
-        {
-            "href": "http://openstack.example.com/v2.1/6f70656e737461636b20342065766572/servers/detail?limit=1&marker=a747eac1-e3ed-446c-935a-c2a2853f919c",
-            "rel": "next"
-        }
-    ]
-}
diff --git a/doc/api_samples/os-hide-server-addresses/servers-list-resp.json b/doc/api_samples/os-hide-server-addresses/servers-list-resp.json
deleted file mode 100644
index 9481378c2ab..00000000000
--- a/doc/api_samples/os-hide-server-addresses/servers-list-resp.json
+++ /dev/null
@@ -1,24 +0,0 @@
-{
-    "servers": [
-        {
-            "id": "b2a7068b-8aed-41a4-aa74-af8feb984bae",
-            "links": [
-                {
-                    "href": "http://openstack.example.com/v2/6f70656e737461636b20342065766572/servers/b2a7068b-8aed-41a4-aa74-af8feb984bae",
-                    "rel": "self"
-                },
-                {
-                    "href": "http://openstack.example.com/6f70656e737461636b20342065766572/servers/b2a7068b-8aed-41a4-aa74-af8feb984bae",
-                    "rel": "bookmark"
-                }
-            ],
-            "name": "new-server-test"
-        }
-    ],
-    "servers_links": [
-        {
-            "href": "http://openstack.example.com/v2.1/6f70656e737461636b20342065766572/servers?limit=1&marker=b2a7068b-8aed-41a4-aa74-af8feb984bae",
-            "rel": "next"
-        }
-    ]
-}
\ No newline at end of file
diff --git a/doc/api_samples/os-hosts/hosts-list-resp.json b/doc/api_samples/os-hosts/hosts-list-resp.json
index 1413b3aabdc..cd6b8d0c6a2 100644
--- a/doc/api_samples/os-hosts/hosts-list-resp.json
+++ b/doc/api_samples/os-hosts/hosts-list-resp.json
@@ -10,16 +10,6 @@
             "service": "compute",
             "zone": "nova"
         },
-        {
-            "host_name": "e73ec0bd35c64de4a1adfa8b8969a1f6",
-            "service": "consoleauth",
-            "zone": "internal"
-        },
-        {
-            "host_name": "396a8a0a234f476eb05fb9fbc5802ba7",
-            "service": "network",
-            "zone": "internal"
-        },
         {
"abffda96592c4eacaf4111c28fddee17", "service": "scheduler", diff --git a/doc/api_samples/os-hypervisors/v2.33/hypervisors-detail-resp.json b/doc/api_samples/os-hypervisors/v2.33/hypervisors-detail-resp.json index 267e9d50998..2da7f09f4e5 100644 --- a/doc/api_samples/os-hypervisors/v2.33/hypervisors-detail-resp.json +++ b/doc/api_samples/os-hypervisors/v2.33/hypervisors-detail-resp.json @@ -33,7 +33,7 @@ "running_vms": 0, "service": { "host": "host1", - "id": 7, + "id": 6, "disabled_reason": null }, "vcpus": 2, @@ -42,7 +42,7 @@ ], "hypervisors_links": [ { - "href": "http://openstack.example.com/v2.1/6f70656e737461636b20342065766572/hypervisors/detail?limit=1&marker=2", + "href": "http://openstack.example.com/v2.1/6f70656e737461636b20342065766572/os-hypervisors/detail?limit=1&marker=2", "rel": "next" } ] diff --git a/doc/api_samples/os-hypervisors/v2.33/hypervisors-list-resp.json b/doc/api_samples/os-hypervisors/v2.33/hypervisors-list-resp.json index 9a5771df022..bb531ace7a5 100644 --- a/doc/api_samples/os-hypervisors/v2.33/hypervisors-list-resp.json +++ b/doc/api_samples/os-hypervisors/v2.33/hypervisors-list-resp.json @@ -9,7 +9,7 @@ ], "hypervisors_links": [ { - "href": "http://openstack.example.com/v2.1/6f70656e737461636b20342065766572/hypervisors?limit=1&marker=2", + "href": "http://openstack.example.com/v2.1/6f70656e737461636b20342065766572/os-hypervisors?limit=1&marker=2", "rel": "next" } ] diff --git a/doc/api_samples/os-hypervisors/v2.53/hypervisors-detail-resp.json b/doc/api_samples/os-hypervisors/v2.53/hypervisors-detail-resp.json index a2172f69a22..ed62b8cd476 100644 --- a/doc/api_samples/os-hypervisors/v2.53/hypervisors-detail-resp.json +++ b/doc/api_samples/os-hypervisors/v2.53/hypervisors-detail-resp.json @@ -42,7 +42,7 @@ ], "hypervisors_links": [ { - "href": "http://openstack.example.com/v2.1/6f70656e737461636b20342065766572/hypervisors/detail?limit=1&marker=1bb62a04-c576-402c-8147-9e89757a09e3", + "href": "http://openstack.example.com/v2.1/6f70656e737461636b20342065766572/os-hypervisors/detail?limit=1&marker=1bb62a04-c576-402c-8147-9e89757a09e3", "rel": "next" } ] diff --git a/doc/api_samples/os-hypervisors/v2.53/hypervisors-list-resp.json b/doc/api_samples/os-hypervisors/v2.53/hypervisors-list-resp.json index ec10b4a106e..2171311a16c 100644 --- a/doc/api_samples/os-hypervisors/v2.53/hypervisors-list-resp.json +++ b/doc/api_samples/os-hypervisors/v2.53/hypervisors-list-resp.json @@ -1,7 +1,7 @@ { "hypervisors": [ { - "hypervisor_hostname": "fake-mini", + "hypervisor_hostname": "host2", "id": "1bb62a04-c576-402c-8147-9e89757a09e3", "state": "up", "status": "enabled" @@ -9,7 +9,7 @@ ], "hypervisors_links": [ { - "href": "http://openstack.example.com/v2.1/6f70656e737461636b20342065766572/hypervisors?limit=1&marker=1bb62a04-c576-402c-8147-9e89757a09e3", + "href": "http://openstack.example.com/v2.1/6f70656e737461636b20342065766572/os-hypervisors?limit=1&marker=1bb62a04-c576-402c-8147-9e89757a09e3", "rel": "next" } ] diff --git a/doc/api_samples/os-hypervisors/v2.88/hypervisors-detail-resp.json b/doc/api_samples/os-hypervisors/v2.88/hypervisors-detail-resp.json new file mode 100644 index 00000000000..a009a125ffc --- /dev/null +++ b/doc/api_samples/os-hypervisors/v2.88/hypervisors-detail-resp.json @@ -0,0 +1,25 @@ +{ + "hypervisors": [ + { + "host_ip": "192.168.1.135", + "hypervisor_hostname": "host2", + "hypervisor_type": "fake", + "hypervisor_version": 1000, + "id": "f6d28711-9c10-470e-8b31-c03f498b0032", + "service": { + "disabled_reason": null, + "host": "host2", + "id": 
"21bbb5fb-ec98-48b3-89cf-c94402c55611" + }, + "state": "up", + "status": "enabled", + "uptime": null + } + ], + "hypervisors_links": [ + { + "href": "http://openstack.example.com/v2.1/6f70656e737461636b20342065766572/os-hypervisors/detail?limit=1&marker=f6d28711-9c10-470e-8b31-c03f498b0032", + "rel": "next" + } + ] +} diff --git a/doc/api_samples/os-hypervisors/v2.88/hypervisors-detail-with-servers-resp.json b/doc/api_samples/os-hypervisors/v2.88/hypervisors-detail-with-servers-resp.json new file mode 100644 index 00000000000..26526b18cc2 --- /dev/null +++ b/doc/api_samples/os-hypervisors/v2.88/hypervisors-detail-with-servers-resp.json @@ -0,0 +1,29 @@ +{ + "hypervisors": [ + { + "host_ip": "192.168.1.135", + "hypervisor_hostname": "fake-mini", + "hypervisor_type": "fake", + "hypervisor_version": 1000, + "id": "28b0e607-d58a-4602-a511-efe18024f4d5", + "servers": [ + { + "name": "test_server1", + "uuid": "aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa" + }, + { + "name": "test_server2", + "uuid": "bbbbbbbb-bbbb-bbbb-bbbb-bbbbbbbbbbbb" + } + ], + "service": { + "disabled_reason": null, + "host": "compute", + "id": "40e769a5-7489-4cf3-be46-f6bd3e4e3c25" + }, + "state": "up", + "status": "enabled", + "uptime": null + } + ] +} diff --git a/doc/api_samples/os-hypervisors/v2.88/hypervisors-list-resp.json b/doc/api_samples/os-hypervisors/v2.88/hypervisors-list-resp.json new file mode 100644 index 00000000000..7c042bf8fa6 --- /dev/null +++ b/doc/api_samples/os-hypervisors/v2.88/hypervisors-list-resp.json @@ -0,0 +1,16 @@ +{ + "hypervisors": [ + { + "hypervisor_hostname": "host2", + "id": "bfb90ba3-e13e-4413-90ff-5cdbfea727e2", + "state": "up", + "status": "enabled" + } + ], + "hypervisors_links": [ + { + "href": "http://openstack.example.com/v2.1/6f70656e737461636b20342065766572/os-hypervisors?limit=1&marker=bfb90ba3-e13e-4413-90ff-5cdbfea727e2", + "rel": "next" + } + ] +} \ No newline at end of file diff --git a/doc/api_samples/os-hypervisors/v2.88/hypervisors-search-resp.json b/doc/api_samples/os-hypervisors/v2.88/hypervisors-search-resp.json new file mode 100644 index 00000000000..6190e428bd7 --- /dev/null +++ b/doc/api_samples/os-hypervisors/v2.88/hypervisors-search-resp.json @@ -0,0 +1,10 @@ +{ + "hypervisors": [ + { + "hypervisor_hostname": "fake-mini", + "id": "6b7876c5-9ae7-4fa7-a5c8-28c796d17381", + "state": "up", + "status": "enabled" + } + ] +} \ No newline at end of file diff --git a/doc/api_samples/os-hypervisors/v2.88/hypervisors-show-resp.json b/doc/api_samples/os-hypervisors/v2.88/hypervisors-show-resp.json new file mode 100644 index 00000000000..d1e566d6eb6 --- /dev/null +++ b/doc/api_samples/os-hypervisors/v2.88/hypervisors-show-resp.json @@ -0,0 +1,17 @@ +{ + "hypervisor": { + "host_ip": "192.168.1.135", + "hypervisor_hostname": "fake-mini", + "hypervisor_type": "fake", + "hypervisor_version": 1000, + "id": "f79c1cce-9972-44c6-aa30-1d9e6526ce37", + "service": { + "disabled_reason": null, + "host": "compute", + "id": "7e6b27b8-f563-4c21-baa4-a40d579ed8c4" + }, + "state": "up", + "status": "enabled", + "uptime": null + } +} diff --git a/doc/api_samples/os-hypervisors/v2.88/hypervisors-show-with-servers-resp.json b/doc/api_samples/os-hypervisors/v2.88/hypervisors-show-with-servers-resp.json new file mode 100644 index 00000000000..0196b9ca5ea --- /dev/null +++ b/doc/api_samples/os-hypervisors/v2.88/hypervisors-show-with-servers-resp.json @@ -0,0 +1,27 @@ +{ + "hypervisor": { + "host_ip": "192.168.1.135", + "hypervisor_hostname": "fake-mini", + "hypervisor_type": "fake", + "hypervisor_version": 
+        "hypervisor_version": 1000,
+        "id": "a68a56ab-9c42-47c0-9309-879e4a6dbe86",
+        "servers": [
+            {
+                "name": "test_server1",
+                "uuid": "aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa"
+            },
+            {
+                "name": "test_server2",
+                "uuid": "bbbbbbbb-bbbb-bbbb-bbbb-bbbbbbbbbbbb"
+            }
+        ],
+        "service": {
+            "disabled_reason": null,
+            "host": "compute",
+            "id": "8495059a-a079-4ab4-ad6f-cf45b81c877d"
+        },
+        "state": "up",
+        "status": "enabled",
+        "uptime": null
+    }
+}
diff --git a/doc/api_samples/os-hypervisors/v2.88/hypervisors-with-servers-resp.json b/doc/api_samples/os-hypervisors/v2.88/hypervisors-with-servers-resp.json
new file mode 100644
index 00000000000..abaea1ffd4e
--- /dev/null
+++ b/doc/api_samples/os-hypervisors/v2.88/hypervisors-with-servers-resp.json
@@ -0,0 +1,20 @@
+{
+    "hypervisors": [
+        {
+            "hypervisor_hostname": "fake-mini",
+            "id": "39b0c938-8e2f-49da-bb52-e85c78d4ff2a",
+            "servers": [
+                {
+                    "name": "test_server1",
+                    "uuid": "aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa"
+                },
+                {
+                    "name": "test_server2",
+                    "uuid": "bbbbbbbb-bbbb-bbbb-bbbb-bbbbbbbbbbbb"
+                }
+            ],
+            "state": "up",
+            "status": "enabled"
+        }
+    ]
+}
\ No newline at end of file
diff --git a/doc/api_samples/os-instance-actions/v2.58/instance-actions-list-with-timestamp-filter.json b/doc/api_samples/os-instance-actions/v2.58/instance-actions-list-with-changes-since.json
similarity index 100%
rename from doc/api_samples/os-instance-actions/v2.58/instance-actions-list-with-timestamp-filter.json
rename to doc/api_samples/os-instance-actions/v2.58/instance-actions-list-with-changes-since.json
diff --git a/doc/api_samples/os-instance-actions/v2.62/instance-actions-list-with-timestamp-filter.json b/doc/api_samples/os-instance-actions/v2.62/instance-actions-list-with-changes-since.json
similarity index 100%
rename from doc/api_samples/os-instance-actions/v2.62/instance-actions-list-with-timestamp-filter.json
rename to doc/api_samples/os-instance-actions/v2.62/instance-actions-list-with-changes-since.json
diff --git a/doc/api_samples/os-instance-actions/v2.66/instance-action-get-non-admin-resp.json b/doc/api_samples/os-instance-actions/v2.66/instance-action-get-non-admin-resp.json
new file mode 100644
index 00000000000..115604d6ac8
--- /dev/null
+++ b/doc/api_samples/os-instance-actions/v2.66/instance-action-get-non-admin-resp.json
@@ -0,0 +1,21 @@
+{
+    "instanceAction": {
+        "action": "stop",
+        "events": [
+            {
+                "event": "compute_stop_instance",
+                "finish_time": "2018-04-25T01:26:34.784165",
+                "hostId": "2091634baaccdc4c5a1d57069c833e402921df696b7f970791b12ec6",
+                "result": "Success",
+                "start_time": "2018-04-25T01:26:34.612020"
+            }
+        ],
+        "instance_uuid": "79edaa44-ad4f-4af7-b994-154518c2b927",
+        "message": null,
+        "project_id": "6f70656e737461636b20342065766572",
+        "request_id": "req-8eb28d4a-db6c-4337-bab8-ce154e9c620e",
+        "start_time": "2018-04-25T01:26:34.388280",
+        "updated_at": "2018-04-25T01:26:34.784165",
+        "user_id": "fake"
+    }
+}
\ No newline at end of file
diff --git a/doc/api_samples/os-instance-actions/v2.66/instance-action-get-resp.json b/doc/api_samples/os-instance-actions/v2.66/instance-action-get-resp.json
new file mode 100644
index 00000000000..57ae490f023
--- /dev/null
+++ b/doc/api_samples/os-instance-actions/v2.66/instance-action-get-resp.json
@@ -0,0 +1,23 @@
+{
+    "instanceAction": {
+        "action": "stop",
+        "events": [
+            {
+                "event": "compute_stop_instance",
+                "finish_time": "2018-04-25T01:26:36.790544",
+                "host": "compute",
+                "hostId": "2091634baaccdc4c5a1d57069c833e402921df696b7f970791b12ec6",
+                "result": "Success",
"2018-04-25T01:26:36.539271", + "traceback": null + } + ], + "instance_uuid": "4bf3473b-d550-4b65-9409-292d44ab14a2", + "message": null, + "project_id": "6f70656e737461636b20342065766572", + "request_id": "req-0d819d5c-1527-4669-bdf0-ffad31b5105b", + "start_time": "2018-04-25T01:26:36.341290", + "updated_at": "2018-04-25T01:26:36.790544", + "user_id": "admin" + } +} \ No newline at end of file diff --git a/doc/api_samples/os-instance-actions/v2.66/instance-actions-list-resp.json b/doc/api_samples/os-instance-actions/v2.66/instance-actions-list-resp.json new file mode 100644 index 00000000000..0b2254126b1 --- /dev/null +++ b/doc/api_samples/os-instance-actions/v2.66/instance-actions-list-resp.json @@ -0,0 +1,24 @@ +{ + "instanceActions": [ + { + "action": "stop", + "instance_uuid": "15835b6f-1e14-4cfa-9f66-1abea1a1c0d5", + "message": null, + "project_id": "6f70656e737461636b20342065766572", + "request_id": "req-f04d4b92-6241-42da-b82d-2cedb225c58d", + "start_time": "2018-04-25T01:26:36.036697", + "updated_at": "2018-04-25T01:26:36.525308", + "user_id": "admin" + }, + { + "action": "create", + "instance_uuid": "15835b6f-1e14-4cfa-9f66-1abea1a1c0d5", + "message": null, + "project_id": "6f70656e737461636b20342065766572", + "request_id": "req-d8790618-9bbf-4df0-8af8-fc9e24de29c0", + "start_time": "2018-04-25T01:26:33.692125", + "updated_at": "2018-04-25T01:26:35.993821", + "user_id": "admin" + } + ] +} \ No newline at end of file diff --git a/doc/api_samples/os-instance-actions/v2.66/instance-actions-list-with-changes-before.json b/doc/api_samples/os-instance-actions/v2.66/instance-actions-list-with-changes-before.json new file mode 100644 index 00000000000..28c58384e70 --- /dev/null +++ b/doc/api_samples/os-instance-actions/v2.66/instance-actions-list-with-changes-before.json @@ -0,0 +1,24 @@ +{ + "instanceActions": [ + { + "action": "stop", + "instance_uuid": "2150964c-30fe-4214-9547-8822375aa7d0", + "message": null, + "project_id": "6f70656e737461636b20342065766572", + "request_id": "req-0c3b2079-0a44-474d-a5b2-7466d4b4c642", + "start_time": "2018-04-25T01:26:29.594237", + "updated_at": "2018-04-25T01:26:30.065061", + "user_id": "admin" + }, + { + "action": "create", + "instance_uuid": "15835b6f-1e14-4cfa-9f66-1abea1a1c0d5", + "message": null, + "project_id": "6f70656e737461636b20342065766572", + "request_id": "req-d8790618-9bbf-4df0-8af8-fc9e24de29c0", + "start_time": "2018-04-25T01:26:33.692125", + "updated_at": "2018-04-25T01:26:35.993821", + "user_id": "admin" + } + ] +} diff --git a/doc/api_samples/os-instance-actions/v2.66/instance-actions-list-with-changes-since.json b/doc/api_samples/os-instance-actions/v2.66/instance-actions-list-with-changes-since.json new file mode 100644 index 00000000000..346c93af7a9 --- /dev/null +++ b/doc/api_samples/os-instance-actions/v2.66/instance-actions-list-with-changes-since.json @@ -0,0 +1,14 @@ +{ + "instanceActions": [ + { + "action": "stop", + "instance_uuid": "2150964c-30fe-4214-9547-8822375aa7d0", + "message": null, + "project_id": "6f70656e737461636b20342065766572", + "request_id": "req-0c3b2079-0a44-474d-a5b2-7466d4b4c642", + "start_time": "2018-04-25T01:26:29.594237", + "updated_at": "2018-04-25T01:26:30.065061", + "user_id": "admin" + } + ] +} \ No newline at end of file diff --git a/doc/api_samples/os-instance-actions/v2.66/instance-actions-list-with-limit-resp.json b/doc/api_samples/os-instance-actions/v2.66/instance-actions-list-with-limit-resp.json new file mode 100644 index 00000000000..7126a9f2820 --- /dev/null +++ 
+++ b/doc/api_samples/os-instance-actions/v2.66/instance-actions-list-with-limit-resp.json
@@ -0,0 +1,20 @@
+{
+    "instanceActions": [
+        {
+            "action": "stop",
+            "instance_uuid": "ca3d3be5-1a40-427f-9515-f5e181f479d0",
+            "message": null,
+            "project_id": "6f70656e737461636b20342065766572",
+            "request_id": "req-4dbefbb7-d743-4d42-b0a1-a79cbe256138",
+            "start_time": "2018-04-25T01:26:28.909887",
+            "updated_at": "2018-04-25T01:26:29.400606",
+            "user_id": "admin"
+        }
+    ],
+    "links": [
+        {
+            "href": "http://openstack.example.com/v2.1/6f70656e737461636b20342065766572/servers/ca3d3be5-1a40-427f-9515-f5e181f479d0/os-instance-actions?limit=1&marker=req-4dbefbb7-d743-4d42-b0a1-a79cbe256138",
+            "rel": "next"
+        }
+    ]
+}
\ No newline at end of file
diff --git a/doc/api_samples/os-instance-actions/v2.66/instance-actions-list-with-marker-resp.json b/doc/api_samples/os-instance-actions/v2.66/instance-actions-list-with-marker-resp.json
new file mode 100644
index 00000000000..3f6921cb795
--- /dev/null
+++ b/doc/api_samples/os-instance-actions/v2.66/instance-actions-list-with-marker-resp.json
@@ -0,0 +1,14 @@
+{
+    "instanceActions": [
+        {
+            "action": "create",
+            "instance_uuid": "9bde1fd5-8435-45c5-afc1-bedd0605275b",
+            "message": null,
+            "project_id": "6f70656e737461636b20342065766572",
+            "request_id": "req-4510fb10-447f-4572-a64d-c2324547d86c",
+            "start_time": "2018-04-25T01:26:33.710291",
+            "updated_at": "2018-04-25T01:26:35.374936",
+            "user_id": "fake"
+        }
+    ]
+}
\ No newline at end of file
diff --git a/doc/api_samples/os-instance-actions/v2.84/instance-action-get-non-admin-resp.json b/doc/api_samples/os-instance-actions/v2.84/instance-action-get-non-admin-resp.json
new file mode 100644
index 00000000000..115604d6ac8
--- /dev/null
+++ b/doc/api_samples/os-instance-actions/v2.84/instance-action-get-non-admin-resp.json
@@ -0,0 +1,21 @@
+{
+    "instanceAction": {
+        "action": "stop",
+        "events": [
+            {
+                "event": "compute_stop_instance",
+                "finish_time": "2018-04-25T01:26:34.784165",
+                "hostId": "2091634baaccdc4c5a1d57069c833e402921df696b7f970791b12ec6",
+                "result": "Success",
+                "start_time": "2018-04-25T01:26:34.612020"
+            }
+        ],
+        "instance_uuid": "79edaa44-ad4f-4af7-b994-154518c2b927",
+        "message": null,
+        "project_id": "6f70656e737461636b20342065766572",
+        "request_id": "req-8eb28d4a-db6c-4337-bab8-ce154e9c620e",
+        "start_time": "2018-04-25T01:26:34.388280",
+        "updated_at": "2018-04-25T01:26:34.784165",
+        "user_id": "fake"
+    }
+}
\ No newline at end of file
diff --git a/doc/api_samples/os-instance-actions/v2.84/instance-action-get-resp.json b/doc/api_samples/os-instance-actions/v2.84/instance-action-get-resp.json
new file mode 100644
index 00000000000..3285f39ef6a
--- /dev/null
+++ b/doc/api_samples/os-instance-actions/v2.84/instance-action-get-resp.json
@@ -0,0 +1,24 @@
+{
+    "instanceAction": {
+        "action": "stop",
+        "events": [
+            {
+                "event": "compute_stop_instance",
+                "finish_time": "2018-04-25T01:26:36.790544",
+                "host": "compute",
+                "hostId": "2091634baaccdc4c5a1d57069c833e402921df696b7f970791b12ec6",
+                "result": "Success",
+                "start_time": "2018-04-25T01:26:36.539271",
+                "traceback": null,
+                "details": null
+            }
+        ],
+        "instance_uuid": "4bf3473b-d550-4b65-9409-292d44ab14a2",
+        "message": null,
+        "project_id": "6f70656e737461636b20342065766572",
+        "request_id": "req-0d819d5c-1527-4669-bdf0-ffad31b5105b",
+        "start_time": "2018-04-25T01:26:36.341290",
+        "updated_at": "2018-04-25T01:26:36.790544",
+        "user_id": "admin"
+    }
+}
diff --git a/doc/api_samples/os-instance-actions/v2.84/instance-actions-list-resp.json b/doc/api_samples/os-instance-actions/v2.84/instance-actions-list-resp.json
new file mode 100644
index 00000000000..0b2254126b1
--- /dev/null
+++ b/doc/api_samples/os-instance-actions/v2.84/instance-actions-list-resp.json
@@ -0,0 +1,24 @@
+{
+    "instanceActions": [
+        {
+            "action": "stop",
+            "instance_uuid": "15835b6f-1e14-4cfa-9f66-1abea1a1c0d5",
+            "message": null,
+            "project_id": "6f70656e737461636b20342065766572",
+            "request_id": "req-f04d4b92-6241-42da-b82d-2cedb225c58d",
+            "start_time": "2018-04-25T01:26:36.036697",
+            "updated_at": "2018-04-25T01:26:36.525308",
+            "user_id": "admin"
+        },
+        {
+            "action": "create",
+            "instance_uuid": "15835b6f-1e14-4cfa-9f66-1abea1a1c0d5",
+            "message": null,
+            "project_id": "6f70656e737461636b20342065766572",
+            "request_id": "req-d8790618-9bbf-4df0-8af8-fc9e24de29c0",
+            "start_time": "2018-04-25T01:26:33.692125",
+            "updated_at": "2018-04-25T01:26:35.993821",
+            "user_id": "admin"
+        }
+    ]
+}
\ No newline at end of file
diff --git a/doc/api_samples/os-instance-actions/v2.84/instance-actions-list-with-changes-before.json b/doc/api_samples/os-instance-actions/v2.84/instance-actions-list-with-changes-before.json
new file mode 100644
index 00000000000..28c58384e70
--- /dev/null
+++ b/doc/api_samples/os-instance-actions/v2.84/instance-actions-list-with-changes-before.json
@@ -0,0 +1,24 @@
+{
+    "instanceActions": [
+        {
+            "action": "stop",
+            "instance_uuid": "2150964c-30fe-4214-9547-8822375aa7d0",
+            "message": null,
+            "project_id": "6f70656e737461636b20342065766572",
+            "request_id": "req-0c3b2079-0a44-474d-a5b2-7466d4b4c642",
+            "start_time": "2018-04-25T01:26:29.594237",
+            "updated_at": "2018-04-25T01:26:30.065061",
+            "user_id": "admin"
+        },
+        {
+            "action": "create",
+            "instance_uuid": "15835b6f-1e14-4cfa-9f66-1abea1a1c0d5",
+            "message": null,
+            "project_id": "6f70656e737461636b20342065766572",
+            "request_id": "req-d8790618-9bbf-4df0-8af8-fc9e24de29c0",
+            "start_time": "2018-04-25T01:26:33.692125",
+            "updated_at": "2018-04-25T01:26:35.993821",
+            "user_id": "admin"
+        }
+    ]
+}
diff --git a/doc/api_samples/os-instance-actions/v2.84/instance-actions-list-with-changes-since.json b/doc/api_samples/os-instance-actions/v2.84/instance-actions-list-with-changes-since.json
new file mode 100644
index 00000000000..346c93af7a9
--- /dev/null
+++ b/doc/api_samples/os-instance-actions/v2.84/instance-actions-list-with-changes-since.json
@@ -0,0 +1,14 @@
+{
+    "instanceActions": [
+        {
+            "action": "stop",
+            "instance_uuid": "2150964c-30fe-4214-9547-8822375aa7d0",
+            "message": null,
+            "project_id": "6f70656e737461636b20342065766572",
+            "request_id": "req-0c3b2079-0a44-474d-a5b2-7466d4b4c642",
+            "start_time": "2018-04-25T01:26:29.594237",
+            "updated_at": "2018-04-25T01:26:30.065061",
+            "user_id": "admin"
+        }
+    ]
+}
\ No newline at end of file
diff --git a/doc/api_samples/os-instance-actions/v2.84/instance-actions-list-with-limit-resp.json b/doc/api_samples/os-instance-actions/v2.84/instance-actions-list-with-limit-resp.json
new file mode 100644
index 00000000000..7126a9f2820
--- /dev/null
+++ b/doc/api_samples/os-instance-actions/v2.84/instance-actions-list-with-limit-resp.json
@@ -0,0 +1,20 @@
+{
+    "instanceActions": [
+        {
+            "action": "stop",
+            "instance_uuid": "ca3d3be5-1a40-427f-9515-f5e181f479d0",
+            "message": null,
+            "project_id": "6f70656e737461636b20342065766572",
+            "request_id": "req-4dbefbb7-d743-4d42-b0a1-a79cbe256138",
+            "start_time": "2018-04-25T01:26:28.909887",
+            "updated_at": "2018-04-25T01:26:29.400606",
+            "user_id": "admin"
+        }
+    ],
+    "links": [
+        {
"http://openstack.example.com/v2.1/6f70656e737461636b20342065766572/servers/ca3d3be5-1a40-427f-9515-f5e181f479d0/os-instance-actions?limit=1&marker=req-4dbefbb7-d743-4d42-b0a1-a79cbe256138", + "rel": "next" + } + ] +} \ No newline at end of file diff --git a/doc/api_samples/os-instance-actions/v2.84/instance-actions-list-with-marker-resp.json b/doc/api_samples/os-instance-actions/v2.84/instance-actions-list-with-marker-resp.json new file mode 100644 index 00000000000..3f6921cb795 --- /dev/null +++ b/doc/api_samples/os-instance-actions/v2.84/instance-actions-list-with-marker-resp.json @@ -0,0 +1,14 @@ +{ + "instanceActions": [ + { + "action": "create", + "instance_uuid": "9bde1fd5-8435-45c5-afc1-bedd0605275b", + "message": null, + "project_id": "6f70656e737461636b20342065766572", + "request_id": "req-4510fb10-447f-4572-a64d-c2324547d86c", + "start_time": "2018-04-25T01:26:33.710291", + "updated_at": "2018-04-25T01:26:35.374936", + "user_id": "fake" + } + ] +} \ No newline at end of file diff --git a/doc/api_samples/keypairs/keypairs-get-resp.json b/doc/api_samples/os-keypairs/keypairs-get-resp.json similarity index 100% rename from doc/api_samples/keypairs/keypairs-get-resp.json rename to doc/api_samples/os-keypairs/keypairs-get-resp.json diff --git a/doc/api_samples/keypairs/keypairs-import-post-req.json b/doc/api_samples/os-keypairs/keypairs-import-post-req.json similarity index 100% rename from doc/api_samples/keypairs/keypairs-import-post-req.json rename to doc/api_samples/os-keypairs/keypairs-import-post-req.json diff --git a/doc/api_samples/keypairs/keypairs-import-post-resp.json b/doc/api_samples/os-keypairs/keypairs-import-post-resp.json similarity index 100% rename from doc/api_samples/keypairs/keypairs-import-post-resp.json rename to doc/api_samples/os-keypairs/keypairs-import-post-resp.json diff --git a/doc/api_samples/keypairs/keypairs-list-resp.json b/doc/api_samples/os-keypairs/keypairs-list-resp.json similarity index 100% rename from doc/api_samples/keypairs/keypairs-list-resp.json rename to doc/api_samples/os-keypairs/keypairs-list-resp.json diff --git a/doc/api_samples/keypairs/keypairs-post-req.json b/doc/api_samples/os-keypairs/keypairs-post-req.json similarity index 100% rename from doc/api_samples/keypairs/keypairs-post-req.json rename to doc/api_samples/os-keypairs/keypairs-post-req.json diff --git a/doc/api_samples/keypairs/keypairs-post-resp.json b/doc/api_samples/os-keypairs/keypairs-post-resp.json similarity index 100% rename from doc/api_samples/keypairs/keypairs-post-resp.json rename to doc/api_samples/os-keypairs/keypairs-post-resp.json diff --git a/doc/api_samples/keypairs/v2.10/keypairs-get-resp.json b/doc/api_samples/os-keypairs/v2.10/keypairs-get-resp.json similarity index 100% rename from doc/api_samples/keypairs/v2.10/keypairs-get-resp.json rename to doc/api_samples/os-keypairs/v2.10/keypairs-get-resp.json diff --git a/doc/api_samples/keypairs/v2.10/keypairs-import-post-req.json b/doc/api_samples/os-keypairs/v2.10/keypairs-import-post-req.json similarity index 100% rename from doc/api_samples/keypairs/v2.10/keypairs-import-post-req.json rename to doc/api_samples/os-keypairs/v2.10/keypairs-import-post-req.json diff --git a/doc/api_samples/keypairs/v2.10/keypairs-import-post-resp.json b/doc/api_samples/os-keypairs/v2.10/keypairs-import-post-resp.json similarity index 100% rename from doc/api_samples/keypairs/v2.10/keypairs-import-post-resp.json rename to doc/api_samples/os-keypairs/v2.10/keypairs-import-post-resp.json diff --git 
diff --git a/doc/api_samples/keypairs/v2.10/keypairs-list-resp.json b/doc/api_samples/os-keypairs/v2.10/keypairs-list-resp.json
similarity index 100%
rename from doc/api_samples/keypairs/v2.10/keypairs-list-resp.json
rename to doc/api_samples/os-keypairs/v2.10/keypairs-list-resp.json
diff --git a/doc/api_samples/keypairs/v2.10/keypairs-post-req.json b/doc/api_samples/os-keypairs/v2.10/keypairs-post-req.json
similarity index 100%
rename from doc/api_samples/keypairs/v2.10/keypairs-post-req.json
rename to doc/api_samples/os-keypairs/v2.10/keypairs-post-req.json
diff --git a/doc/api_samples/keypairs/v2.10/keypairs-post-resp.json b/doc/api_samples/os-keypairs/v2.10/keypairs-post-resp.json
similarity index 100%
rename from doc/api_samples/keypairs/v2.10/keypairs-post-resp.json
rename to doc/api_samples/os-keypairs/v2.10/keypairs-post-resp.json
diff --git a/doc/api_samples/keypairs/v2.2/keypairs-get-resp.json b/doc/api_samples/os-keypairs/v2.2/keypairs-get-resp.json
similarity index 100%
rename from doc/api_samples/keypairs/v2.2/keypairs-get-resp.json
rename to doc/api_samples/os-keypairs/v2.2/keypairs-get-resp.json
diff --git a/doc/api_samples/keypairs/v2.2/keypairs-import-post-req.json b/doc/api_samples/os-keypairs/v2.2/keypairs-import-post-req.json
similarity index 100%
rename from doc/api_samples/keypairs/v2.2/keypairs-import-post-req.json
rename to doc/api_samples/os-keypairs/v2.2/keypairs-import-post-req.json
diff --git a/doc/api_samples/keypairs/v2.2/keypairs-import-post-resp.json b/doc/api_samples/os-keypairs/v2.2/keypairs-import-post-resp.json
similarity index 100%
rename from doc/api_samples/keypairs/v2.2/keypairs-import-post-resp.json
rename to doc/api_samples/os-keypairs/v2.2/keypairs-import-post-resp.json
diff --git a/doc/api_samples/keypairs/v2.2/keypairs-list-resp.json b/doc/api_samples/os-keypairs/v2.2/keypairs-list-resp.json
similarity index 100%
rename from doc/api_samples/keypairs/v2.2/keypairs-list-resp.json
rename to doc/api_samples/os-keypairs/v2.2/keypairs-list-resp.json
diff --git a/doc/api_samples/keypairs/v2.2/keypairs-post-req.json b/doc/api_samples/os-keypairs/v2.2/keypairs-post-req.json
similarity index 100%
rename from doc/api_samples/keypairs/v2.2/keypairs-post-req.json
rename to doc/api_samples/os-keypairs/v2.2/keypairs-post-req.json
diff --git a/doc/api_samples/keypairs/v2.2/keypairs-post-resp.json b/doc/api_samples/os-keypairs/v2.2/keypairs-post-resp.json
similarity index 100%
rename from doc/api_samples/keypairs/v2.2/keypairs-post-resp.json
rename to doc/api_samples/os-keypairs/v2.2/keypairs-post-resp.json
diff --git a/doc/api_samples/keypairs/v2.35/keypairs-list-resp.json b/doc/api_samples/os-keypairs/v2.35/keypairs-list-resp.json
similarity index 89%
rename from doc/api_samples/keypairs/v2.35/keypairs-list-resp.json
rename to doc/api_samples/os-keypairs/v2.35/keypairs-list-resp.json
index 69c8ec4f143..786a0b6ce2f 100644
--- a/doc/api_samples/keypairs/v2.35/keypairs-list-resp.json
+++ b/doc/api_samples/os-keypairs/v2.35/keypairs-list-resp.json
@@ -11,8 +11,8 @@
     ],
     "keypairs_links": [
         {
-            "href": "http://openstack.example.com/v2.1/6f70656e737461636b20342065766572/keypairs?limit=1&marker=keypair-5d935425-31d5-48a7-a0f1-e76e9813f2c3",
+            "href": "http://openstack.example.com/v2.1/6f70656e737461636b20342065766572/os-keypairs?limit=1&marker=keypair-5d935425-31d5-48a7-a0f1-e76e9813f2c3",
             "rel": "next"
         }
     ]
-}
\ No newline at end of file
+}
diff --git a/doc/api_samples/keypairs/v2.35/keypairs-list-user1-resp.json b/doc/api_samples/os-keypairs/v2.35/keypairs-list-user1-resp.json
similarity index 100%
rename from doc/api_samples/keypairs/v2.35/keypairs-list-user1-resp.json
rename to doc/api_samples/os-keypairs/v2.35/keypairs-list-user1-resp.json
diff --git a/doc/api_samples/keypairs/v2.35/keypairs-list-user2-resp.json b/doc/api_samples/os-keypairs/v2.35/keypairs-list-user2-resp.json
similarity index 88%
rename from doc/api_samples/keypairs/v2.35/keypairs-list-user2-resp.json
rename to doc/api_samples/os-keypairs/v2.35/keypairs-list-user2-resp.json
index 3c75f9ef621..e9a5e9318b6 100644
--- a/doc/api_samples/keypairs/v2.35/keypairs-list-user2-resp.json
+++ b/doc/api_samples/os-keypairs/v2.35/keypairs-list-user2-resp.json
@@ -11,8 +11,8 @@
     ],
     "keypairs_links": [
         {
-            "href": "http://openstack.example.com/v2.1/6f70656e737461636b20342065766572/keypairs?limit=1&marker=keypair-5d935425-31d5-48a7-a0f1-e76e9813f2c3&user_id=user2",
+            "href": "http://openstack.example.com/v2.1/6f70656e737461636b20342065766572/os-keypairs?limit=1&marker=keypair-5d935425-31d5-48a7-a0f1-e76e9813f2c3&user_id=user2",
             "rel": "next"
         }
     ]
-}
\ No newline at end of file
+}
diff --git a/doc/api_samples/keypairs/v2.35/keypairs-post-req.json b/doc/api_samples/os-keypairs/v2.35/keypairs-post-req.json
similarity index 100%
rename from doc/api_samples/keypairs/v2.35/keypairs-post-req.json
rename to doc/api_samples/os-keypairs/v2.35/keypairs-post-req.json
diff --git a/doc/api_samples/keypairs/v2.35/keypairs-post-resp.json b/doc/api_samples/os-keypairs/v2.35/keypairs-post-resp.json
similarity index 100%
rename from doc/api_samples/keypairs/v2.35/keypairs-post-resp.json
rename to doc/api_samples/os-keypairs/v2.35/keypairs-post-resp.json
diff --git a/doc/api_samples/os-lock-server/v2.73/lock-server-with-reason.json b/doc/api_samples/os-lock-server/v2.73/lock-server-with-reason.json
new file mode 100644
index 00000000000..c307fb39bf7
--- /dev/null
+++ b/doc/api_samples/os-lock-server/v2.73/lock-server-with-reason.json
@@ -0,0 +1,3 @@
+{
+    "lock": {"locked_reason": "I don't want to work"}
+}
\ No newline at end of file
diff --git a/doc/api_samples/os-lock-server/v2.73/lock-server.json b/doc/api_samples/os-lock-server/v2.73/lock-server.json
new file mode 100644
index 00000000000..d7e96964ef2
--- /dev/null
+++ b/doc/api_samples/os-lock-server/v2.73/lock-server.json
@@ -0,0 +1,3 @@
+{
+    "lock": null
+}
\ No newline at end of file
diff --git a/doc/api_samples/os-lock-server/v2.73/unlock-server.json b/doc/api_samples/os-lock-server/v2.73/unlock-server.json
new file mode 100644
index 00000000000..0eba7e72529
--- /dev/null
+++ b/doc/api_samples/os-lock-server/v2.73/unlock-server.json
@@ -0,0 +1,3 @@
+{
+    "unlock": null
+}
\ No newline at end of file
diff --git a/doc/api_samples/os-migrate-server/v2.68/live-migrate-server.json b/doc/api_samples/os-migrate-server/v2.68/live-migrate-server.json
new file mode 100644
index 00000000000..0777861df53
--- /dev/null
+++ b/doc/api_samples/os-migrate-server/v2.68/live-migrate-server.json
@@ -0,0 +1,6 @@
+{
+    "os-migrateLive": {
+        "host": "01c0cadef72d47e28a672a76060d492c",
+        "block_migration": "auto"
+    }
+}
diff --git a/doc/api_samples/os-migrations/migrations-get.json b/doc/api_samples/os-migrations/migrations-get.json
index 91775be7758..bdcc768681d 100644
--- a/doc/api_samples/os-migrations/migrations-get.json
+++ b/doc/api_samples/os-migrations/migrations-get.json
@@ -6,12 +6,12 @@
             "dest_host": "1.2.3.4",
             "dest_node": "node2",
             "id": 1234,
-            "instance_uuid": "instance_id_123",
+            "instance_uuid": "8600d31b-d1a1-4632-b2ff-45c2be1a70ff",
             "new_instance_type_id": 2,
"old_instance_type_id": 1, "source_compute": "compute1", "source_node": "node1", - "status": "Done", + "status": "done", "updated_at": "2012-10-29T13:42:02.000000" }, { @@ -20,12 +20,12 @@ "dest_host": "5.6.7.8", "dest_node": "node20", "id": 5678, - "instance_uuid": "instance_id_456", + "instance_uuid": "9128d044-7b61-403e-b766-7547076ff6c1", "new_instance_type_id": 6, "old_instance_type_id": 5, "source_compute": "compute10", "source_node": "node10", - "status": "Done", + "status": "done", "updated_at": "2013-10-22T13:42:02.000000" } ] diff --git a/doc/api_samples/os-migrations/v2.59/migrations-get-with-timestamp-filter.json b/doc/api_samples/os-migrations/v2.59/migrations-get-with-changes-since.json similarity index 100% rename from doc/api_samples/os-migrations/v2.59/migrations-get-with-timestamp-filter.json rename to doc/api_samples/os-migrations/v2.59/migrations-get-with-changes-since.json diff --git a/doc/api_samples/os-migrations/v2.66/migrations-get-with-changes-before.json b/doc/api_samples/os-migrations/v2.66/migrations-get-with-changes-before.json new file mode 100644 index 00000000000..e829087f87d --- /dev/null +++ b/doc/api_samples/os-migrations/v2.66/migrations-get-with-changes-before.json @@ -0,0 +1,30 @@ +{ + "migrations": [ + { + "created_at": "2016-01-29T11:42:02.000000", + "dest_compute": "compute2", + "dest_host": "1.2.3.4", + "dest_node": "node2", + "id": 1, + "instance_uuid": "8600d31b-d1a1-4632-b2ff-45c2be1a70ff", + "links": [ + { + "href": "http://openstack.example.com/v2.1/6f70656e737461636b20342065766572/servers/8600d31b-d1a1-4632-b2ff-45c2be1a70ff/migrations/1", + "rel": "self" + }, + { + "href": "http://openstack.example.com/6f70656e737461636b20342065766572/servers/8600d31b-d1a1-4632-b2ff-45c2be1a70ff/migrations/1", + "rel": "bookmark" + } + ], + "new_instance_type_id": 1, + "old_instance_type_id": 1, + "source_compute": "compute1", + "source_node": "node1", + "status": "running", + "migration_type": "live-migration", + "updated_at": "2016-01-29T11:42:02.000000", + "uuid": "12341d4b-346a-40d0-83c6-5f4f6892b650" + } + ] +} diff --git a/doc/api_samples/os-migrations/v2.66/migrations-get-with-changes-since.json b/doc/api_samples/os-migrations/v2.66/migrations-get-with-changes-since.json new file mode 100644 index 00000000000..7d36fe4548b --- /dev/null +++ b/doc/api_samples/os-migrations/v2.66/migrations-get-with-changes-since.json @@ -0,0 +1,36 @@ +{ + "migrations": [ + { + "created_at": "2016-06-23T14:42:02.000000", + "dest_compute": "compute20", + "dest_host": "5.6.7.8", + "dest_node": "node20", + "id": 4, + "instance_uuid": "9128d044-7b61-403e-b766-7547076ff6c1", + "new_instance_type_id": 6, + "old_instance_type_id": 5, + "source_compute": "compute10", + "source_node": "node10", + "status": "migrating", + "migration_type": "resize", + "updated_at": "2016-06-23T14:42:02.000000", + "uuid": "42341d4b-346a-40d0-83c6-5f4f6892b650" + }, + { + "created_at": "2016-06-23T13:42:02.000000", + "dest_compute": "compute20", + "dest_host": "5.6.7.8", + "dest_node": "node20", + "id": 3, + "instance_uuid": "9128d044-7b61-403e-b766-7547076ff6c1", + "new_instance_type_id": 6, + "old_instance_type_id": 5, + "source_compute": "compute10", + "source_node": "node10", + "status": "error", + "migration_type": "resize", + "updated_at": "2016-06-23T13:42:02.000000", + "uuid": "32341d4b-346a-40d0-83c6-5f4f6892b650" + } + ] +} diff --git a/doc/api_samples/os-migrations/v2.66/migrations-get-with-limit.json b/doc/api_samples/os-migrations/v2.66/migrations-get-with-limit.json new file mode 
new file mode 100644
index 00000000000..328106bb3f1
--- /dev/null
+++ b/doc/api_samples/os-migrations/v2.66/migrations-get-with-limit.json
@@ -0,0 +1,26 @@
+ {
+    "migrations": [
+        {
+            "created_at": "2016-06-23T14:42:02.000000",
+            "dest_compute": "compute20",
+            "dest_host": "5.6.7.8",
+            "dest_node": "node20",
+            "id": 4,
+            "instance_uuid": "9128d044-7b61-403e-b766-7547076ff6c1",
+            "new_instance_type_id": 6,
+            "old_instance_type_id": 5,
+            "source_compute": "compute10",
+            "source_node": "node10",
+            "status": "migrating",
+            "migration_type": "resize",
+            "updated_at": "2016-06-23T14:42:02.000000",
+            "uuid": "42341d4b-346a-40d0-83c6-5f4f6892b650"
+        }
+    ],
+    "migrations_links": [
+        {
+            "href": "http://openstack.example.com/v2.1/6f70656e737461636b20342065766572/os-migrations?limit=1&marker=42341d4b-346a-40d0-83c6-5f4f6892b650",
+            "rel": "next"
+        }
+    ]
+}
diff --git a/doc/api_samples/os-migrations/v2.66/migrations-get-with-marker.json b/doc/api_samples/os-migrations/v2.66/migrations-get-with-marker.json
new file mode 100644
index 00000000000..e829087f87d
--- /dev/null
+++ b/doc/api_samples/os-migrations/v2.66/migrations-get-with-marker.json
@@ -0,0 +1,30 @@
+{
+    "migrations": [
+        {
+            "created_at": "2016-01-29T11:42:02.000000",
+            "dest_compute": "compute2",
+            "dest_host": "1.2.3.4",
+            "dest_node": "node2",
+            "id": 1,
+            "instance_uuid": "8600d31b-d1a1-4632-b2ff-45c2be1a70ff",
+            "links": [
+                {
+                    "href": "http://openstack.example.com/v2.1/6f70656e737461636b20342065766572/servers/8600d31b-d1a1-4632-b2ff-45c2be1a70ff/migrations/1",
+                    "rel": "self"
+                },
+                {
+                    "href": "http://openstack.example.com/6f70656e737461636b20342065766572/servers/8600d31b-d1a1-4632-b2ff-45c2be1a70ff/migrations/1",
+                    "rel": "bookmark"
+                }
+            ],
+            "new_instance_type_id": 1,
+            "old_instance_type_id": 1,
+            "source_compute": "compute1",
+            "source_node": "node1",
+            "status": "running",
+            "migration_type": "live-migration",
+            "updated_at": "2016-01-29T11:42:02.000000",
+            "uuid": "12341d4b-346a-40d0-83c6-5f4f6892b650"
+        }
+    ]
+}
diff --git a/doc/api_samples/os-migrations/v2.66/migrations-get.json b/doc/api_samples/os-migrations/v2.66/migrations-get.json
new file mode 100644
index 00000000000..42ffca89638
--- /dev/null
+++ b/doc/api_samples/os-migrations/v2.66/migrations-get.json
@@ -0,0 +1,78 @@
+{
+    "migrations": [
+        {
+            "created_at": "2016-06-23T14:42:02.000000",
+            "dest_compute": "compute20",
+            "dest_host": "5.6.7.8",
+            "dest_node": "node20",
+            "id": 4,
+            "instance_uuid": "9128d044-7b61-403e-b766-7547076ff6c1",
+            "new_instance_type_id": 6,
+            "old_instance_type_id": 5,
+            "source_compute": "compute10",
+            "source_node": "node10",
+            "status": "migrating",
+            "migration_type": "resize",
+            "updated_at": "2016-06-23T14:42:02.000000",
+            "uuid": "42341d4b-346a-40d0-83c6-5f4f6892b650"
+        },
+        {
+            "created_at": "2016-06-23T13:42:02.000000",
+            "dest_compute": "compute20",
+            "dest_host": "5.6.7.8",
+            "dest_node": "node20",
+            "id": 3,
+            "instance_uuid": "9128d044-7b61-403e-b766-7547076ff6c1",
+            "new_instance_type_id": 6,
+            "old_instance_type_id": 5,
+            "source_compute": "compute10",
+            "source_node": "node10",
+            "status": "error",
+            "migration_type": "resize",
+            "updated_at": "2016-06-23T13:42:02.000000",
+            "uuid": "32341d4b-346a-40d0-83c6-5f4f6892b650"
+        },
+        {
+            "created_at": "2016-01-29T12:42:02.000000",
+            "dest_compute": "compute2",
+            "dest_host": "1.2.3.4",
+            "dest_node": "node2",
+            "id": 2,
+            "instance_uuid": "8600d31b-d1a1-4632-b2ff-45c2be1a70ff",
+            "new_instance_type_id": 1,
+            "old_instance_type_id": 1,
+            "source_compute": "compute1",
+            "source_node": "node1",
"status": "error", + "migration_type": "live-migration", + "updated_at": "2016-01-29T12:42:02.000000", + "uuid": "22341d4b-346a-40d0-83c6-5f4f6892b650" + }, + { + "created_at": "2016-01-29T11:42:02.000000", + "dest_compute": "compute2", + "dest_host": "1.2.3.4", + "dest_node": "node2", + "id": 1, + "instance_uuid": "8600d31b-d1a1-4632-b2ff-45c2be1a70ff", + "links": [ + { + "href": "http://openstack.example.com/v2.1/6f70656e737461636b20342065766572/servers/8600d31b-d1a1-4632-b2ff-45c2be1a70ff/migrations/1", + "rel": "self" + }, + { + "href": "http://openstack.example.com/6f70656e737461636b20342065766572/servers/8600d31b-d1a1-4632-b2ff-45c2be1a70ff/migrations/1", + "rel": "bookmark" + } + ], + "new_instance_type_id": 1, + "old_instance_type_id": 1, + "source_compute": "compute1", + "source_node": "node1", + "status": "running", + "migration_type": "live-migration", + "updated_at": "2016-01-29T11:42:02.000000", + "uuid": "12341d4b-346a-40d0-83c6-5f4f6892b650" + } + ] +} diff --git a/doc/api_samples/os-migrations/v2.80/migrations-get-with-changes-before.json b/doc/api_samples/os-migrations/v2.80/migrations-get-with-changes-before.json new file mode 100644 index 00000000000..359d965c903 --- /dev/null +++ b/doc/api_samples/os-migrations/v2.80/migrations-get-with-changes-before.json @@ -0,0 +1,32 @@ +{ + "migrations": [ + { + "created_at": "2016-01-29T11:42:02.000000", + "dest_compute": "compute2", + "dest_host": "1.2.3.4", + "dest_node": "node2", + "id": 1, + "instance_uuid": "8600d31b-d1a1-4632-b2ff-45c2be1a70ff", + "links": [ + { + "href": "http://openstack.example.com/v2.1/6f70656e737461636b20342065766572/servers/8600d31b-d1a1-4632-b2ff-45c2be1a70ff/migrations/1", + "rel": "self" + }, + { + "href": "http://openstack.example.com/6f70656e737461636b20342065766572/servers/8600d31b-d1a1-4632-b2ff-45c2be1a70ff/migrations/1", + "rel": "bookmark" + } + ], + "new_instance_type_id": 1, + "old_instance_type_id": 1, + "source_compute": "compute1", + "source_node": "node1", + "status": "running", + "migration_type": "live-migration", + "updated_at": "2016-01-29T11:42:02.000000", + "uuid": "12341d4b-346a-40d0-83c6-5f4f6892b650", + "user_id": "5c48ebaa-193f-4c5d-948a-f559cc92cd5e", + "project_id": "ef92ccff-00f3-46e4-b015-811110e36ee4" + } + ] +} diff --git a/doc/api_samples/os-migrations/v2.80/migrations-get-with-changes-since.json b/doc/api_samples/os-migrations/v2.80/migrations-get-with-changes-since.json new file mode 100644 index 00000000000..86c52f863f1 --- /dev/null +++ b/doc/api_samples/os-migrations/v2.80/migrations-get-with-changes-since.json @@ -0,0 +1,40 @@ +{ + "migrations": [ + { + "created_at": "2016-06-23T14:42:02.000000", + "dest_compute": "compute20", + "dest_host": "5.6.7.8", + "dest_node": "node20", + "id": 4, + "instance_uuid": "9128d044-7b61-403e-b766-7547076ff6c1", + "new_instance_type_id": 6, + "old_instance_type_id": 5, + "source_compute": "compute10", + "source_node": "node10", + "status": "migrating", + "migration_type": "resize", + "updated_at": "2016-06-23T14:42:02.000000", + "uuid": "42341d4b-346a-40d0-83c6-5f4f6892b650", + "user_id": "78348f0e-97ee-4d70-ad34-189692673ea2", + "project_id": "9842f0f7-1229-4355-afe7-15ebdbb8c3d8" + }, + { + "created_at": "2016-06-23T13:42:02.000000", + "dest_compute": "compute20", + "dest_host": "5.6.7.8", + "dest_node": "node20", + "id": 3, + "instance_uuid": "9128d044-7b61-403e-b766-7547076ff6c1", + "new_instance_type_id": 6, + "old_instance_type_id": 5, + "source_compute": "compute10", + "source_node": "node10", + "status": "error", + 
"migration_type": "resize", + "updated_at": "2016-06-23T13:42:02.000000", + "uuid": "32341d4b-346a-40d0-83c6-5f4f6892b650", + "user_id": "78348f0e-97ee-4d70-ad34-189692673ea2", + "project_id": "9842f0f7-1229-4355-afe7-15ebdbb8c3d8" + } + ] +} diff --git a/doc/api_samples/os-migrations/v2.80/migrations-get-with-limit.json b/doc/api_samples/os-migrations/v2.80/migrations-get-with-limit.json new file mode 100644 index 00000000000..a2ed0e1e05b --- /dev/null +++ b/doc/api_samples/os-migrations/v2.80/migrations-get-with-limit.json @@ -0,0 +1,28 @@ + { + "migrations": [ + { + "created_at": "2016-06-23T14:42:02.000000", + "dest_compute": "compute20", + "dest_host": "5.6.7.8", + "dest_node": "node20", + "id": 4, + "instance_uuid": "9128d044-7b61-403e-b766-7547076ff6c1", + "new_instance_type_id": 6, + "old_instance_type_id": 5, + "source_compute": "compute10", + "source_node": "node10", + "status": "migrating", + "migration_type": "resize", + "updated_at": "2016-06-23T14:42:02.000000", + "uuid": "42341d4b-346a-40d0-83c6-5f4f6892b650", + "user_id": "78348f0e-97ee-4d70-ad34-189692673ea2", + "project_id": "9842f0f7-1229-4355-afe7-15ebdbb8c3d8" + } + ], + "migrations_links": [ + { + "href": "http://openstack.example.com/v2.1/6f70656e737461636b20342065766572/os-migrations?limit=1&marker=42341d4b-346a-40d0-83c6-5f4f6892b650", + "rel": "next" + } + ] +} diff --git a/doc/api_samples/os-migrations/v2.80/migrations-get-with-marker.json b/doc/api_samples/os-migrations/v2.80/migrations-get-with-marker.json new file mode 100644 index 00000000000..359d965c903 --- /dev/null +++ b/doc/api_samples/os-migrations/v2.80/migrations-get-with-marker.json @@ -0,0 +1,32 @@ +{ + "migrations": [ + { + "created_at": "2016-01-29T11:42:02.000000", + "dest_compute": "compute2", + "dest_host": "1.2.3.4", + "dest_node": "node2", + "id": 1, + "instance_uuid": "8600d31b-d1a1-4632-b2ff-45c2be1a70ff", + "links": [ + { + "href": "http://openstack.example.com/v2.1/6f70656e737461636b20342065766572/servers/8600d31b-d1a1-4632-b2ff-45c2be1a70ff/migrations/1", + "rel": "self" + }, + { + "href": "http://openstack.example.com/6f70656e737461636b20342065766572/servers/8600d31b-d1a1-4632-b2ff-45c2be1a70ff/migrations/1", + "rel": "bookmark" + } + ], + "new_instance_type_id": 1, + "old_instance_type_id": 1, + "source_compute": "compute1", + "source_node": "node1", + "status": "running", + "migration_type": "live-migration", + "updated_at": "2016-01-29T11:42:02.000000", + "uuid": "12341d4b-346a-40d0-83c6-5f4f6892b650", + "user_id": "5c48ebaa-193f-4c5d-948a-f559cc92cd5e", + "project_id": "ef92ccff-00f3-46e4-b015-811110e36ee4" + } + ] +} diff --git a/doc/api_samples/os-migrations/v2.80/migrations-get-with-user-or-project-id.json b/doc/api_samples/os-migrations/v2.80/migrations-get-with-user-or-project-id.json new file mode 100644 index 00000000000..f7994fd400c --- /dev/null +++ b/doc/api_samples/os-migrations/v2.80/migrations-get-with-user-or-project-id.json @@ -0,0 +1,50 @@ +{ + "migrations": [ + { + "created_at": "2016-01-29T12:42:02.000000", + "dest_compute": "compute2", + "dest_host": "1.2.3.4", + "dest_node": "node2", + "id": 2, + "instance_uuid": "8600d31b-d1a1-4632-b2ff-45c2be1a70ff", + "new_instance_type_id": 1, + "old_instance_type_id": 1, + "source_compute": "compute1", + "source_node": "node1", + "status": "error", + "migration_type": "live-migration", + "updated_at": "2016-01-29T12:42:02.000000", + "uuid": "22341d4b-346a-40d0-83c6-5f4f6892b650", + "user_id": "5c48ebaa-193f-4c5d-948a-f559cc92cd5e", + "project_id": 
"ef92ccff-00f3-46e4-b015-811110e36ee4" + }, + { + "created_at": "2016-01-29T11:42:02.000000", + "dest_compute": "compute2", + "dest_host": "1.2.3.4", + "dest_node": "node2", + "id": 1, + "instance_uuid": "8600d31b-d1a1-4632-b2ff-45c2be1a70ff", + "links": [ + { + "href": "http://openstack.example.com/v2.1/6f70656e737461636b20342065766572/servers/8600d31b-d1a1-4632-b2ff-45c2be1a70ff/migrations/1", + "rel": "self" + }, + { + "href": "http://openstack.example.com/6f70656e737461636b20342065766572/servers/8600d31b-d1a1-4632-b2ff-45c2be1a70ff/migrations/1", + "rel": "bookmark" + } + ], + "new_instance_type_id": 1, + "old_instance_type_id": 1, + "source_compute": "compute1", + "source_node": "node1", + "status": "running", + "migration_type": "live-migration", + "updated_at": "2016-01-29T11:42:02.000000", + "uuid": "12341d4b-346a-40d0-83c6-5f4f6892b650", + "user_id": "5c48ebaa-193f-4c5d-948a-f559cc92cd5e", + "project_id": "ef92ccff-00f3-46e4-b015-811110e36ee4" + } + ] +} diff --git a/doc/api_samples/os-migrations/v2.80/migrations-get.json b/doc/api_samples/os-migrations/v2.80/migrations-get.json new file mode 100644 index 00000000000..ca568263946 --- /dev/null +++ b/doc/api_samples/os-migrations/v2.80/migrations-get.json @@ -0,0 +1,86 @@ +{ + "migrations": [ + { + "created_at": "2016-06-23T14:42:02.000000", + "dest_compute": "compute20", + "dest_host": "5.6.7.8", + "dest_node": "node20", + "id": 4, + "instance_uuid": "9128d044-7b61-403e-b766-7547076ff6c1", + "new_instance_type_id": 6, + "old_instance_type_id": 5, + "source_compute": "compute10", + "source_node": "node10", + "status": "migrating", + "migration_type": "resize", + "updated_at": "2016-06-23T14:42:02.000000", + "uuid": "42341d4b-346a-40d0-83c6-5f4f6892b650", + "user_id": "78348f0e-97ee-4d70-ad34-189692673ea2", + "project_id": "9842f0f7-1229-4355-afe7-15ebdbb8c3d8" + }, + { + "created_at": "2016-06-23T13:42:02.000000", + "dest_compute": "compute20", + "dest_host": "5.6.7.8", + "dest_node": "node20", + "id": 3, + "instance_uuid": "9128d044-7b61-403e-b766-7547076ff6c1", + "new_instance_type_id": 6, + "old_instance_type_id": 5, + "source_compute": "compute10", + "source_node": "node10", + "status": "error", + "migration_type": "resize", + "updated_at": "2016-06-23T13:42:02.000000", + "uuid": "32341d4b-346a-40d0-83c6-5f4f6892b650", + "user_id": "78348f0e-97ee-4d70-ad34-189692673ea2", + "project_id": "9842f0f7-1229-4355-afe7-15ebdbb8c3d8" + }, + { + "created_at": "2016-01-29T12:42:02.000000", + "dest_compute": "compute2", + "dest_host": "1.2.3.4", + "dest_node": "node2", + "id": 2, + "instance_uuid": "8600d31b-d1a1-4632-b2ff-45c2be1a70ff", + "new_instance_type_id": 1, + "old_instance_type_id": 1, + "source_compute": "compute1", + "source_node": "node1", + "status": "error", + "migration_type": "live-migration", + "updated_at": "2016-01-29T12:42:02.000000", + "uuid": "22341d4b-346a-40d0-83c6-5f4f6892b650", + "user_id": "5c48ebaa-193f-4c5d-948a-f559cc92cd5e", + "project_id": "ef92ccff-00f3-46e4-b015-811110e36ee4" + }, + { + "created_at": "2016-01-29T11:42:02.000000", + "dest_compute": "compute2", + "dest_host": "1.2.3.4", + "dest_node": "node2", + "id": 1, + "instance_uuid": "8600d31b-d1a1-4632-b2ff-45c2be1a70ff", + "links": [ + { + "href": "http://openstack.example.com/v2.1/6f70656e737461636b20342065766572/servers/8600d31b-d1a1-4632-b2ff-45c2be1a70ff/migrations/1", + "rel": "self" + }, + { + "href": "http://openstack.example.com/6f70656e737461636b20342065766572/servers/8600d31b-d1a1-4632-b2ff-45c2be1a70ff/migrations/1", + "rel": "bookmark" + 
} + ], + "new_instance_type_id": 1, + "old_instance_type_id": 1, + "source_compute": "compute1", + "source_node": "node1", + "status": "running", + "migration_type": "live-migration", + "updated_at": "2016-01-29T11:42:02.000000", + "uuid": "12341d4b-346a-40d0-83c6-5f4f6892b650", + "user_id": "5c48ebaa-193f-4c5d-948a-f559cc92cd5e", + "project_id": "ef92ccff-00f3-46e4-b015-811110e36ee4" + } + ] +} diff --git a/doc/api_samples/os-networks/network-show-resp.json b/doc/api_samples/os-networks/network-show-resp.json index 78b34950173..d6d12e41936 100644 --- a/doc/api_samples/os-networks/network-show-resp.json +++ b/doc/api_samples/os-networks/network-show-resp.json @@ -1,36 +1,36 @@ { "network": { - "bridge": "br100", - "bridge_interface": "eth0", - "broadcast": "10.0.0.7", - "cidr": "10.0.0.0/29", + "bridge": null, + "bridge_interface": null, + "broadcast": null, + "cidr": null, "cidr_v6": null, - "created_at": "2011-08-15T06:19:19.387525", - "deleted": false, + "created_at": null, + "deleted": null, "deleted_at": null, - "dhcp_server": "10.0.0.1", - "dhcp_start": "10.0.0.3", + "dhcp_server": null, + "dhcp_start": null, "dns1": null, "dns2": null, - "enable_dhcp": true, - "gateway": "10.0.0.1", + "enable_dhcp": null, + "gateway": null, "gateway_v6": null, - "host": "nsokolov-desktop", + "host": null, "id": "20c8acc0-f747-4d71-a389-46d078ebf047", - "injected": false, - "label": "mynet_0", + "injected": null, + "label": "private", "mtu": null, - "multi_host": false, - "netmask": "255.255.255.248", + "multi_host": null, + "netmask": null, "netmask_v6": null, "priority": null, - "project_id": "6133f8b603924f45bc0c9e21f6df12fa", + "project_id": null, "rxtx_base": null, - "share_address": false, - "updated_at": "2011-08-16T09:26:13.048257", - "vlan": 100, - "vpn_private_address": "10.0.0.2", - "vpn_public_address": "127.0.0.1", - "vpn_public_port": 1000 + "share_address": null, + "updated_at": null, + "vlan": null, + "vpn_private_address": null, + "vpn_public_address": null, + "vpn_public_port": null } -} \ No newline at end of file +} diff --git a/doc/api_samples/os-networks/networks-list-resp.json b/doc/api_samples/os-networks/networks-list-resp.json index 655fcaa8cca..886beb71708 100644 --- a/doc/api_samples/os-networks/networks-list-resp.json +++ b/doc/api_samples/os-networks/networks-list-resp.json @@ -1,72 +1,38 @@ { "networks": [ { - "bridge": "br100", - "bridge_interface": "eth0", - "broadcast": "10.0.0.7", - "cidr": "10.0.0.0/29", + "bridge": null, + "bridge_interface": null, + "broadcast": null, + "cidr": null, "cidr_v6": null, - "created_at": "2011-08-15T06:19:19.387525", - "deleted": false, + "created_at": null, + "deleted": null, "deleted_at": null, - "dhcp_server": "10.0.0.1", - "dhcp_start": "10.0.0.3", + "dhcp_server": null, + "dhcp_start": null, "dns1": null, "dns2": null, - "enable_dhcp": true, - "gateway": "10.0.0.1", - "gateway_v6": null, - "host": "nsokolov-desktop", - "id": "20c8acc0-f747-4d71-a389-46d078ebf047", - "injected": false, - "label": "mynet_0", - "mtu": null, - "multi_host": false, - "netmask": "255.255.255.248", - "netmask_v6": null, - "priority": null, - "project_id": "6133f8b603924f45bc0c9e21f6df12fa", - "rxtx_base": null, - "share_address": false, - "updated_at": "2011-08-16T09:26:13.048257", - "vlan": 100, - "vpn_private_address": "10.0.0.2", - "vpn_public_address": "127.0.0.1", - "vpn_public_port": 1000 - }, - { - "bridge": "br101", - "bridge_interface": "eth0", - "broadcast": "10.0.0.15", - "cidr": "10.0.0.10/29", - "cidr_v6": null, - "created_at": 
"2011-08-15T06:19:19.885495", - "deleted": false, - "deleted_at": null, - "dhcp_server": "10.0.0.9", - "dhcp_start": "10.0.0.11", - "dns1": null, - "dns2": null, - "enable_dhcp": true, - "gateway": "10.0.0.9", + "enable_dhcp": null, + "gateway": null, "gateway_v6": null, "host": null, - "id": "20c8acc0-f747-4d71-a389-46d078ebf000", - "injected": false, - "label": "mynet_1", + "id": "20c8acc0-f747-4d71-a389-46d078ebf047", + "injected": null, + "label": "private", "mtu": null, - "multi_host": false, - "netmask": "255.255.255.248", + "multi_host": null, + "netmask": null, "netmask_v6": null, "priority": null, "project_id": null, "rxtx_base": null, - "share_address": false, + "share_address": null, "updated_at": null, - "vlan": 101, - "vpn_private_address": "10.0.0.10", + "vlan": null, + "vpn_private_address": null, "vpn_public_address": null, - "vpn_public_port": 1001 + "vpn_public_port": null } ] -} \ No newline at end of file +} diff --git a/doc/api_samples/os-preserve-ephemeral-rebuild/server-action-rebuild-preserve-ephemeral-resp.json b/doc/api_samples/os-preserve-ephemeral-rebuild/server-action-rebuild-preserve-ephemeral-resp.json index 38ef12daaa8..f65af997aae 100644 --- a/doc/api_samples/os-preserve-ephemeral-rebuild/server-action-rebuild-preserve-ephemeral-resp.json +++ b/doc/api_samples/os-preserve-ephemeral-rebuild/server-action-rebuild-preserve-ephemeral-resp.json @@ -6,7 +6,7 @@ "addresses": { "private": [ { - "addr": "192.168.0.3", + "addr": "192.168.1.30", "version": 4 } ] diff --git a/doc/api_samples/os-quota-class-sets/quota-classes-show-get-resp.json b/doc/api_samples/os-quota-class-sets/quota-classes-show-get-resp.json index 2fcfa2b9dd6..9a9fe365072 100644 --- a/doc/api_samples/os-quota-class-sets/quota-classes-show-get-resp.json +++ b/doc/api_samples/os-quota-class-sets/quota-classes-show-get-resp.json @@ -2,7 +2,7 @@ "quota_class_set": { "cores": 20, "fixed_ips": -1, - "floating_ips": 10, + "floating_ips": -1, "id": "test_class", "injected_file_content_bytes": 10240, "injected_file_path_bytes": 255, @@ -11,8 +11,7 @@ "key_pairs": 100, "metadata_items": 128, "ram": 51200, - "security_group_rules": 20, - "security_groups": 10, - "networks": 3 + "security_group_rules": -1, + "security_groups": -1 } } diff --git a/doc/api_samples/os-quota-class-sets/quota-classes-update-post-req.json b/doc/api_samples/os-quota-class-sets/quota-classes-update-post-req.json index 4e3af3fa86b..736489fdf73 100644 --- a/doc/api_samples/os-quota-class-sets/quota-classes-update-post-req.json +++ b/doc/api_samples/os-quota-class-sets/quota-classes-update-post-req.json @@ -3,15 +3,14 @@ "instances": 50, "cores": 50, "ram": 51200, - "floating_ips": 10, + "floating_ips": -1, "fixed_ips": -1, "metadata_items": 128, "injected_files": 5, "injected_file_content_bytes": 10240, "injected_file_path_bytes": 255, - "security_groups": 10, - "security_group_rules": 20, - "key_pairs": 100, - "networks": 3 + "security_groups": -1, + "security_group_rules": -1, + "key_pairs": 100 } } diff --git a/doc/api_samples/os-quota-class-sets/quota-classes-update-post-resp.json b/doc/api_samples/os-quota-class-sets/quota-classes-update-post-resp.json index c58474b539b..90c88fc450d 100644 --- a/doc/api_samples/os-quota-class-sets/quota-classes-update-post-resp.json +++ b/doc/api_samples/os-quota-class-sets/quota-classes-update-post-resp.json @@ -2,7 +2,7 @@ "quota_class_set": { "cores": 50, "fixed_ips": -1, - "floating_ips": 10, + "floating_ips": -1, "injected_file_content_bytes": 10240, "injected_file_path_bytes": 255, 
"injected_files": 5, @@ -10,8 +10,7 @@ "key_pairs": 100, "metadata_items": 128, "ram": 51200, - "security_group_rules": 20, - "security_groups": 10, - "networks": 3 + "security_group_rules": -1, + "security_groups": -1 } } diff --git a/doc/api_samples/os-quota-sets-noop/quotas-show-defaults-get-resp.json b/doc/api_samples/os-quota-sets-noop/quotas-show-defaults-get-resp.json index 620fa8ed357..714ca5923ae 100644 --- a/doc/api_samples/os-quota-sets-noop/quotas-show-defaults-get-resp.json +++ b/doc/api_samples/os-quota-sets-noop/quotas-show-defaults-get-resp.json @@ -10,11 +10,10 @@ "instances": -1, "key_pairs": -1, "metadata_items": -1, - "networks": -1, "ram": -1, "security_group_rules": -1, "security_groups": -1, "server_group_members": -1, "server_groups": -1 } -} \ No newline at end of file +} diff --git a/doc/api_samples/os-quota-sets-noop/quotas-show-detail-get-resp.json b/doc/api_samples/os-quota-sets-noop/quotas-show-detail-get-resp.json index 47af14b048f..59fd38c7812 100644 --- a/doc/api_samples/os-quota-sets-noop/quotas-show-detail-get-resp.json +++ b/doc/api_samples/os-quota-sets-noop/quotas-show-detail-get-resp.json @@ -46,11 +46,6 @@ "limit": -1, "reserved": -1 }, - "networks": { - "in_use": -1, - "limit": -1, - "reserved": -1 - }, "ram": { "in_use": -1, "limit": -1, @@ -77,4 +72,4 @@ "reserved": -1 } } -} \ No newline at end of file +} diff --git a/doc/api_samples/os-quota-sets-noop/quotas-show-get-resp.json b/doc/api_samples/os-quota-sets-noop/quotas-show-get-resp.json index 620fa8ed357..714ca5923ae 100644 --- a/doc/api_samples/os-quota-sets-noop/quotas-show-get-resp.json +++ b/doc/api_samples/os-quota-sets-noop/quotas-show-get-resp.json @@ -10,11 +10,10 @@ "instances": -1, "key_pairs": -1, "metadata_items": -1, - "networks": -1, "ram": -1, "security_group_rules": -1, "security_groups": -1, "server_group_members": -1, "server_groups": -1 } -} \ No newline at end of file +} diff --git a/doc/api_samples/os-quota-sets-noop/quotas-update-force-post-resp.json b/doc/api_samples/os-quota-sets-noop/quotas-update-force-post-resp.json index 0a4c3e9008d..370bd87ad1e 100644 --- a/doc/api_samples/os-quota-sets-noop/quotas-update-force-post-resp.json +++ b/doc/api_samples/os-quota-sets-noop/quotas-update-force-post-resp.json @@ -9,11 +9,10 @@ "instances": -1, "key_pairs": -1, "metadata_items": -1, - "networks": -1, "ram": -1, "security_group_rules": -1, "security_groups": -1, "server_group_members": -1, "server_groups": -1 } -} \ No newline at end of file +} diff --git a/doc/api_samples/os-quota-sets-noop/quotas-update-post-resp.json b/doc/api_samples/os-quota-sets-noop/quotas-update-post-resp.json index 0a4c3e9008d..370bd87ad1e 100644 --- a/doc/api_samples/os-quota-sets-noop/quotas-update-post-resp.json +++ b/doc/api_samples/os-quota-sets-noop/quotas-update-post-resp.json @@ -9,11 +9,10 @@ "instances": -1, "key_pairs": -1, "metadata_items": -1, - "networks": -1, "ram": -1, "security_group_rules": -1, "security_groups": -1, "server_group_members": -1, "server_groups": -1 } -} \ No newline at end of file +} diff --git a/doc/api_samples/os-quota-sets-noop/user-quotas-show-get-resp.json b/doc/api_samples/os-quota-sets-noop/user-quotas-show-get-resp.json index 620fa8ed357..714ca5923ae 100644 --- a/doc/api_samples/os-quota-sets-noop/user-quotas-show-get-resp.json +++ b/doc/api_samples/os-quota-sets-noop/user-quotas-show-get-resp.json @@ -10,11 +10,10 @@ "instances": -1, "key_pairs": -1, "metadata_items": -1, - "networks": -1, "ram": -1, "security_group_rules": -1, "security_groups": -1, 
"server_group_members": -1, "server_groups": -1 } -} \ No newline at end of file +} diff --git a/doc/api_samples/os-quota-sets-noop/user-quotas-update-post-resp.json b/doc/api_samples/os-quota-sets-noop/user-quotas-update-post-resp.json index 0a4c3e9008d..370bd87ad1e 100644 --- a/doc/api_samples/os-quota-sets-noop/user-quotas-update-post-resp.json +++ b/doc/api_samples/os-quota-sets-noop/user-quotas-update-post-resp.json @@ -9,11 +9,10 @@ "instances": -1, "key_pairs": -1, "metadata_items": -1, - "networks": -1, "ram": -1, "security_group_rules": -1, "security_groups": -1, "server_group_members": -1, "server_groups": -1 } -} \ No newline at end of file +} diff --git a/doc/api_samples/os-quota-sets/quotas-show-defaults-get-resp.json b/doc/api_samples/os-quota-sets/quotas-show-defaults-get-resp.json index e03bc651eba..67771a0a8c0 100644 --- a/doc/api_samples/os-quota-sets/quotas-show-defaults-get-resp.json +++ b/doc/api_samples/os-quota-sets/quotas-show-defaults-get-resp.json @@ -2,7 +2,7 @@ "quota_set": { "cores": 20, "fixed_ips": -1, - "floating_ips": 10, + "floating_ips": -1, "id": "fake_tenant", "injected_file_content_bytes": 10240, "injected_file_path_bytes": 255, @@ -11,10 +11,9 @@ "key_pairs": 100, "metadata_items": 128, "ram": 51200, - "security_group_rules": 20, - "security_groups": 10, + "security_group_rules": -1, + "security_groups": -1, "server_groups": 10, - "server_group_members": 10, - "networks": 3 + "server_group_members": 10 } } diff --git a/doc/api_samples/os-quota-sets/quotas-show-detail-get-resp.json b/doc/api_samples/os-quota-sets/quotas-show-detail-get-resp.json index bca3a1e82a0..65ca1bf5b68 100644 --- a/doc/api_samples/os-quota-sets/quotas-show-detail-get-resp.json +++ b/doc/api_samples/os-quota-sets/quotas-show-detail-get-resp.json @@ -12,7 +12,7 @@ }, "floating_ips": { "in_use": 0, - "limit": 10, + "limit": -1, "reserved": 0 }, "id": "fake_tenant", @@ -53,12 +53,12 @@ }, "security_group_rules": { "in_use": 0, - "limit": 20, + "limit": -1, "reserved": 0 }, "security_groups": { "in_use": 0, - "limit": 10, + "limit": -1, "reserved": 0 }, "server_group_members": { @@ -70,11 +70,6 @@ "in_use": 0, "limit": 10, "reserved": 0 - }, - "networks": { - "in_use": 2, - "limit": 3, - "reserved": 0 } } } diff --git a/doc/api_samples/os-quota-sets/quotas-show-get-resp.json b/doc/api_samples/os-quota-sets/quotas-show-get-resp.json index e03bc651eba..67771a0a8c0 100644 --- a/doc/api_samples/os-quota-sets/quotas-show-get-resp.json +++ b/doc/api_samples/os-quota-sets/quotas-show-get-resp.json @@ -2,7 +2,7 @@ "quota_set": { "cores": 20, "fixed_ips": -1, - "floating_ips": 10, + "floating_ips": -1, "id": "fake_tenant", "injected_file_content_bytes": 10240, "injected_file_path_bytes": 255, @@ -11,10 +11,9 @@ "key_pairs": 100, "metadata_items": 128, "ram": 51200, - "security_group_rules": 20, - "security_groups": 10, + "security_group_rules": -1, + "security_groups": -1, "server_groups": 10, - "server_group_members": 10, - "networks": 3 + "server_group_members": 10 } } diff --git a/doc/api_samples/os-quota-sets/quotas-update-force-post-resp.json b/doc/api_samples/os-quota-sets/quotas-update-force-post-resp.json index ab901db2c92..2811ac02b20 100644 --- a/doc/api_samples/os-quota-sets/quotas-update-force-post-resp.json +++ b/doc/api_samples/os-quota-sets/quotas-update-force-post-resp.json @@ -2,7 +2,7 @@ "quota_set": { "cores": 20, "fixed_ips": -1, - "floating_ips": 10, + "floating_ips": -1, "injected_file_content_bytes": 10240, "injected_file_path_bytes": 255, "injected_files": 5, @@ -10,10 
+10,9 @@ "key_pairs": 100, "metadata_items": 128, "ram": 51200, - "security_group_rules": 20, - "security_groups": 10, + "security_group_rules": -1, + "security_groups": -1, "server_groups": 10, - "server_group_members": 10, - "networks": 3 + "server_group_members": 10 } } diff --git a/doc/api_samples/os-quota-sets/quotas-update-post-req.json b/doc/api_samples/os-quota-sets/quotas-update-post-req.json index 0b78cff3095..2a9517bea4e 100644 --- a/doc/api_samples/os-quota-sets/quotas-update-post-req.json +++ b/doc/api_samples/os-quota-sets/quotas-update-post-req.json @@ -1,5 +1,5 @@ { "quota_set": { - "security_groups": 45 + "cores": 45 } -} \ No newline at end of file +} diff --git a/doc/api_samples/os-quota-sets/quotas-update-post-resp.json b/doc/api_samples/os-quota-sets/quotas-update-post-resp.json index f17ad087f17..93c877aa754 100644 --- a/doc/api_samples/os-quota-sets/quotas-update-post-resp.json +++ b/doc/api_samples/os-quota-sets/quotas-update-post-resp.json @@ -1,8 +1,8 @@ { "quota_set": { - "cores": 20, + "cores": 45, "fixed_ips": -1, - "floating_ips": 10, + "floating_ips": -1, "injected_file_content_bytes": 10240, "injected_file_path_bytes": 255, "injected_files": 5, @@ -10,10 +10,9 @@ "key_pairs": 100, "metadata_items": 128, "ram": 51200, - "security_group_rules": 20, - "security_groups": 45, + "security_group_rules": -1, + "security_groups": -1, "server_groups": 10, - "server_group_members": 10, - "networks": 3 + "server_group_members": 10 } } diff --git a/doc/api_samples/os-quota-sets/user-quotas-show-get-resp.json b/doc/api_samples/os-quota-sets/user-quotas-show-get-resp.json index e03bc651eba..67771a0a8c0 100644 --- a/doc/api_samples/os-quota-sets/user-quotas-show-get-resp.json +++ b/doc/api_samples/os-quota-sets/user-quotas-show-get-resp.json @@ -2,7 +2,7 @@ "quota_set": { "cores": 20, "fixed_ips": -1, - "floating_ips": 10, + "floating_ips": -1, "id": "fake_tenant", "injected_file_content_bytes": 10240, "injected_file_path_bytes": 255, @@ -11,10 +11,9 @@ "key_pairs": 100, "metadata_items": 128, "ram": 51200, - "security_group_rules": 20, - "security_groups": 10, + "security_group_rules": -1, + "security_groups": -1, "server_groups": 10, - "server_group_members": 10, - "networks": 3 + "server_group_members": 10 } } diff --git a/doc/api_samples/os-quota-sets/user-quotas-update-post-resp.json b/doc/api_samples/os-quota-sets/user-quotas-update-post-resp.json index 4ac251c0868..92252d1a434 100644 --- a/doc/api_samples/os-quota-sets/user-quotas-update-post-resp.json +++ b/doc/api_samples/os-quota-sets/user-quotas-update-post-resp.json @@ -2,7 +2,7 @@ "quota_set": { "cores": 20, "fixed_ips": -1, - "floating_ips": 10, + "floating_ips": -1, "injected_file_content_bytes": 10240, "injected_file_path_bytes": 255, "injected_files": 5, @@ -10,10 +10,9 @@ "key_pairs": 100, "metadata_items": 128, "ram": 51200, - "security_group_rules": 20, - "security_groups": 10, + "security_group_rules": -1, + "security_groups": -1, "server_groups": 10, - "server_group_members": 10, - "networks": 3 + "server_group_members": 10 } } diff --git a/doc/api_samples/os-remote-consoles/get-vnc-console-post-resp.json b/doc/api_samples/os-remote-consoles/get-vnc-console-post-resp.json index fe15b779335..faa6ce30225 100644 --- a/doc/api_samples/os-remote-consoles/get-vnc-console-post-resp.json +++ b/doc/api_samples/os-remote-consoles/get-vnc-console-post-resp.json @@ -1,6 +1,6 @@ { "console": { "type": "novnc", - "url": "http://127.0.0.1:6080/vnc_auto.html?token=191996c3-7b0f-42f3-95a7-f1839f2da6ed" + "url": 
"http://127.0.0.1:6080/vnc_auto.html?path=%3Ftoken%3Ddaae261f-474d-4cae-8f6a-1865278ed8c9" } } \ No newline at end of file diff --git a/doc/api_samples/os-remote-consoles/v2.6/create-vnc-console-resp.json b/doc/api_samples/os-remote-consoles/v2.6/create-vnc-console-resp.json index b427a690222..12eade5a2ba 100644 --- a/doc/api_samples/os-remote-consoles/v2.6/create-vnc-console-resp.json +++ b/doc/api_samples/os-remote-consoles/v2.6/create-vnc-console-resp.json @@ -2,6 +2,6 @@ "remote_console": { "protocol": "vnc", "type": "novnc", - "url": "http://example.com:6080/vnc_auto.html?token=b60bcfc3-5fd4-4d21-986c-e83379107819" + "url": "http://example.com:6080/vnc_auto.html?path=%3Ftoken%3Db60bcfc3-5fd4-4d21-986c-e83379107819" } } diff --git a/doc/api_samples/os-rescue/server-get-resp-rescue.json b/doc/api_samples/os-rescue/server-get-resp-rescue.json index 8780bc668d3..9a99aa8824a 100644 --- a/doc/api_samples/os-rescue/server-get-resp-rescue.json +++ b/doc/api_samples/os-rescue/server-get-resp-rescue.json @@ -5,8 +5,8 @@ "addresses": { "private": [ { - "addr": "192.168.0.3", - "OS-EXT-IPS-MAC:mac_addr": "aa:bb:cc:dd:ee:ff", + "addr": "192.168.1.30", + "OS-EXT-IPS-MAC:mac_addr": "00:0c:29:0d:11:74", "OS-EXT-IPS:type": "fixed", "version": 4 } @@ -54,10 +54,7 @@ "user_id": "fake", "config_drive": "", "OS-DCF:diskConfig": "AUTO", - "OS-EXT-AZ:availability_zone": "nova", - "OS-EXT-SRV-ATTR:host": "b8b357f7100d4391828f2177c922ef93", - "OS-EXT-SRV-ATTR:hypervisor_hostname": "fake-mini", - "OS-EXT-SRV-ATTR:instance_name": "instance-00000001", + "OS-EXT-AZ:availability_zone": "us-west", "OS-EXT-STS:power_state": 4, "OS-EXT-STS:task_state": null, "OS-EXT-STS:vm_state": "rescued", diff --git a/doc/api_samples/os-rescue/server-get-resp-unrescue.json b/doc/api_samples/os-rescue/server-get-resp-unrescue.json index a8c9f271aaf..581dc19e018 100644 --- a/doc/api_samples/os-rescue/server-get-resp-unrescue.json +++ b/doc/api_samples/os-rescue/server-get-resp-unrescue.json @@ -5,8 +5,8 @@ "addresses": { "private": [ { - "addr": "192.168.0.3", - "OS-EXT-IPS-MAC:mac_addr": "aa:bb:cc:dd:ee:ff", + "addr": "192.168.1.30", + "OS-EXT-IPS-MAC:mac_addr": "00:0c:29:0d:11:74", "OS-EXT-IPS:type": "fixed", "version": 4 } @@ -55,10 +55,7 @@ "user_id": "fake", "config_drive": "", "OS-DCF:diskConfig": "AUTO", - "OS-EXT-AZ:availability_zone": "nova", - "OS-EXT-SRV-ATTR:host": "b8b357f7100d4391828f2177c922ef93", - "OS-EXT-SRV-ATTR:hypervisor_hostname": "fake-mini", - "OS-EXT-SRV-ATTR:instance_name": "instance-00000001", + "OS-EXT-AZ:availability_zone": "us-west", "OS-EXT-STS:power_state": 1, "OS-EXT-STS:task_state": null, "OS-EXT-STS:vm_state": "active", diff --git a/doc/api_samples/os-rescue/server-unrescue-req.json b/doc/api_samples/os-rescue/server-unrescue-req.json index cafc9b13a84..635fb7a25ed 100644 --- a/doc/api_samples/os-rescue/server-unrescue-req.json +++ b/doc/api_samples/os-rescue/server-unrescue-req.json @@ -1,3 +1,3 @@ { "unrescue": null -} \ No newline at end of file +} diff --git a/doc/api_samples/os-rescue/v2.87/server-get-resp-rescue.json b/doc/api_samples/os-rescue/v2.87/server-get-resp-rescue.json new file mode 100644 index 00000000000..4fc5ce6f1e7 --- /dev/null +++ b/doc/api_samples/os-rescue/v2.87/server-get-resp-rescue.json @@ -0,0 +1,76 @@ +{ + "server": { + "OS-DCF:diskConfig": "AUTO", + "OS-EXT-AZ:availability_zone": "us-west", + "OS-EXT-STS:power_state": 4, + "OS-EXT-STS:task_state": null, + "OS-EXT-STS:vm_state": "rescued", + "OS-SRV-USG:launched_at": "2020-02-07T17:39:49.259481", + 
"OS-SRV-USG:terminated_at": null, + "accessIPv4": "1.2.3.4", + "accessIPv6": "80fe::", + "addresses": { + "private": [ + { + "OS-EXT-IPS-MAC:mac_addr": "00:0c:29:0d:11:74", + "OS-EXT-IPS:type": "fixed", + "addr": "192.168.1.30", + "version": 4 + } + ] + }, + "config_drive": "", + "created": "2020-02-07T17:39:48Z", + "description": null, + "flavor": { + "disk": 1, + "ephemeral": 0, + "extra_specs": {}, + "original_name": "m1.tiny", + "ram": 512, + "swap": 0, + "vcpus": 1 + }, + "hostId": "2091634baaccdc4c5a1d57069c833e402921df696b7f970791b12ec6", + "id": "69bebe1c-3bdb-4feb-9b79-afa3d4782d95", + "image": { + "id": "70a599e0-31e7-49b7-b260-868f441e862b", + "links": [ + { + "href": "http://openstack.example.com/6f70656e737461636b20342065766572/images/70a599e0-31e7-49b7-b260-868f441e862b", + "rel": "bookmark" + } + ] + }, + "key_name": null, + "links": [ + { + "href": "http://openstack.example.com/v2.1/6f70656e737461636b20342065766572/servers/69bebe1c-3bdb-4feb-9b79-afa3d4782d95", + "rel": "self" + }, + { + "href": "http://openstack.example.com/6f70656e737461636b20342065766572/servers/69bebe1c-3bdb-4feb-9b79-afa3d4782d95", + "rel": "bookmark" + } + ], + "locked": false, + "locked_reason": null, + "metadata": { + "My Server Name": "Apache1" + }, + "name": "new-server-test", + "os-extended-volumes:volumes_attached": [], + "security_groups": [ + { + "name": "default" + } + ], + "server_groups": [], + "status": "RESCUE", + "tags": [], + "tenant_id": "6f70656e737461636b20342065766572", + "trusted_image_certificates": null, + "updated": "2020-02-07T17:39:49Z", + "user_id": "fake" + } +} diff --git a/doc/api_samples/os-rescue/v2.87/server-get-resp-unrescue.json b/doc/api_samples/os-rescue/v2.87/server-get-resp-unrescue.json new file mode 100644 index 00000000000..2d54aa13821 --- /dev/null +++ b/doc/api_samples/os-rescue/v2.87/server-get-resp-unrescue.json @@ -0,0 +1,77 @@ +{ + "server": { + "OS-DCF:diskConfig": "AUTO", + "OS-EXT-AZ:availability_zone": "us-west", + "OS-EXT-STS:power_state": 1, + "OS-EXT-STS:task_state": null, + "OS-EXT-STS:vm_state": "active", + "OS-SRV-USG:launched_at": "2020-02-07T17:39:55.632592", + "OS-SRV-USG:terminated_at": null, + "accessIPv4": "1.2.3.4", + "accessIPv6": "80fe::", + "addresses": { + "private": [ + { + "OS-EXT-IPS-MAC:mac_addr": "00:0c:29:0d:11:74", + "OS-EXT-IPS:type": "fixed", + "addr": "192.168.1.30", + "version": 4 + } + ] + }, + "config_drive": "", + "created": "2020-02-07T17:39:54Z", + "description": null, + "flavor": { + "disk": 1, + "ephemeral": 0, + "extra_specs": {}, + "original_name": "m1.tiny", + "ram": 512, + "swap": 0, + "vcpus": 1 + }, + "hostId": "2091634baaccdc4c5a1d57069c833e402921df696b7f970791b12ec6", + "id": "5a0ffa96-ae59-4f82-b7a6-e0c9007cd576", + "image": { + "id": "70a599e0-31e7-49b7-b260-868f441e862b", + "links": [ + { + "href": "http://openstack.example.com/6f70656e737461636b20342065766572/images/70a599e0-31e7-49b7-b260-868f441e862b", + "rel": "bookmark" + } + ] + }, + "key_name": null, + "links": [ + { + "href": "http://openstack.example.com/v2.1/6f70656e737461636b20342065766572/servers/5a0ffa96-ae59-4f82-b7a6-e0c9007cd576", + "rel": "self" + }, + { + "href": "http://openstack.example.com/6f70656e737461636b20342065766572/servers/5a0ffa96-ae59-4f82-b7a6-e0c9007cd576", + "rel": "bookmark" + } + ], + "locked": false, + "locked_reason": null, + "metadata": { + "My Server Name": "Apache1" + }, + "name": "new-server-test", + "os-extended-volumes:volumes_attached": [], + "progress": 0, + "security_groups": [ + { + "name": "default" + } + ], 
+ "server_groups": [], + "status": "ACTIVE", + "tags": [], + "tenant_id": "6f70656e737461636b20342065766572", + "trusted_image_certificates": null, + "updated": "2020-02-07T17:39:56Z", + "user_id": "fake" + } +} diff --git a/doc/api_samples/os-rescue/v2.87/server-rescue-req-with-image-ref.json b/doc/api_samples/os-rescue/v2.87/server-rescue-req-with-image-ref.json new file mode 100644 index 00000000000..1cfab528728 --- /dev/null +++ b/doc/api_samples/os-rescue/v2.87/server-rescue-req-with-image-ref.json @@ -0,0 +1,6 @@ +{ + "rescue": { + "adminPass": "MySecretPass", + "rescue_image_ref": "70a599e0-31e7-49b7-b260-868f441e862b" + } +} \ No newline at end of file diff --git a/doc/api_samples/os-rescue/v2.87/server-rescue-req.json b/doc/api_samples/os-rescue/v2.87/server-rescue-req.json new file mode 100644 index 00000000000..3796600282f --- /dev/null +++ b/doc/api_samples/os-rescue/v2.87/server-rescue-req.json @@ -0,0 +1,5 @@ +{ + "rescue": { + "adminPass": "MySecretPass" + } +} \ No newline at end of file diff --git a/doc/api_samples/os-rescue/v2.87/server-rescue.json b/doc/api_samples/os-rescue/v2.87/server-rescue.json new file mode 100644 index 00000000000..6cd942395fe --- /dev/null +++ b/doc/api_samples/os-rescue/v2.87/server-rescue.json @@ -0,0 +1,3 @@ +{ + "adminPass": "MySecretPass" +} \ No newline at end of file diff --git a/doc/api_samples/os-rescue/v2.87/server-unrescue-req.json b/doc/api_samples/os-rescue/v2.87/server-unrescue-req.json new file mode 100644 index 00000000000..cafc9b13a84 --- /dev/null +++ b/doc/api_samples/os-rescue/v2.87/server-unrescue-req.json @@ -0,0 +1,3 @@ +{ + "unrescue": null +} \ No newline at end of file diff --git a/doc/api_samples/os-server-tags/v2.26/server-tags-show-details-resp.json b/doc/api_samples/os-server-tags/v2.26/server-tags-show-details-resp.json index 1fdc541a73e..9c20c9f0865 100644 --- a/doc/api_samples/os-server-tags/v2.26/server-tags-show-details-resp.json +++ b/doc/api_samples/os-server-tags/v2.26/server-tags-show-details-resp.json @@ -6,8 +6,8 @@ "addresses": { "private": [ { - "addr": "192.168.0.3", - "OS-EXT-IPS-MAC:mac_addr": "aa:bb:cc:dd:ee:ff", + "addr": "192.168.1.30", + "OS-EXT-IPS-MAC:mac_addr": "00:0c:29:0d:11:74", "OS-EXT-IPS:type": "fixed", "version": 4 } @@ -58,17 +58,7 @@ "description": null, "config_drive": "", "OS-DCF:diskConfig": "AUTO", - "OS-EXT-AZ:availability_zone": "nova", - "OS-EXT-SRV-ATTR:host": "b8b357f7100d4391828f2177c922ef93", - "OS-EXT-SRV-ATTR:hypervisor_hostname": "fake-mini", - "OS-EXT-SRV-ATTR:instance_name": "instance-00000001", - "OS-EXT-SRV-ATTR:reservation_id": "r-00000001", - "OS-EXT-SRV-ATTR:launch_index": 0, - "OS-EXT-SRV-ATTR:kernel_id": "", - "OS-EXT-SRV-ATTR:ramdisk_id": "", - "OS-EXT-SRV-ATTR:hostname": "fake-hostname", - "OS-EXT-SRV-ATTR:root_device_name": "/dev/sda", - "OS-EXT-SRV-ATTR:user_data": "IyEvYmluL2Jhc2gKL2Jpbi9zdQplY2hvICJJIGFtIGluIHlvdSEiCg==", + "OS-EXT-AZ:availability_zone": "us-west", "OS-EXT-STS:power_state": 1, "OS-EXT-STS:task_state": null, "OS-EXT-STS:vm_state": "active", @@ -79,7 +69,6 @@ { "name": "default" } - ], - "host_status": "UP" + ] } } diff --git a/doc/api_samples/os-server-tags/v2.26/servers-tags-details-resp.json b/doc/api_samples/os-server-tags/v2.26/servers-tags-details-resp.json index c1ea45a29ce..1e9cf8f99a5 100644 --- a/doc/api_samples/os-server-tags/v2.26/servers-tags-details-resp.json +++ b/doc/api_samples/os-server-tags/v2.26/servers-tags-details-resp.json @@ -6,8 +6,8 @@ "addresses": { "private": [ { - "addr": "192.168.0.3", - 
"OS-EXT-IPS-MAC:mac_addr": "aa:bb:cc:dd:ee:ff", + "addr": "192.168.1.30", + "OS-EXT-IPS-MAC:mac_addr": "00:0c:29:0d:11:74", "OS-EXT-IPS:type": "fixed", "version": 4 } @@ -59,17 +59,7 @@ "description": null, "config_drive": "", "OS-DCF:diskConfig": "AUTO", - "OS-EXT-AZ:availability_zone": "nova", - "OS-EXT-SRV-ATTR:host": "c3f14e9812ad496baf92ccfb3c61e15f", - "OS-EXT-SRV-ATTR:hypervisor_hostname": "fake-mini", - "OS-EXT-SRV-ATTR:instance_name": "instance-00000001", - "OS-EXT-SRV-ATTR:reservation_id": "r-00000001", - "OS-EXT-SRV-ATTR:launch_index": 0, - "OS-EXT-SRV-ATTR:kernel_id": "", - "OS-EXT-SRV-ATTR:ramdisk_id": "", - "OS-EXT-SRV-ATTR:hostname": "fake-hostname", - "OS-EXT-SRV-ATTR:root_device_name": "/dev/sda", - "OS-EXT-SRV-ATTR:user_data": "IyEvYmluL2Jhc2gKL2Jpbi9zdQplY2hvICJJIGFtIGluIHlvdSEiCg==", + "OS-EXT-AZ:availability_zone": "us-west", "OS-EXT-STS:power_state": 1, "OS-EXT-STS:task_state": null, "OS-EXT-STS:vm_state": "active", @@ -80,8 +70,7 @@ { "name": "default" } - ], - "host_status": "UP" + ] } ] } diff --git a/doc/api_samples/os-server-topology/v2.78/servers-topology-resp-user.json b/doc/api_samples/os-server-topology/v2.78/servers-topology-resp-user.json new file mode 100644 index 00000000000..0d3677c6c4d --- /dev/null +++ b/doc/api_samples/os-server-topology/v2.78/servers-topology-resp-user.json @@ -0,0 +1,31 @@ +{ + "nodes": [ + { + "memory_mb": 1024, + "siblings": [ + [ + 0, + 1 + ] + ], + "vcpu_set": [ + 0, + 1 + ] + }, + { + "memory_mb": 2048, + "siblings": [ + [ + 2, + 3 + ] + ], + "vcpu_set": [ + 2, + 3 + ] + } + ], + "pagesize_kb": 4 +} diff --git a/doc/api_samples/os-server-topology/v2.78/servers-topology-resp.json b/doc/api_samples/os-server-topology/v2.78/servers-topology-resp.json new file mode 100644 index 00000000000..a918a2ade59 --- /dev/null +++ b/doc/api_samples/os-server-topology/v2.78/servers-topology-resp.json @@ -0,0 +1,41 @@ +{ + "nodes": [ + { + "cpu_pinning": { + "0": 0, + "1": 5 + }, + "host_node": 0, + "memory_mb": 1024, + "siblings": [ + [ + 0, + 1 + ] + ], + "vcpu_set": [ + 0, + 1 + ] + }, + { + "cpu_pinning": { + "2": 1, + "3": 8 + }, + "host_node": 1, + "memory_mb": 2048, + "siblings": [ + [ + 2, + 3 + ] + ], + "vcpu_set": [ + 2, + 3 + ] + } + ], + "pagesize_kb": 4 +} diff --git a/doc/api_samples/os-services/v2.69/services-list-get-resp.json b/doc/api_samples/os-services/v2.69/services-list-get-resp.json new file mode 100644 index 00000000000..6b06ba63eb7 --- /dev/null +++ b/doc/api_samples/os-services/v2.69/services-list-get-resp.json @@ -0,0 +1,14 @@ +{ + "services": [ + { + "binary": "nova-compute", + "host": "host1", + "status": "UNKNOWN" + }, + { + "binary": "nova-compute", + "host": "host2", + "status": "UNKNOWN" + } + ] +} diff --git a/doc/api_samples/os-shelve/v2.77/os-shelve.json b/doc/api_samples/os-shelve/v2.77/os-shelve.json new file mode 100644 index 00000000000..e33b05865ac --- /dev/null +++ b/doc/api_samples/os-shelve/v2.77/os-shelve.json @@ -0,0 +1,3 @@ +{ + "shelve": null +} \ No newline at end of file diff --git a/doc/api_samples/os-shelve/v2.77/os-unshelve-null.json b/doc/api_samples/os-shelve/v2.77/os-unshelve-null.json new file mode 100644 index 00000000000..fd05c2a2fe6 --- /dev/null +++ b/doc/api_samples/os-shelve/v2.77/os-unshelve-null.json @@ -0,0 +1,3 @@ +{ + "unshelve": null +} \ No newline at end of file diff --git a/doc/api_samples/os-shelve/v2.77/os-unshelve.json b/doc/api_samples/os-shelve/v2.77/os-unshelve.json new file mode 100644 index 00000000000..8ca146b5933 --- /dev/null +++ 
b/doc/api_samples/os-shelve/v2.77/os-unshelve.json @@ -0,0 +1,5 @@ +{ + "unshelve": { + "availability_zone": "us-west" + } +} \ No newline at end of file diff --git a/doc/api_samples/os-simple-tenant-usage/v2.40/simple-tenant-usage-get-all.json b/doc/api_samples/os-simple-tenant-usage/v2.40/simple-tenant-usage-get-all.json new file mode 100644 index 00000000000..d6a1be5cc64 --- /dev/null +++ b/doc/api_samples/os-simple-tenant-usage/v2.40/simple-tenant-usage-get-all.json @@ -0,0 +1,68 @@ +{ + "tenant_usages": [ + { + "server_usages": [ + { + "ended_at": null, + "flavor": "m1.tiny", + "hours": 1.0, + "instance_id": "1f1deceb-17b5-4c04-84c7-e0d4499c8f06", + "local_gb": 1, + "memory_mb": 512, + "name": "instance-3", + "started_at": "2018-10-09T11:29:04.166194", + "state": "active", + "tenant_id": "0000000e737461636b20342065000000", + "uptime": 3600, + "vcpus": 1 + } + ], + "start": "2018-10-09T11:29:04.166194", + "stop": "2018-10-09T12:29:04.166194", + "tenant_id": "0000000e737461636b20342065000000", + "total_hours": 1.0, + "total_local_gb_usage": 1.0, + "total_memory_mb_usage": 512.0, + "total_vcpus_usage": 1.0 + }, + { + "server_usages": [ + { + "ended_at": null, + "flavor": "m1.tiny", + "hours": 1.0, + "instance_id": "1f1deceb-17b5-4c04-84c7-e0d4499c8f00", + "local_gb": 1, + "memory_mb": 512, + "name": "instance-1", + "started_at": "2018-10-09T11:29:04.166194", + "state": "active", + "tenant_id": "6f70656e737461636b20342065766572", + "uptime": 3600, + "vcpus": 1 + }, + { + "ended_at": null, + "flavor": "m1.tiny", + "hours": 1.0, + "instance_id": "1f1deceb-17b5-4c04-84c7-e0d4499c8f03", + "local_gb": 1, + "memory_mb": 512, + "name": "instance-2", + "started_at": "2018-10-09T11:29:04.166194", + "state": "active", + "tenant_id": "6f70656e737461636b20342065766572", + "uptime": 3600, + "vcpus": 1 + } + ], + "start": "2018-10-09T11:29:04.166194", + "stop": "2018-10-09T12:29:04.166194", + "tenant_id": "6f70656e737461636b20342065766572", + "total_hours": 2.0, + "total_local_gb_usage": 2.0, + "total_memory_mb_usage": 1024.0, + "total_vcpus_usage": 2.0 + } + ] +} \ No newline at end of file diff --git a/doc/api_samples/os-tenant-networks/networks-list-res.json b/doc/api_samples/os-tenant-networks/networks-list-res.json index b857e8112af..006663ded1b 100644 --- a/doc/api_samples/os-tenant-networks/networks-list-res.json +++ b/doc/api_samples/os-tenant-networks/networks-list-res.json @@ -1,14 +1,9 @@ { "networks": [ { - "cidr": "10.0.0.0/29", - "id": "616fb98f-46ca-475e-917e-2563e5a8cd19", - "label": "test_0" - }, - { - "cidr": "10.0.0.8/29", - "id": "616fb98f-46ca-475e-917e-2563e5a8cd20", - "label": "test_1" + "cidr": "None", + "id": "3cb9bc59-5699-4588-a4b1-b87f96708bc6", + "label": "private" } ] } diff --git a/doc/api_samples/os-volumes/attach-volume-to-server-req.json b/doc/api_samples/os-volumes/attach-volume-to-server-req.json index 4062687fc3b..f2d5f69bd52 100644 --- a/doc/api_samples/os-volumes/attach-volume-to-server-req.json +++ b/doc/api_samples/os-volumes/attach-volume-to-server-req.json @@ -1,6 +1,6 @@ { "volumeAttachment": { - "volumeId": "a26887c6-c47b-4654-abb5-dfadf7d3f803", - "device": "/dev/vdd" + "volumeId": "a07f71dc-8151-4e7d-a0cc-cd24a3f11113", + "device": "/dev/sdb" } } \ No newline at end of file diff --git a/doc/api_samples/os-volumes/attach-volume-to-server-resp.json b/doc/api_samples/os-volumes/attach-volume-to-server-resp.json index 2e512ac9903..5408fb8a995 100644 --- a/doc/api_samples/os-volumes/attach-volume-to-server-resp.json +++ 
b/doc/api_samples/os-volumes/attach-volume-to-server-resp.json @@ -1,8 +1,8 @@ { "volumeAttachment": { - "device": "/dev/vdd", - "id": "a26887c6-c47b-4654-abb5-dfadf7d3f803", - "serverId": "0c92f3f6-c253-4c9b-bd43-e880a8d2eb0a", - "volumeId": "a26887c6-c47b-4654-abb5-dfadf7d3f803" + "device": "/dev/sdb", + "id": "a07f71dc-8151-4e7d-a0cc-cd24a3f11113", + "serverId": "802db873-0373-4bdd-a433-d272a539ba18", + "volumeId": "a07f71dc-8151-4e7d-a0cc-cd24a3f11113" } } \ No newline at end of file diff --git a/doc/api_samples/os-volumes/list-volume-attachments-resp.json b/doc/api_samples/os-volumes/list-volume-attachments-resp.json index 9ae9b4a2aa8..3ad77cf7de8 100644 --- a/doc/api_samples/os-volumes/list-volume-attachments-resp.json +++ b/doc/api_samples/os-volumes/list-volume-attachments-resp.json @@ -1,16 +1,16 @@ { "volumeAttachments": [ { - "device": "/dev/sdd", - "id": "a26887c6-c47b-4654-abb5-dfadf7d3f803", - "serverId": "4d8c3732-a248-40ed-bebc-539a6ffd25c0", - "volumeId": "a26887c6-c47b-4654-abb5-dfadf7d3f803" + "device": "/dev/sdc", + "id": "227cc671-f30b-4488-96fd-7d0bf13648d8", + "serverId": "4b293d31-ebd5-4a7f-be03-874b90021e54", + "volumeId": "227cc671-f30b-4488-96fd-7d0bf13648d8" }, { - "device": "/dev/sdc", - "id": "a26887c6-c47b-4654-abb5-dfadf7d3f804", - "serverId": "4d8c3732-a248-40ed-bebc-539a6ffd25c0", - "volumeId": "a26887c6-c47b-4654-abb5-dfadf7d3f804" + "device": "/dev/sdb", + "id": "a07f71dc-8151-4e7d-a0cc-cd24a3f11113", + "serverId": "4b293d31-ebd5-4a7f-be03-874b90021e54", + "volumeId": "a07f71dc-8151-4e7d-a0cc-cd24a3f11113" } ] } \ No newline at end of file diff --git a/doc/api_samples/os-volumes/update-volume-req.json b/doc/api_samples/os-volumes/update-volume-req.json index bba735eec83..e5ad47aa3cc 100644 --- a/doc/api_samples/os-volumes/update-volume-req.json +++ b/doc/api_samples/os-volumes/update-volume-req.json @@ -1,5 +1,5 @@ { "volumeAttachment": { - "volumeId": "a26887c6-c47b-4654-abb5-dfadf7d3f805" + "volumeId": "227cc671-f30b-4488-96fd-7d0bf13648d8" } } \ No newline at end of file diff --git a/doc/api_samples/os-volumes/v2.49/attach-volume-to-server-req.json b/doc/api_samples/os-volumes/v2.49/attach-volume-to-server-req.json index 9f49b54d78c..fdf928be694 100644 --- a/doc/api_samples/os-volumes/v2.49/attach-volume-to-server-req.json +++ b/doc/api_samples/os-volumes/v2.49/attach-volume-to-server-req.json @@ -1,6 +1,6 @@ { "volumeAttachment": { - "volumeId": "a26887c6-c47b-4654-abb5-dfadf7d3f803", + "volumeId": "a07f71dc-8151-4e7d-a0cc-cd24a3f11113", "tag": "foo" } -} +} \ No newline at end of file diff --git a/doc/api_samples/os-volumes/v2.49/attach-volume-to-server-resp.json b/doc/api_samples/os-volumes/v2.49/attach-volume-to-server-resp.json index 5f610bcaebe..1e5aa6b1a63 100644 --- a/doc/api_samples/os-volumes/v2.49/attach-volume-to-server-resp.json +++ b/doc/api_samples/os-volumes/v2.49/attach-volume-to-server-resp.json @@ -1,8 +1,8 @@ { "volumeAttachment": { "device": "/dev/sdb", - "id": "a26887c6-c47b-4654-abb5-dfadf7d3f803", - "serverId": "84ffbfa0-daf4-4e23-bf4b-dc532c459d4e", - "volumeId": "a26887c6-c47b-4654-abb5-dfadf7d3f803" + "id": "a07f71dc-8151-4e7d-a0cc-cd24a3f11113", + "serverId": "69d19439-fa5f-4d6e-8b78-1868e7eb93a5", + "volumeId": "a07f71dc-8151-4e7d-a0cc-cd24a3f11113" } -} +} \ No newline at end of file diff --git a/doc/api_samples/os-volumes/v2.49/list-volume-attachments-resp.json b/doc/api_samples/os-volumes/v2.49/list-volume-attachments-resp.json new file mode 100644 index 00000000000..18a5aad803a --- /dev/null +++ 
b/doc/api_samples/os-volumes/v2.49/list-volume-attachments-resp.json @@ -0,0 +1,16 @@ +{ + "volumeAttachments": [ + { + "device": "/dev/sdc", + "id": "227cc671-f30b-4488-96fd-7d0bf13648d8", + "serverId": "1453a6a8-10ec-4797-9b9e-da3c703579d5", + "volumeId": "227cc671-f30b-4488-96fd-7d0bf13648d8" + }, + { + "device": "/dev/sdb", + "id": "a07f71dc-8151-4e7d-a0cc-cd24a3f11113", + "serverId": "1453a6a8-10ec-4797-9b9e-da3c703579d5", + "volumeId": "a07f71dc-8151-4e7d-a0cc-cd24a3f11113" + } + ] +} \ No newline at end of file diff --git a/doc/api_samples/os-volumes/v2.49/update-volume-req.json b/doc/api_samples/os-volumes/v2.49/update-volume-req.json new file mode 100644 index 00000000000..e5ad47aa3cc --- /dev/null +++ b/doc/api_samples/os-volumes/v2.49/update-volume-req.json @@ -0,0 +1,5 @@ +{ + "volumeAttachment": { + "volumeId": "227cc671-f30b-4488-96fd-7d0bf13648d8" + } +} \ No newline at end of file diff --git a/doc/api_samples/os-volumes/v2.49/volume-attachment-detail-resp.json b/doc/api_samples/os-volumes/v2.49/volume-attachment-detail-resp.json new file mode 100644 index 00000000000..af9e64d4c24 --- /dev/null +++ b/doc/api_samples/os-volumes/v2.49/volume-attachment-detail-resp.json @@ -0,0 +1,8 @@ +{ + "volumeAttachment": { + "device": "/dev/sdb", + "id": "a07f71dc-8151-4e7d-a0cc-cd24a3f11113", + "serverId": "9ad0352c-48ff-4290-9db8-3385a676f035", + "volumeId": "a07f71dc-8151-4e7d-a0cc-cd24a3f11113" + } +} \ No newline at end of file diff --git a/doc/api_samples/os-volumes/v2.70/attach-volume-to-server-req.json b/doc/api_samples/os-volumes/v2.70/attach-volume-to-server-req.json new file mode 100644 index 00000000000..fdf928be694 --- /dev/null +++ b/doc/api_samples/os-volumes/v2.70/attach-volume-to-server-req.json @@ -0,0 +1,6 @@ +{ + "volumeAttachment": { + "volumeId": "a07f71dc-8151-4e7d-a0cc-cd24a3f11113", + "tag": "foo" + } +} \ No newline at end of file diff --git a/doc/api_samples/os-volumes/v2.70/attach-volume-to-server-resp.json b/doc/api_samples/os-volumes/v2.70/attach-volume-to-server-resp.json new file mode 100644 index 00000000000..5c03cbc232b --- /dev/null +++ b/doc/api_samples/os-volumes/v2.70/attach-volume-to-server-resp.json @@ -0,0 +1,9 @@ +{ + "volumeAttachment": { + "device": "/dev/sdb", + "id": "a07f71dc-8151-4e7d-a0cc-cd24a3f11113", + "serverId": "70f5c62a-972d-4a8b-abcf-e1375ca7f8c0", + "tag": "foo", + "volumeId": "a07f71dc-8151-4e7d-a0cc-cd24a3f11113" + } +} \ No newline at end of file diff --git a/doc/api_samples/os-volumes/v2.70/list-volume-attachments-resp.json b/doc/api_samples/os-volumes/v2.70/list-volume-attachments-resp.json new file mode 100644 index 00000000000..f17cc8e2d87 --- /dev/null +++ b/doc/api_samples/os-volumes/v2.70/list-volume-attachments-resp.json @@ -0,0 +1,18 @@ +{ + "volumeAttachments": [ + { + "device": "/dev/sdc", + "id": "227cc671-f30b-4488-96fd-7d0bf13648d8", + "serverId": "68426b0f-511b-4cb3-8169-bba2e7a8bc89", + "tag": null, + "volumeId": "227cc671-f30b-4488-96fd-7d0bf13648d8" + }, + { + "device": "/dev/sdb", + "id": "a07f71dc-8151-4e7d-a0cc-cd24a3f11113", + "serverId": "68426b0f-511b-4cb3-8169-bba2e7a8bc89", + "tag": "foo", + "volumeId": "a07f71dc-8151-4e7d-a0cc-cd24a3f11113" + } + ] +} \ No newline at end of file diff --git a/doc/api_samples/os-volumes/v2.70/update-volume-req.json b/doc/api_samples/os-volumes/v2.70/update-volume-req.json new file mode 100644 index 00000000000..e5ad47aa3cc --- /dev/null +++ b/doc/api_samples/os-volumes/v2.70/update-volume-req.json @@ -0,0 +1,5 @@ +{ + "volumeAttachment": { + "volumeId": 
"227cc671-f30b-4488-96fd-7d0bf13648d8" + } +} \ No newline at end of file diff --git a/doc/api_samples/os-volumes/v2.70/volume-attachment-detail-resp.json b/doc/api_samples/os-volumes/v2.70/volume-attachment-detail-resp.json new file mode 100644 index 00000000000..650ede480e8 --- /dev/null +++ b/doc/api_samples/os-volumes/v2.70/volume-attachment-detail-resp.json @@ -0,0 +1,9 @@ +{ + "volumeAttachment": { + "device": "/dev/sdb", + "id": "a07f71dc-8151-4e7d-a0cc-cd24a3f11113", + "serverId": "d989feee-002d-40f6-b47d-f0dbee48bbc1", + "tag": "foo", + "volumeId": "a07f71dc-8151-4e7d-a0cc-cd24a3f11113" + } +} \ No newline at end of file diff --git a/doc/api_samples/os-volumes/v2.79/attach-volume-to-server-req.json b/doc/api_samples/os-volumes/v2.79/attach-volume-to-server-req.json new file mode 100644 index 00000000000..b4429e12e96 --- /dev/null +++ b/doc/api_samples/os-volumes/v2.79/attach-volume-to-server-req.json @@ -0,0 +1,7 @@ +{ + "volumeAttachment": { + "volumeId": "a07f71dc-8151-4e7d-a0cc-cd24a3f11113", + "tag": "foo", + "delete_on_termination": true + } +} \ No newline at end of file diff --git a/doc/api_samples/os-volumes/v2.79/attach-volume-to-server-resp.json b/doc/api_samples/os-volumes/v2.79/attach-volume-to-server-resp.json new file mode 100644 index 00000000000..3a60cdc0d09 --- /dev/null +++ b/doc/api_samples/os-volumes/v2.79/attach-volume-to-server-resp.json @@ -0,0 +1,10 @@ +{ + "volumeAttachment": { + "delete_on_termination": true, + "device": "/dev/sdb", + "id": "a07f71dc-8151-4e7d-a0cc-cd24a3f11113", + "serverId": "09b3b9d1-b8c5-48e1-841d-62c3ef967a88", + "tag": "foo", + "volumeId": "a07f71dc-8151-4e7d-a0cc-cd24a3f11113" + } +} \ No newline at end of file diff --git a/doc/api_samples/os-volumes/v2.79/list-volume-attachments-resp.json b/doc/api_samples/os-volumes/v2.79/list-volume-attachments-resp.json new file mode 100644 index 00000000000..ffe7c0baf1e --- /dev/null +++ b/doc/api_samples/os-volumes/v2.79/list-volume-attachments-resp.json @@ -0,0 +1,20 @@ +{ + "volumeAttachments": [ + { + "delete_on_termination": false, + "device": "/dev/sdc", + "id": "227cc671-f30b-4488-96fd-7d0bf13648d8", + "serverId": "d5e4ae35-ac0e-4311-a8c5-0ee863e951d9", + "tag": null, + "volumeId": "227cc671-f30b-4488-96fd-7d0bf13648d8" + }, + { + "delete_on_termination": true, + "device": "/dev/sdb", + "id": "a07f71dc-8151-4e7d-a0cc-cd24a3f11113", + "serverId": "d5e4ae35-ac0e-4311-a8c5-0ee863e951d9", + "tag": "foo", + "volumeId": "a07f71dc-8151-4e7d-a0cc-cd24a3f11113" + } + ] +} \ No newline at end of file diff --git a/doc/api_samples/os-volumes/v2.79/update-volume-req.json b/doc/api_samples/os-volumes/v2.79/update-volume-req.json new file mode 100644 index 00000000000..e5ad47aa3cc --- /dev/null +++ b/doc/api_samples/os-volumes/v2.79/update-volume-req.json @@ -0,0 +1,5 @@ +{ + "volumeAttachment": { + "volumeId": "227cc671-f30b-4488-96fd-7d0bf13648d8" + } +} \ No newline at end of file diff --git a/doc/api_samples/os-volumes/v2.79/volume-attachment-detail-resp.json b/doc/api_samples/os-volumes/v2.79/volume-attachment-detail-resp.json new file mode 100644 index 00000000000..4a54243c2b1 --- /dev/null +++ b/doc/api_samples/os-volumes/v2.79/volume-attachment-detail-resp.json @@ -0,0 +1,10 @@ +{ + "volumeAttachment": { + "delete_on_termination": true, + "device": "/dev/sdb", + "id": "a07f71dc-8151-4e7d-a0cc-cd24a3f11113", + "serverId": "2aad99d3-7aa4-41e9-b4e6-3f960b115d68", + "tag": "foo", + "volumeId": "a07f71dc-8151-4e7d-a0cc-cd24a3f11113" + } +} \ No newline at end of file diff --git 
a/doc/api_samples/os-volumes/v2.85/attach-volume-to-server-req.json b/doc/api_samples/os-volumes/v2.85/attach-volume-to-server-req.json new file mode 100644 index 00000000000..b4429e12e96 --- /dev/null +++ b/doc/api_samples/os-volumes/v2.85/attach-volume-to-server-req.json @@ -0,0 +1,7 @@ +{ + "volumeAttachment": { + "volumeId": "a07f71dc-8151-4e7d-a0cc-cd24a3f11113", + "tag": "foo", + "delete_on_termination": true + } +} \ No newline at end of file diff --git a/doc/api_samples/os-volumes/v2.85/attach-volume-to-server-resp.json b/doc/api_samples/os-volumes/v2.85/attach-volume-to-server-resp.json new file mode 100644 index 00000000000..3a60cdc0d09 --- /dev/null +++ b/doc/api_samples/os-volumes/v2.85/attach-volume-to-server-resp.json @@ -0,0 +1,10 @@ +{ + "volumeAttachment": { + "delete_on_termination": true, + "device": "/dev/sdb", + "id": "a07f71dc-8151-4e7d-a0cc-cd24a3f11113", + "serverId": "09b3b9d1-b8c5-48e1-841d-62c3ef967a88", + "tag": "foo", + "volumeId": "a07f71dc-8151-4e7d-a0cc-cd24a3f11113" + } +} \ No newline at end of file diff --git a/doc/api_samples/os-volumes/v2.85/list-volume-attachments-resp.json b/doc/api_samples/os-volumes/v2.85/list-volume-attachments-resp.json new file mode 100644 index 00000000000..ffe7c0baf1e --- /dev/null +++ b/doc/api_samples/os-volumes/v2.85/list-volume-attachments-resp.json @@ -0,0 +1,20 @@ +{ + "volumeAttachments": [ + { + "delete_on_termination": false, + "device": "/dev/sdc", + "id": "227cc671-f30b-4488-96fd-7d0bf13648d8", + "serverId": "d5e4ae35-ac0e-4311-a8c5-0ee863e951d9", + "tag": null, + "volumeId": "227cc671-f30b-4488-96fd-7d0bf13648d8" + }, + { + "delete_on_termination": true, + "device": "/dev/sdb", + "id": "a07f71dc-8151-4e7d-a0cc-cd24a3f11113", + "serverId": "d5e4ae35-ac0e-4311-a8c5-0ee863e951d9", + "tag": "foo", + "volumeId": "a07f71dc-8151-4e7d-a0cc-cd24a3f11113" + } + ] +} \ No newline at end of file diff --git a/doc/api_samples/os-volumes/v2.85/update-volume-attachment-delete-flag-req.json b/doc/api_samples/os-volumes/v2.85/update-volume-attachment-delete-flag-req.json new file mode 100644 index 00000000000..30105458e7c --- /dev/null +++ b/doc/api_samples/os-volumes/v2.85/update-volume-attachment-delete-flag-req.json @@ -0,0 +1,6 @@ +{ + "volumeAttachment": { + "volumeId": "a07f71dc-8151-4e7d-a0cc-cd24a3f11113", + "delete_on_termination": true + } +} diff --git a/doc/api_samples/os-volumes/v2.85/update-volume-req.json b/doc/api_samples/os-volumes/v2.85/update-volume-req.json new file mode 100644 index 00000000000..e5ad47aa3cc --- /dev/null +++ b/doc/api_samples/os-volumes/v2.85/update-volume-req.json @@ -0,0 +1,5 @@ +{ + "volumeAttachment": { + "volumeId": "227cc671-f30b-4488-96fd-7d0bf13648d8" + } +} \ No newline at end of file diff --git a/doc/api_samples/os-volumes/v2.85/volume-attachment-detail-resp.json b/doc/api_samples/os-volumes/v2.85/volume-attachment-detail-resp.json new file mode 100644 index 00000000000..4a54243c2b1 --- /dev/null +++ b/doc/api_samples/os-volumes/v2.85/volume-attachment-detail-resp.json @@ -0,0 +1,10 @@ +{ + "volumeAttachment": { + "delete_on_termination": true, + "device": "/dev/sdb", + "id": "a07f71dc-8151-4e7d-a0cc-cd24a3f11113", + "serverId": "2aad99d3-7aa4-41e9-b4e6-3f960b115d68", + "tag": "foo", + "volumeId": "a07f71dc-8151-4e7d-a0cc-cd24a3f11113" + } +} \ No newline at end of file diff --git a/doc/api_samples/os-volumes/v2.89/attach-volume-to-server-req.json b/doc/api_samples/os-volumes/v2.89/attach-volume-to-server-req.json new file mode 100644 index 00000000000..b4429e12e96 --- /dev/null +++ 
b/doc/api_samples/os-volumes/v2.89/attach-volume-to-server-req.json @@ -0,0 +1,7 @@ +{ + "volumeAttachment": { + "volumeId": "a07f71dc-8151-4e7d-a0cc-cd24a3f11113", + "tag": "foo", + "delete_on_termination": true + } +} \ No newline at end of file diff --git a/doc/api_samples/os-volumes/v2.89/attach-volume-to-server-resp.json b/doc/api_samples/os-volumes/v2.89/attach-volume-to-server-resp.json new file mode 100644 index 00000000000..0b37f87012e --- /dev/null +++ b/doc/api_samples/os-volumes/v2.89/attach-volume-to-server-resp.json @@ -0,0 +1,10 @@ +{ + "volumeAttachment": { + "delete_on_termination": true, + "device": "/dev/sdb", + "id": "a07f71dc-8151-4e7d-a0cc-cd24a3f11113", + "serverId": "7ebed2ce-85b3-40b5-84ae-8cc725c37ed2", + "tag": "foo", + "volumeId": "a07f71dc-8151-4e7d-a0cc-cd24a3f11113" + } +} \ No newline at end of file diff --git a/doc/api_samples/os-volumes/v2.89/list-volume-attachments-resp.json b/doc/api_samples/os-volumes/v2.89/list-volume-attachments-resp.json new file mode 100644 index 00000000000..9935969fbf2 --- /dev/null +++ b/doc/api_samples/os-volumes/v2.89/list-volume-attachments-resp.json @@ -0,0 +1,22 @@ +{ + "volumeAttachments": [ + { + "attachment_id": "979ce4f8-033a-409d-85e6-6b5c0f6a6302", + "delete_on_termination": false, + "device": "/dev/sdc", + "serverId": "7696780b-3f53-4688-ab25-019bfcbbd806", + "tag": null, + "volumeId": "227cc671-f30b-4488-96fd-7d0bf13648d8", + "bdm_uuid": "c088db45-92b8-49e8-81e2-a1b77a144b3b" + }, + { + "attachment_id": "c5684109-0311-4fca-9814-350e46ab7d2a", + "delete_on_termination": true, + "device": "/dev/sdb", + "serverId": "7696780b-3f53-4688-ab25-019bfcbbd806", + "tag": "foo", + "volumeId": "a07f71dc-8151-4e7d-a0cc-cd24a3f11113", + "bdm_uuid": "1aa24536-6fb5-426c-8894-d627f39aa48b" + } + ] +} diff --git a/doc/api_samples/os-volumes/v2.89/update-volume-attachment-delete-flag-req.json b/doc/api_samples/os-volumes/v2.89/update-volume-attachment-delete-flag-req.json new file mode 100644 index 00000000000..a2e17f2b6f0 --- /dev/null +++ b/doc/api_samples/os-volumes/v2.89/update-volume-attachment-delete-flag-req.json @@ -0,0 +1,10 @@ +{ + "volumeAttachment": { + "volumeId": "a07f71dc-8151-4e7d-a0cc-cd24a3f11113", + "id": "a07f71dc-8151-4e7d-a0cc-cd24a3f11113", + "serverId": "fddf0901-8caf-42c9-b496-133c570b171b", + "device": "/dev/sdb", + "tag": "foo", + "delete_on_termination": true + } +} \ No newline at end of file diff --git a/doc/api_samples/os-volumes/v2.89/volume-attachment-detail-resp.json b/doc/api_samples/os-volumes/v2.89/volume-attachment-detail-resp.json new file mode 100644 index 00000000000..eda615f9961 --- /dev/null +++ b/doc/api_samples/os-volumes/v2.89/volume-attachment-detail-resp.json @@ -0,0 +1,11 @@ +{ + "volumeAttachment": { + "attachment_id": "721a5c82-5ebc-4c6a-8339-3d33d8d027ed", + "delete_on_termination": true, + "device": "/dev/sdb", + "serverId": "7ebed2ce-85b3-40b5-84ae-8cc725c37ed2", + "tag": "foo", + "volumeId": "a07f71dc-8151-4e7d-a0cc-cd24a3f11113", + "bdm_uuid": "c088db45-92b8-49e8-81e2-a1b77a144b3b" + } +} diff --git a/doc/api_samples/os-volumes/volume-attachment-detail-resp.json b/doc/api_samples/os-volumes/volume-attachment-detail-resp.json index 5375033bb9c..41b8f21a88c 100644 --- a/doc/api_samples/os-volumes/volume-attachment-detail-resp.json +++ b/doc/api_samples/os-volumes/volume-attachment-detail-resp.json @@ -1,8 +1,8 @@ { "volumeAttachment": { - "device": "/dev/sdd", - "id": "a26887c6-c47b-4654-abb5-dfadf7d3f803", - "serverId": "2390fb4d-1693-45d7-b309-e29c4af16538", - "volumeId": 
"a26887c6-c47b-4654-abb5-dfadf7d3f803" + "device": "/dev/sdb", + "id": "a07f71dc-8151-4e7d-a0cc-cd24a3f11113", + "serverId": "1ad6852e-6605-4510-b639-d0bff864b49a", + "volumeId": "a07f71dc-8151-4e7d-a0cc-cd24a3f11113" } } \ No newline at end of file diff --git a/doc/api_samples/server-ips/server-ips-network-resp.json b/doc/api_samples/server-ips/server-ips-network-resp.json index 8669202e745..fdb070ac3cd 100644 --- a/doc/api_samples/server-ips/server-ips-network-resp.json +++ b/doc/api_samples/server-ips/server-ips-network-resp.json @@ -1,7 +1,7 @@ { "private": [ { - "addr": "192.168.0.3", + "addr": "192.168.1.30", "version": 4 } ] diff --git a/doc/api_samples/server-ips/server-ips-resp.json b/doc/api_samples/server-ips/server-ips-resp.json index 8a350056139..f710c5777ed 100644 --- a/doc/api_samples/server-ips/server-ips-resp.json +++ b/doc/api_samples/server-ips/server-ips-resp.json @@ -2,7 +2,7 @@ "addresses": { "private": [ { - "addr": "192.168.0.3", + "addr": "192.168.1.30", "version": 4 } ] diff --git a/doc/api_samples/server-migrations/v2.80/live-migrate-server.json b/doc/api_samples/server-migrations/v2.80/live-migrate-server.json new file mode 100644 index 00000000000..c2f5bf6c989 --- /dev/null +++ b/doc/api_samples/server-migrations/v2.80/live-migrate-server.json @@ -0,0 +1,6 @@ +{ + "os-migrateLive": { + "host": null, + "block_migration": "auto" + } +} diff --git a/doc/api_samples/server-migrations/v2.80/migrations-get.json b/doc/api_samples/server-migrations/v2.80/migrations-get.json new file mode 100644 index 00000000000..7de0e63201e --- /dev/null +++ b/doc/api_samples/server-migrations/v2.80/migrations-get.json @@ -0,0 +1,23 @@ +{ + "migration": { + "created_at": "2016-01-29T13:42:02.000000", + "dest_compute": "compute2", + "dest_host": "1.2.3.4", + "dest_node": "node2", + "id": 1, + "server_uuid": "4cfba335-03d8-49b2-8c52-e69043d1e8fe", + "source_compute": "compute1", + "source_node": "node1", + "status": "running", + "memory_total_bytes": 123456, + "memory_processed_bytes": 12345, + "memory_remaining_bytes": 111111, + "disk_total_bytes": 234567, + "disk_processed_bytes": 23456, + "disk_remaining_bytes": 211111, + "updated_at": "2016-01-29T13:42:02.000000", + "uuid": "12341d4b-346a-40d0-83c6-5f4f6892b650", + "user_id": "8dbaa0f0-ab95-4ffe-8cb4-9c89d2ac9d24", + "project_id": "5f705771-3aa9-4f4c-8660-0d9522ffdbea" + } +} diff --git a/doc/api_samples/server-migrations/v2.80/migrations-index.json b/doc/api_samples/server-migrations/v2.80/migrations-index.json new file mode 100644 index 00000000000..460529a5896 --- /dev/null +++ b/doc/api_samples/server-migrations/v2.80/migrations-index.json @@ -0,0 +1,25 @@ +{ + "migrations": [ + { + "created_at": "2016-01-29T13:42:02.000000", + "dest_compute": "compute2", + "dest_host": "1.2.3.4", + "dest_node": "node2", + "id": 1, + "server_uuid": "4cfba335-03d8-49b2-8c52-e69043d1e8fe", + "source_compute": "compute1", + "source_node": "node1", + "status": "running", + "memory_total_bytes": 123456, + "memory_processed_bytes": 12345, + "memory_remaining_bytes": 111111, + "disk_total_bytes": 234567, + "disk_processed_bytes": 23456, + "disk_remaining_bytes": 211111, + "updated_at": "2016-01-29T13:42:02.000000", + "uuid": "12341d4b-346a-40d0-83c6-5f4f6892b650", + "user_id": "8dbaa0f0-ab95-4ffe-8cb4-9c89d2ac9d24", + "project_id": "5f705771-3aa9-4f4c-8660-0d9522ffdbea" + } + ] +} diff --git a/doc/api_samples/servers/server-action-addfloatingip-req.json b/doc/api_samples/servers/server-action-addfloatingip-req.json index e4ad5638ab2..654c6bda4c8 100644 
--- a/doc/api_samples/servers/server-action-addfloatingip-req.json
+++ b/doc/api_samples/servers/server-action-addfloatingip-req.json
@@ -1,6 +1,6 @@
 {
     "addFloatingIp" : {
         "address": "10.10.10.10",
-        "fixed_address": "192.168.0.3"
+        "fixed_address": "192.168.1.30"
     }
 }
\ No newline at end of file
diff --git a/doc/api_samples/servers/server-action-rebuild-resp.json b/doc/api_samples/servers/server-action-rebuild-resp.json
index b66dc4ce80d..a021f888a03 100644
--- a/doc/api_samples/servers/server-action-rebuild-resp.json
+++ b/doc/api_samples/servers/server-action-rebuild-resp.json
@@ -6,7 +6,7 @@
         "addresses": {
             "private": [
                 {
-                    "addr": "192.168.0.3",
+                    "addr": "192.168.1.30",
                     "version": 4
                 }
             ]
@@ -51,6 +51,6 @@
         "status": "ACTIVE",
         "tenant_id": "6f70656e737461636b20342065766572",
         "updated": "2013-11-14T06:29:02Z",
-        "user_id": "fake"
+        "user_id": "admin"
     }
 }
\ No newline at end of file
diff --git a/doc/api_samples/servers/server-create-req-v237.json b/doc/api_samples/servers/server-create-req-v237.json
index abffb363e43..8b5c272e782 100644
--- a/doc/api_samples/servers/server-create-req-v237.json
+++ b/doc/api_samples/servers/server-create-req-v237.json
@@ -5,7 +5,7 @@
         "name" : "new-server-test",
         "imageRef" : "70a599e0-31e7-49b7-b260-868f441e862b",
         "flavorRef" : "1",
-        "availability_zone": "nova",
+        "availability_zone": "us-west",
         "OS-DCF:diskConfig": "AUTO",
         "metadata" : {
             "My Server Name" : "Apache1"
diff --git a/doc/api_samples/servers/server-create-req-v257.json b/doc/api_samples/servers/server-create-req-v257.json
index c6d8dec2424..7c5011e4fe1 100644
--- a/doc/api_samples/servers/server-create-req-v257.json
+++ b/doc/api_samples/servers/server-create-req-v257.json
@@ -5,7 +5,7 @@
         "name" : "new-server-test",
         "imageRef" : "70a599e0-31e7-49b7-b260-868f441e862b",
         "flavorRef" : "http://openstack.example.com/flavors/1",
-        "availability_zone": "nova",
+        "availability_zone": "us-west",
         "OS-DCF:diskConfig": "AUTO",
         "metadata" : {
             "My Server Name" : "Apache1"
diff --git a/doc/api_samples/servers/server-create-req.json b/doc/api_samples/servers/server-create-req.json
index 4ac0157a85e..f51255b9065 100644
--- a/doc/api_samples/servers/server-create-req.json
+++ b/doc/api_samples/servers/server-create-req.json
@@ -5,7 +5,7 @@
         "name" : "new-server-test",
         "imageRef" : "70a599e0-31e7-49b7-b260-868f441e862b",
         "flavorRef" : "1",
-        "availability_zone": "nova",
+        "availability_zone": "us-west",
         "OS-DCF:diskConfig": "AUTO",
         "metadata" : {
             "My Server Name" : "Apache1"
diff --git a/doc/api_samples/servers/server-get-resp.json b/doc/api_samples/servers/server-get-resp.json
index 6c1e246f7cb..66d1930fb48 100644
--- a/doc/api_samples/servers/server-get-resp.json
+++ b/doc/api_samples/servers/server-get-resp.json
@@ -5,8 +5,8 @@
         "addresses": {
             "private": [
                 {
-                    "addr": "192.168.0.3",
-                    "OS-EXT-IPS-MAC:mac_addr": "aa:bb:cc:dd:ee:ff",
+                    "addr": "192.168.1.30",
+                    "OS-EXT-IPS-MAC:mac_addr": "00:0c:29:0d:11:74",
                     "OS-EXT-IPS:type": "fixed",
                     "version": 4
                 }
@@ -50,7 +50,7 @@
         "name": "new-server-test",
         "config_drive": "",
         "OS-DCF:diskConfig": "AUTO",
-        "OS-EXT-AZ:availability_zone": "nova",
+        "OS-EXT-AZ:availability_zone": "us-west",
         "OS-EXT-SRV-ATTR:host": "b8b357f7100d4391828f2177c922ef93",
         "OS-EXT-SRV-ATTR:hypervisor_hostname": "fake-mini",
         "OS-EXT-SRV-ATTR:instance_name": "instance-00000001",
@@ -76,6 +76,6 @@
         "status": "ACTIVE",
         "tenant_id": "6f70656e737461636b20342065766572",
         "updated": "2013-09-03T04:01:33Z",
-        "user_id": "fake"
+        "user_id": "admin"
     }
 }
diff --git a/doc/api_samples/servers/server-update-resp.json b/doc/api_samples/servers/server-update-resp.json
index 4607e312f23..6c9de44daaf 100644
--- a/doc/api_samples/servers/server-update-resp.json
+++ b/doc/api_samples/servers/server-update-resp.json
@@ -6,7 +6,7 @@
         "addresses": {
             "private": [
                 {
-                    "addr": "192.168.0.3",
+                    "addr": "192.168.1.30",
                     "version": 4
                 }
             ]
@@ -50,6 +50,6 @@
         "status": "ACTIVE",
         "tenant_id": "6f70656e737461636b20342065766572",
         "updated": "2012-12-02T02:11:58Z",
-        "user_id": "fake"
+        "user_id": "admin"
     }
 }
\ No newline at end of file
diff --git a/doc/api_samples/servers/servers-details-resp.json b/doc/api_samples/servers/servers-details-resp.json
index 28a1e98efe1..874164288c1 100644
--- a/doc/api_samples/servers/servers-details-resp.json
+++ b/doc/api_samples/servers/servers-details-resp.json
@@ -6,8 +6,8 @@
             "addresses": {
                 "private": [
                     {
-                        "addr": "192.168.0.3",
-                        "OS-EXT-IPS-MAC:mac_addr": "aa:bb:cc:dd:ee:ff",
+                        "addr": "192.168.1.30",
+                        "OS-EXT-IPS-MAC:mac_addr": "00:0c:29:0d:11:74",
                         "OS-EXT-IPS:type": "fixed",
                         "version": 4
                     }
@@ -51,7 +51,7 @@
             "name": "new-server-test",
             "config_drive": "",
             "OS-DCF:diskConfig": "AUTO",
-            "OS-EXT-AZ:availability_zone": "nova",
+            "OS-EXT-AZ:availability_zone": "us-west",
             "OS-EXT-SRV-ATTR:host": "c3f14e9812ad496baf92ccfb3c61e15f",
             "OS-EXT-SRV-ATTR:hypervisor_hostname": "fake-mini",
             "OS-EXT-SRV-ATTR:instance_name": "instance-00000001",
@@ -77,7 +77,7 @@
             "status": "ACTIVE",
             "tenant_id": "6f70656e737461636b20342065766572",
             "updated": "2013-09-03T04:01:32Z",
-            "user_id": "fake"
+            "user_id": "admin"
         }
     ],
     "servers_links": [
diff --git a/doc/api_samples/servers/v2.16/server-get-resp.json b/doc/api_samples/servers/v2.16/server-get-resp.json
index 8737a0e80d3..99d0155b9f7 100644
--- a/doc/api_samples/servers/v2.16/server-get-resp.json
+++ b/doc/api_samples/servers/v2.16/server-get-resp.json
@@ -3,8 +3,8 @@
         "addresses": {
             "private": [
                 {
-                    "addr": "192.168.0.3",
-                    "OS-EXT-IPS-MAC:mac_addr": "aa:bb:cc:dd:ee:ff",
+                    "addr": "192.168.1.30",
+                    "OS-EXT-IPS-MAC:mac_addr": "00:0c:29:0d:11:74",
                     "OS-EXT-IPS:type": "fixed",
                     "version": 4
                 }
@@ -48,7 +48,7 @@
         "name": "new-server-test",
         "config_drive": "",
         "OS-DCF:diskConfig": "AUTO",
-        "OS-EXT-AZ:availability_zone": "nova",
+        "OS-EXT-AZ:availability_zone": "us-west",
         "OS-EXT-SRV-ATTR:host": "c5f474bf81474f9dbbc404d5b2e4e9b3",
         "OS-EXT-SRV-ATTR:hypervisor_hostname": "fake-mini",
         "OS-EXT-SRV-ATTR:instance_name": "instance-00000001",
@@ -87,6 +87,6 @@
         "host_status": "UP",
         "tenant_id": "6f70656e737461636b20342065766572",
         "updated": "2013-09-16T02:55:08Z",
-        "user_id": "fake"
+        "user_id": "admin"
     }
 }
diff --git a/doc/api_samples/servers/v2.16/servers-details-resp.json b/doc/api_samples/servers/v2.16/servers-details-resp.json
index 9fc17f6137b..694909da35e 100644
--- a/doc/api_samples/servers/v2.16/servers-details-resp.json
+++ b/doc/api_samples/servers/v2.16/servers-details-resp.json
@@ -4,8 +4,8 @@
             "addresses": {
                 "private": [
                     {
-                        "addr": "192.168.0.3",
-                        "OS-EXT-IPS-MAC:mac_addr": "aa:bb:cc:dd:ee:ff",
+                        "addr": "192.168.1.30",
+                        "OS-EXT-IPS-MAC:mac_addr": "00:0c:29:0d:11:74",
                         "OS-EXT-IPS:type": "fixed",
                         "version": 4
                     }
@@ -49,7 +49,7 @@
             "name": "new-server-test",
             "config_drive": "",
             "OS-DCF:diskConfig": "AUTO",
-            "OS-EXT-AZ:availability_zone": "nova",
+            "OS-EXT-AZ:availability_zone": "us-west",
            "OS-EXT-SRV-ATTR:host": "bc8efe4fdb7148a4bb921a2b03d17de6",
             "OS-EXT-SRV-ATTR:hypervisor_hostname": "fake-mini",
             "OS-EXT-SRV-ATTR:instance_name": "instance-00000001",
@@ -88,7 +88,7 @@
             "host_status": "UP",
             "tenant_id": "6f70656e737461636b20342065766572",
             "updated": "2013-09-16T02:55:05Z",
-            "user_id": "fake"
+            "user_id": "admin"
         }
     ],
     "servers_links": [
diff --git a/doc/api_samples/servers/v2.19/server-action-rebuild-resp.json b/doc/api_samples/servers/v2.19/server-action-rebuild-resp.json
index b38d40709d1..46b34f09de7 100644
--- a/doc/api_samples/servers/v2.19/server-action-rebuild-resp.json
+++ b/doc/api_samples/servers/v2.19/server-action-rebuild-resp.json
@@ -5,7 +5,7 @@
         "addresses": {
             "private": [
                 {
-                    "addr": "192.168.0.3",
+                    "addr": "192.168.1.30",
                     "version": 4
                 }
             ]
@@ -53,6 +53,6 @@
         "OS-DCF:diskConfig": "AUTO",
         "tenant_id": "6f70656e737461636b20342065766572",
         "updated": "2013-11-14T06:29:02Z",
-        "user_id": "fake"
+        "user_id": "admin"
     }
 }
diff --git a/doc/api_samples/servers/v2.19/server-get-resp.json b/doc/api_samples/servers/v2.19/server-get-resp.json
index 3fd3bc93610..f8efde972f7 100644
--- a/doc/api_samples/servers/v2.19/server-get-resp.json
+++ b/doc/api_samples/servers/v2.19/server-get-resp.json
@@ -5,9 +5,9 @@
         "addresses": {
             "private": [
                 {
-                    "OS-EXT-IPS-MAC:mac_addr": "aa:bb:cc:dd:ee:ff",
+                    "OS-EXT-IPS-MAC:mac_addr": "00:0c:29:0d:11:74",
                     "OS-EXT-IPS:type": "fixed",
-                    "addr": "192.168.0.3",
+                    "addr": "192.168.1.30",
                     "version": 4
                 }
             ]
@@ -88,6 +88,6 @@
         "status": "ACTIVE",
         "tenant_id": "6f70656e737461636b20342065766572",
         "updated": "2015-12-07T17:24:15Z",
-        "user_id": "fake"
+        "user_id": "admin"
     }
 }
diff --git a/doc/api_samples/servers/v2.19/server-put-resp.json b/doc/api_samples/servers/v2.19/server-put-resp.json
index 29c8f6ac9b3..ede653a057c 100644
--- a/doc/api_samples/servers/v2.19/server-put-resp.json
+++ b/doc/api_samples/servers/v2.19/server-put-resp.json
@@ -6,7 +6,7 @@
         "addresses": {
             "private": [
                 {
-                    "addr": "192.168.0.3",
+                    "addr": "192.168.1.30",
                     "version": 4
                 }
             ]
@@ -52,6 +52,6 @@
         "status": "ACTIVE",
         "tenant_id": "6f70656e737461636b20342065766572",
         "updated": "2015-12-07T19:19:36Z",
-        "user_id": "fake"
+        "user_id": "admin"
     }
 }
\ No newline at end of file
diff --git a/doc/api_samples/servers/v2.19/servers-details-resp.json b/doc/api_samples/servers/v2.19/servers-details-resp.json
index 37f83b7dd35..dfbd0baacb9 100644
--- a/doc/api_samples/servers/v2.19/servers-details-resp.json
+++ b/doc/api_samples/servers/v2.19/servers-details-resp.json
@@ -6,9 +6,9 @@
             "addresses": {
                 "private": [
                     {
-                        "OS-EXT-IPS-MAC:mac_addr": "aa:bb:cc:dd:ee:ff",
+                        "OS-EXT-IPS-MAC:mac_addr": "00:0c:29:0d:11:74",
                         "OS-EXT-IPS:type": "fixed",
-                        "addr": "192.168.0.3",
+                        "addr": "192.168.1.30",
                         "version": 4
                     }
                 ]
@@ -89,7 +89,7 @@
             "status": "ACTIVE",
             "tenant_id": "6f70656e737461636b20342065766572",
             "updated": "2015-12-07T19:54:49Z",
-            "user_id": "fake"
+            "user_id": "admin"
         }
     ],
     "servers_links": [
diff --git a/doc/api_samples/servers/v2.26/server-action-rebuild-resp.json b/doc/api_samples/servers/v2.26/server-action-rebuild-resp.json
index 86a7b41c232..781cee5a0c4 100644
--- a/doc/api_samples/servers/v2.26/server-action-rebuild-resp.json
+++ b/doc/api_samples/servers/v2.26/server-action-rebuild-resp.json
@@ -5,7 +5,7 @@
         "addresses": {
             "private": [
                 {
-                    "addr": "192.168.0.3",
+                    "addr": "192.168.1.30",
                     "version": 4
                 }
             ]
@@ -51,7 +51,7 @@
         "status": "ACTIVE",
         "tenant_id": "6f70656e737461636b20342065766572",
         "updated": "2013-11-14T06:29:02Z",
-        "user_id": "fake",
+        "user_id": "admin",
         "locked": false,
         "description" : "description of foobar",
         "tags": ["tag1", "tag2"]
diff --git a/doc/api_samples/servers/v2.3/server-get-resp.json b/doc/api_samples/servers/v2.3/server-get-resp.json
index 22882a074a3..2bfa311eadd 100644
--- a/doc/api_samples/servers/v2.3/server-get-resp.json
+++ b/doc/api_samples/servers/v2.3/server-get-resp.json
@@ -5,8 +5,8 @@
         "addresses": {
             "private": [
                 {
-                    "addr": "192.168.0.3",
-                    "OS-EXT-IPS-MAC:mac_addr": "aa:bb:cc:dd:ee:ff",
+                    "addr": "192.168.1.30",
+                    "OS-EXT-IPS-MAC:mac_addr": "00:0c:29:0d:11:74",
                     "OS-EXT-IPS:type": "fixed",
                     "version": 4
                 }
@@ -50,7 +50,7 @@
         "name": "new-server-test",
         "config_drive": "",
         "OS-DCF:diskConfig": "AUTO",
-        "OS-EXT-AZ:availability_zone": "nova",
+        "OS-EXT-AZ:availability_zone": "us-west",
         "OS-EXT-SRV-ATTR:host": "b8b357f7100d4391828f2177c922ef93",
         "OS-EXT-SRV-ATTR:hypervisor_hostname": "fake-mini",
         "OS-EXT-SRV-ATTR:instance_name": "instance-00000001",
@@ -85,6 +85,6 @@
         "status": "ACTIVE",
         "tenant_id": "6f70656e737461636b20342065766572",
         "updated": "2013-09-03T04:01:33Z",
-        "user_id": "fake"
+        "user_id": "admin"
     }
 }
diff --git a/doc/api_samples/servers/v2.3/servers-details-resp.json b/doc/api_samples/servers/v2.3/servers-details-resp.json
index 5e3876fd2c9..6cc2a0c880a 100644
--- a/doc/api_samples/servers/v2.3/servers-details-resp.json
+++ b/doc/api_samples/servers/v2.3/servers-details-resp.json
@@ -6,8 +6,8 @@
             "addresses": {
                 "private": [
                     {
-                        "addr": "192.168.0.3",
-                        "OS-EXT-IPS-MAC:mac_addr": "aa:bb:cc:dd:ee:ff",
+                        "addr": "192.168.1.30",
+                        "OS-EXT-IPS-MAC:mac_addr": "00:0c:29:0d:11:74",
                         "OS-EXT-IPS:type": "fixed",
                         "version": 4
                     }
@@ -51,7 +51,7 @@
             "name": "new-server-test",
             "config_drive": "",
             "OS-DCF:diskConfig": "AUTO",
-            "OS-EXT-AZ:availability_zone": "nova",
+            "OS-EXT-AZ:availability_zone": "us-west",
             "OS-EXT-SRV-ATTR:host": "c3f14e9812ad496baf92ccfb3c61e15f",
             "OS-EXT-SRV-ATTR:hypervisor_hostname": "fake-mini",
             "OS-EXT-SRV-ATTR:instance_name": "instance-00000001",
@@ -86,7 +86,7 @@
             "status": "ACTIVE",
             "tenant_id": "6f70656e737461636b20342065766572",
             "updated": "2013-09-03T04:01:32Z",
-            "user_id": "fake"
+            "user_id": "admin"
         }
     ],
     "servers_links": [
diff --git a/doc/api_samples/servers/v2.32/server-create-req.json b/doc/api_samples/servers/v2.32/server-create-req.json
index e4f79a43932..f9078243963 100644
--- a/doc/api_samples/servers/v2.32/server-create-req.json
+++ b/doc/api_samples/servers/v2.32/server-create-req.json
@@ -3,7 +3,7 @@
         "name" : "device-tagging-server",
         "flavorRef" : "http://openstack.example.com/flavors/1",
         "networks" : [{
-            "uuid" : "ff608d40-75e9-48cb-b745-77bb55b5eaf2",
+            "uuid" : "3cb9bc59-5699-4588-a4b1-b87f96708bc6",
             "tag": "nic1"
         }],
         "block_device_mapping_v2": [{
diff --git a/doc/api_samples/servers/v2.42/server-create-req.json b/doc/api_samples/servers/v2.42/server-create-req.json
index 4b000b235c0..f9078243963 100644
--- a/doc/api_samples/servers/v2.42/server-create-req.json
+++ b/doc/api_samples/servers/v2.42/server-create-req.json
@@ -3,7 +3,7 @@
         "name" : "device-tagging-server",
         "flavorRef" : "http://openstack.example.com/flavors/1",
         "networks" : [{
-            "uuid" : "ff608d40-75e9-48cb-b745-77bb55b5eaf2",
+            "uuid" : "3cb9bc59-5699-4588-a4b1-b87f96708bc6",
             "tag": "nic1"
         }],
         "block_device_mapping_v2": [{
@@ -15,4 +15,4 @@
             "tag": "disk1"
         }]
     }
-}
\ No newline at end of file
+}
diff --git a/doc/api_samples/servers/v2.47/server-action-rebuild-resp.json b/doc/api_samples/servers/v2.47/server-action-rebuild-resp.json
index 7069951d97f..790e31d2412 100644
--- a/doc/api_samples/servers/v2.47/server-action-rebuild-resp.json
+++ b/doc/api_samples/servers/v2.47/server-action-rebuild-resp.json
@@ -5,7 +5,7 @@
         "addresses": {
             "private": [
                 {
-                    "addr": "192.168.0.3",
+                    "addr": "192.168.1.30",
                     "version": 4
                 }
             ]
@@ -54,6 +54,6 @@
         "tags": [],
         "tenant_id": "6f70656e737461636b20342065766572",
         "updated": "2013-11-14T06:29:02Z",
-        "user_id": "fake"
+        "user_id": "admin"
     }
 }
diff --git a/doc/api_samples/servers/v2.47/server-create-req.json b/doc/api_samples/servers/v2.47/server-create-req.json
index 4068a9ed1ef..bd5dbca36f9 100644
--- a/doc/api_samples/servers/v2.47/server-create-req.json
+++ b/doc/api_samples/servers/v2.47/server-create-req.json
@@ -5,7 +5,7 @@
         "name" : "new-server-test",
         "imageRef" : "70a599e0-31e7-49b7-b260-868f441e862b",
         "flavorRef" : "6",
-        "availability_zone": "nova",
+        "availability_zone": "us-west",
         "OS-DCF:diskConfig": "AUTO",
         "metadata" : {
             "My Server Name" : "Apache1"
diff --git a/doc/api_samples/servers/v2.47/server-get-resp.json b/doc/api_samples/servers/v2.47/server-get-resp.json
index 9983aec3eea..38c28a2d6de 100644
--- a/doc/api_samples/servers/v2.47/server-get-resp.json
+++ b/doc/api_samples/servers/v2.47/server-get-resp.json
@@ -1,7 +1,7 @@
 {
     "server": {
         "OS-DCF:diskConfig": "AUTO",
-        "OS-EXT-AZ:availability_zone": "nova",
+        "OS-EXT-AZ:availability_zone": "us-west",
         "OS-EXT-SRV-ATTR:host": "compute",
         "OS-EXT-SRV-ATTR:hostname": "new-server-test",
         "OS-EXT-SRV-ATTR:hypervisor_hostname": "fake-mini",
@@ -22,9 +22,9 @@
         "addresses": {
             "private": [
                 {
-                    "OS-EXT-IPS-MAC:mac_addr": "aa:bb:cc:dd:ee:ff",
+                    "OS-EXT-IPS-MAC:mac_addr": "00:0c:29:0d:11:74",
                     "OS-EXT-IPS:type": "fixed",
-                    "addr": "192.168.0.3",
+                    "addr": "192.168.1.30",
                     "version": 4
                 }
             ]
@@ -36,8 +36,7 @@
             "disk": 1,
             "ephemeral": 0,
             "extra_specs": {
-                "hw:mem_page_size": "2048",
-                "hw:cpu_policy": "dedicated"
+                "hw:numa_nodes": "1"
             },
             "original_name": "m1.tiny.specs",
             "ram": 512,
@@ -92,6 +91,6 @@
         "tags": [],
         "tenant_id": "6f70656e737461636b20342065766572",
         "updated": "2017-02-14T19:24:00Z",
-        "user_id": "fake"
+        "user_id": "admin"
     }
 }
diff --git a/doc/api_samples/servers/v2.47/server-update-resp.json b/doc/api_samples/servers/v2.47/server-update-resp.json
index abf9e107d84..7857b0e34e7 100644
--- a/doc/api_samples/servers/v2.47/server-update-resp.json
+++ b/doc/api_samples/servers/v2.47/server-update-resp.json
@@ -6,7 +6,7 @@
         "addresses": {
             "private": [
                 {
-                    "addr": "192.168.0.3",
+                    "addr": "192.168.1.30",
                     "version": 4
                 }
             ]
@@ -53,6 +53,6 @@
         "tags": [],
         "tenant_id": "6f70656e737461636b20342065766572",
         "updated": "2012-12-02T02:11:58Z",
-        "user_id": "fake"
+        "user_id": "admin"
     }
 }
diff --git a/doc/api_samples/servers/v2.47/servers-details-resp.json b/doc/api_samples/servers/v2.47/servers-details-resp.json
index a9aaea4cff4..67c81e8fc2f 100644
--- a/doc/api_samples/servers/v2.47/servers-details-resp.json
+++ b/doc/api_samples/servers/v2.47/servers-details-resp.json
@@ -2,7 +2,7 @@
     "servers": [
         {
             "OS-DCF:diskConfig": "AUTO",
-            "OS-EXT-AZ:availability_zone": "nova",
+            "OS-EXT-AZ:availability_zone": "us-west",
             "OS-EXT-SRV-ATTR:host": "compute",
             "OS-EXT-SRV-ATTR:hostname": "new-server-test",
             "OS-EXT-SRV-ATTR:hypervisor_hostname": "fake-mini",
@@ -23,9 +23,9 @@
             "addresses": {
                 "private": [
                     {
-                        "OS-EXT-IPS-MAC:mac_addr": "aa:bb:cc:dd:ee:ff",
+                        "OS-EXT-IPS-MAC:mac_addr": "00:0c:29:0d:11:74",
                         "OS-EXT-IPS:type": "fixed",
-                        "addr": "192.168.0.3",
+                        "addr": "192.168.1.30",
                         "version": 4
                     }
                 ]
@@ -37,8 +37,7 @@
                 "disk": 1,
                 "ephemeral": 0,
                 "extra_specs": {
-                    "hw:mem_page_size": "2048",
-                    "hw:cpu_policy": "dedicated"
+                    "hw:numa_nodes": "1"
                 },
                 "original_name": "m1.tiny.specs",
                 "ram": 512,
@@ -93,7 +92,7 @@
             "tags": [],
             "tenant_id": "6f70656e737461636b20342065766572",
             "updated": "2017-02-14T19:24:43Z",
-            "user_id": "fake"
+            "user_id": "admin"
         }
     ],
     "servers_links": [
diff --git a/doc/api_samples/servers/v2.52/server-create-req.json b/doc/api_samples/servers/v2.52/server-create-req.json
index 36d2b4cf5cc..b629e717473 100644
--- a/doc/api_samples/servers/v2.52/server-create-req.json
+++ b/doc/api_samples/servers/v2.52/server-create-req.json
@@ -5,7 +5,7 @@
         "name" : "new-server-test",
         "imageRef" : "70a599e0-31e7-49b7-b260-868f441e862b",
         "flavorRef" : "http://openstack.example.com/flavors/1",
-        "availability_zone": "nova",
+        "availability_zone": "us-west",
         "OS-DCF:diskConfig": "AUTO",
         "metadata" : {
             "My Server Name" : "Apache1"
diff --git a/doc/api_samples/servers/v2.52/server-get-resp.json b/doc/api_samples/servers/v2.52/server-get-resp.json
index ff651f8547a..ec3ea201210 100644
--- a/doc/api_samples/servers/v2.52/server-get-resp.json
+++ b/doc/api_samples/servers/v2.52/server-get-resp.json
@@ -1,7 +1,7 @@
 {
     "server": {
         "OS-DCF:diskConfig": "AUTO",
-        "OS-EXT-AZ:availability_zone": "nova",
+        "OS-EXT-AZ:availability_zone": "us-west",
         "OS-EXT-SRV-ATTR:host": "compute",
         "OS-EXT-SRV-ATTR:hostname": "new-server-test",
         "OS-EXT-SRV-ATTR:hypervisor_hostname": "fake-mini",
@@ -22,9 +22,9 @@
         "addresses": {
             "private": [
                 {
-                    "OS-EXT-IPS-MAC:mac_addr": "aa:bb:cc:dd:ee:ff",
+                    "OS-EXT-IPS-MAC:mac_addr": "00:0c:29:0d:11:74",
                     "OS-EXT-IPS:type": "fixed",
-                    "addr": "192.168.0.3",
+                    "addr": "192.168.1.30",
                     "version": 4
                 }
             ]
@@ -89,6 +89,6 @@
         "tags": ["tag1", "tag2"],
         "tenant_id": "6f70656e737461636b20342065766572",
         "updated": "2017-02-14T19:24:00Z",
-        "user_id": "fake"
+        "user_id": "admin"
     }
 }
diff --git a/doc/api_samples/servers/v2.52/servers-details-resp.json b/doc/api_samples/servers/v2.52/servers-details-resp.json
index 98285ddc098..212a2ec1ded 100644
--- a/doc/api_samples/servers/v2.52/servers-details-resp.json
+++ b/doc/api_samples/servers/v2.52/servers-details-resp.json
@@ -2,7 +2,7 @@
     "servers": [
         {
             "OS-DCF:diskConfig": "AUTO",
-            "OS-EXT-AZ:availability_zone": "nova",
+            "OS-EXT-AZ:availability_zone": "us-west",
             "OS-EXT-SRV-ATTR:host": "compute",
             "OS-EXT-SRV-ATTR:hostname": "new-server-test",
             "OS-EXT-SRV-ATTR:hypervisor_hostname": "fake-mini",
@@ -23,9 +23,9 @@
             "addresses": {
                 "private": [
                     {
-                        "OS-EXT-IPS-MAC:mac_addr": "aa:bb:cc:dd:ee:ff",
+                        "OS-EXT-IPS-MAC:mac_addr": "00:0c:29:0d:11:74",
                         "OS-EXT-IPS:type": "fixed",
-                        "addr": "192.168.0.3",
+                        "addr": "192.168.1.30",
                         "version": 4
                     }
                 ]
@@ -90,7 +90,7 @@
             "tags": ["tag1", "tag2"],
             "tenant_id": "6f70656e737461636b20342065766572",
             "updated": "2017-02-14T19:24:43Z",
-            "user_id": "fake"
+            "user_id": "admin"
         }
     ],
     "servers_links": [
diff --git a/doc/api_samples/servers/v2.54/server-action-rebuild-resp.json b/doc/api_samples/servers/v2.54/server-action-rebuild-resp.json
index 612bd601994..cf809aeb530 100644
--- a/doc/api_samples/servers/v2.54/server-action-rebuild-resp.json
+++ b/doc/api_samples/servers/v2.54/server-action-rebuild-resp.json
@@ -5,7 +5,7 @@
         "addresses": {
             "private": [
                 {
-                    "addr": "192.168.0.3",
+                    "addr": "192.168.1.30",
                     "version": 4
                 }
             ]
@@ -54,7 +54,7 @@
         "OS-DCF:diskConfig": "AUTO",
         "tenant_id": "6f70656e737461636b20342065766572",
         "updated": "2013-11-14T06:29:02Z",
-        "user_id": "fake",
+        "user_id": "admin",
         "tags": []
     }
 }
diff --git a/doc/api_samples/servers/v2.57/server-action-rebuild-resp.json b/doc/api_samples/servers/v2.57/server-action-rebuild-resp.json
index 05225e7dcb2..92b43c45dad 100644
--- a/doc/api_samples/servers/v2.57/server-action-rebuild-resp.json
+++ b/doc/api_samples/servers/v2.57/server-action-rebuild-resp.json
@@ -5,7 +5,7 @@
         "addresses": {
             "private": [
                 {
-                    "addr": "192.168.0.3",
+                    "addr": "192.168.1.30",
                     "version": 4
                 }
             ]
@@ -54,7 +54,7 @@
         "OS-DCF:diskConfig": "AUTO",
         "tenant_id": "6f70656e737461636b20342065766572",
         "updated": "2013-11-14T06:29:02Z",
-        "user_id": "fake",
+        "user_id": "admin",
         "tags": [],
         "user_data": "ZWNobyAiaGVsbG8gd29ybGQi"
     }
diff --git a/doc/api_samples/servers/v2.57/server-create-req.json b/doc/api_samples/servers/v2.57/server-create-req.json
index c6d8dec2424..7c5011e4fe1 100644
--- a/doc/api_samples/servers/v2.57/server-create-req.json
+++ b/doc/api_samples/servers/v2.57/server-create-req.json
@@ -5,7 +5,7 @@
         "name" : "new-server-test",
         "imageRef" : "70a599e0-31e7-49b7-b260-868f441e862b",
         "flavorRef" : "http://openstack.example.com/flavors/1",
-        "availability_zone": "nova",
+        "availability_zone": "us-west",
         "OS-DCF:diskConfig": "AUTO",
         "metadata" : {
             "My Server Name" : "Apache1"
diff --git a/doc/api_samples/servers/v2.63/server-action-rebuild-resp.json b/doc/api_samples/servers/v2.63/server-action-rebuild-resp.json
index 546e7cb8bb4..fa3c34cb6d5 100644
--- a/doc/api_samples/servers/v2.63/server-action-rebuild-resp.json
+++ b/doc/api_samples/servers/v2.63/server-action-rebuild-resp.json
@@ -6,7 +6,7 @@
         "addresses": {
             "private": [
                 {
-                    "addr": "192.168.0.3",
+                    "addr": "192.168.1.30",
                     "version": 4
                 }
             ]
@@ -17,8 +17,7 @@
             "disk": 1,
             "ephemeral": 0,
             "extra_specs": {
-                "hw:cpu_policy": "dedicated",
-                "hw:mem_page_size": "2048"
+                "hw:numa_nodes": "1"
             },
             "original_name": "m1.tiny.specs",
             "ram": 512,
@@ -63,7 +62,7 @@
             "674736e3-f25c-405c-8362-bbf991e0ce0a"
         ],
         "updated": "2017-10-10T16:06:03Z",
-        "user_id": "fake"
+        "user_id": "admin"
     }
 }
diff --git a/doc/api_samples/servers/v2.63/server-create-req.json b/doc/api_samples/servers/v2.63/server-create-req.json
index 5523ce8d349..7a576f02497 100644
--- a/doc/api_samples/servers/v2.63/server-create-req.json
+++ b/doc/api_samples/servers/v2.63/server-create-req.json
@@ -5,7 +5,7 @@
         "name" : "new-server-test",
         "imageRef" : "70a599e0-31e7-49b7-b260-868f441e862b",
         "flavorRef" : "6",
-        "availability_zone": "nova",
+        "availability_zone": "%(availability_zone)s",
         "OS-DCF:diskConfig": "AUTO",
         "metadata" : {
             "My Server Name" : "Apache1"
diff --git a/doc/api_samples/servers/v2.63/server-get-resp.json b/doc/api_samples/servers/v2.63/server-get-resp.json
index 5645499fc77..e47589885bc 100644
--- a/doc/api_samples/servers/v2.63/server-get-resp.json
+++ b/doc/api_samples/servers/v2.63/server-get-resp.json
@@ -1,7 +1,7 @@
 {
     "server": {
         "OS-DCF:diskConfig": "AUTO",
-        "OS-EXT-AZ:availability_zone": "nova",
+        "OS-EXT-AZ:availability_zone": "us-west",
         "OS-EXT-SRV-ATTR:host": "compute",
         "OS-EXT-SRV-ATTR:hostname": "new-server-test",
         "OS-EXT-SRV-ATTR:hypervisor_hostname": "fake-mini",
@@ -22,9 +22,9 @@
         "addresses": {
             "private": [
                 {
-                    "OS-EXT-IPS-MAC:mac_addr": "aa:bb:cc:dd:ee:ff",
+                    "OS-EXT-IPS-MAC:mac_addr": "00:0c:29:0d:11:74",
                     "OS-EXT-IPS:type": "fixed",
-                    "addr": "192.168.0.3",
+                    "addr": "192.168.1.30",
                     "version": 4
                 }
             ]
@@ -36,8 +36,7 @@
             "disk": 1,
             "ephemeral": 0,
             "extra_specs": {
-                "hw:cpu_policy": "dedicated",
-                "hw:mem_page_size": "2048"
+                "hw:numa_nodes": "1"
             },
             "original_name": "m1.tiny.specs",
             "ram": 512,
@@ -87,6 +86,6 @@
             "674736e3-f25c-405c-8362-bbf991e0ce0a"
         ],
         "updated": "2017-02-14T19:24:00Z",
-        "user_id": "fake"
+        "user_id": "admin"
     }
 }
diff --git a/doc/api_samples/servers/v2.63/server-update-resp.json b/doc/api_samples/servers/v2.63/server-update-resp.json
index 5a47c0c7cea..c1d544fed96 100644
--- a/doc/api_samples/servers/v2.63/server-update-resp.json
+++ b/doc/api_samples/servers/v2.63/server-update-resp.json
@@ -6,7 +6,7 @@
         "addresses": {
             "private": [
                 {
-                    "addr": "192.168.0.3",
+                    "addr": "192.168.1.30",
                     "version": 4
                 }
             ]
@@ -17,8 +17,7 @@
             "disk": 1,
             "ephemeral": 0,
             "extra_specs": {
-                "hw:cpu_policy": "dedicated",
-                "hw:mem_page_size": "2048"
+                "hw:numa_nodes": "1"
             },
             "original_name": "m1.tiny.specs",
             "ram": 512,
@@ -60,6 +59,6 @@
             "674736e3-f25c-405c-8362-bbf991e0ce0a"
         ],
         "updated": "2012-12-02T02:11:58Z",
-        "user_id": "fake"
+        "user_id": "admin"
     }
 }
diff --git a/doc/api_samples/servers/v2.63/servers-details-resp.json b/doc/api_samples/servers/v2.63/servers-details-resp.json
index 620a7a22342..358439ededc 100644
--- a/doc/api_samples/servers/v2.63/servers-details-resp.json
+++ b/doc/api_samples/servers/v2.63/servers-details-resp.json
@@ -2,7 +2,7 @@
     "servers": [
         {
             "OS-DCF:diskConfig": "AUTO",
-            "OS-EXT-AZ:availability_zone": "nova",
+            "OS-EXT-AZ:availability_zone": "us-west",
             "OS-EXT-SRV-ATTR:host": "compute",
             "OS-EXT-SRV-ATTR:hostname": "new-server-test",
             "OS-EXT-SRV-ATTR:hypervisor_hostname": "fake-mini",
@@ -23,9 +23,9 @@
             "addresses": {
                 "private": [
                     {
-                        "OS-EXT-IPS-MAC:mac_addr": "aa:bb:cc:dd:ee:ff",
+                        "OS-EXT-IPS-MAC:mac_addr": "00:0c:29:0d:11:74",
                         "OS-EXT-IPS:type": "fixed",
-                        "addr": "192.168.0.3",
+                        "addr": "192.168.1.30",
                         "version": 4
                     }
                 ]
@@ -37,8 +37,7 @@
                 "disk": 1,
                 "ephemeral": 0,
                 "extra_specs": {
-                    "hw:cpu_policy": "dedicated",
-                    "hw:mem_page_size": "2048"
+                    "hw:numa_nodes": "1"
                 },
                 "original_name": "m1.tiny.specs",
                 "ram": 512,
@@ -88,7 +87,7 @@
                 "674736e3-f25c-405c-8362-bbf991e0ce0a"
             ],
             "updated": "2017-10-10T15:49:09Z",
-            "user_id": "fake"
+            "user_id": "admin"
         }
     ],
     "servers_links": [
diff --git a/doc/api_samples/servers/v2.66/server-create-req.json b/doc/api_samples/servers/v2.66/server-create-req.json
new file mode 100644
index 00000000000..59c9101f020
--- /dev/null
+++ b/doc/api_samples/servers/v2.66/server-create-req.json
@@ -0,0 +1,28 @@
+{
+    "server" : {
+        "accessIPv4": "1.2.3.4",
+        "accessIPv6": "80fe::",
+        "name" : "new-server-test",
+        "imageRef" : "70a599e0-31e7-49b7-b260-868f441e862b",
+        "flavorRef" : "6",
+        "availability_zone": "us-west",
+        "OS-DCF:diskConfig": "AUTO",
+        "metadata" : {
+            "My Server Name" : "Apache1"
+        },
+        "security_groups": [
+            {
+                "name": "default"
+            }
+        ],
+        "user_data" : "IyEvYmluL2Jhc2gKL2Jpbi9zdQplY2hvICJJIGFtIGluIHlvdSEiCg==",
+        "networks": "auto",
+        "trusted_image_certificates": [
+            "0b5d2c72-12cc-4ba6-a8d7-3ff5cc1d8cb8",
+            "674736e3-f25c-405c-8362-bbf991e0ce0a"
+        ]
+    },
+    "OS-SCH-HNT:scheduler_hints": {
+        "same_host": "[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}"
+    }
+}
\ No newline at end of file
diff --git a/doc/api_samples/servers/v2.66/server-create-resp.json b/doc/api_samples/servers/v2.66/server-create-resp.json
new file mode 100644
index 00000000000..7400eb33272
--- /dev/null
+++ b/doc/api_samples/servers/v2.66/server-create-resp.json
@@ -0,0 +1,22 @@
+{
+    "server": {
+        "OS-DCF:diskConfig": "AUTO",
+        "adminPass": "wKLKinb9u7GM",
+        "id": "aab35fd0-b459-4b59-9308-5a23147f3165",
+        "links": [
+            {
+                "href": "http://openstack.example.com/v2.1/6f70656e737461636b20342065766572/servers/aab35fd0-b459-4b59-9308-5a23147f3165",
+                "rel": "self"
+            },
+            {
+                "href": "http://openstack.example.com/6f70656e737461636b20342065766572/servers/aab35fd0-b459-4b59-9308-5a23147f3165",
+                "rel": "bookmark"
+            }
+        ],
+        "security_groups": [
+            {
+                "name": "default"
+            }
+        ]
+    }
+}
\ No newline at end of file
diff --git a/doc/api_samples/servers/v2.66/servers-details-with-changes-before.json b/doc/api_samples/servers/v2.66/servers-details-with-changes-before.json
new file mode 100644
index 00000000000..f4c39ac5197
--- /dev/null
+++ b/doc/api_samples/servers/v2.66/servers-details-with-changes-before.json
@@ -0,0 +1,93 @@
+{
+    "servers": [
+        {
+            "OS-DCF:diskConfig": "AUTO",
+            "OS-EXT-AZ:availability_zone": "us-west",
+            "OS-EXT-SRV-ATTR:host": "compute",
+            "OS-EXT-SRV-ATTR:hostname": "new-server-test",
+            "OS-EXT-SRV-ATTR:hypervisor_hostname": "fake-mini",
+            "OS-EXT-SRV-ATTR:instance_name": "instance-00000001",
+            "OS-EXT-SRV-ATTR:kernel_id": "",
+            "OS-EXT-SRV-ATTR:launch_index": 0,
+            "OS-EXT-SRV-ATTR:ramdisk_id": "",
+            "OS-EXT-SRV-ATTR:reservation_id": "r-y0w4v32k",
+            "OS-EXT-SRV-ATTR:root_device_name": "/dev/sda",
+            "OS-EXT-SRV-ATTR:user_data": "IyEvYmluL2Jhc2gKL2Jpbi9zdQplY2hvICJJIGFtIGluIHlvdSEiCg==",
+            "OS-EXT-STS:power_state": 1,
+            "OS-EXT-STS:task_state": null,
+            "OS-EXT-STS:vm_state": "active",
+            "OS-SRV-USG:launched_at": "2018-10-10T15:49:09.516729",
+            "OS-SRV-USG:terminated_at": null,
+            "accessIPv4": "1.2.3.4",
+            "accessIPv6": "80fe::",
+            "addresses": {
+                "private": [
+                    {
+                        "OS-EXT-IPS-MAC:mac_addr": "00:0c:29:0d:11:74",
+                        "OS-EXT-IPS:type": "fixed",
+                        "addr": "192.168.0.1",
+                        "version": 4
+                    }
+                ]
+            },
+            "config_drive": "",
+            "created": "2018-10-10T15:49:08Z",
+            "description": null,
+            "flavor": {
+                "disk": 1,
+                "ephemeral": 0,
+                "extra_specs": {
+                    "hw:numa_nodes": "1"
+                },
+                "original_name": "m1.tiny.specs",
+                "ram": 512,
+                "swap": 0,
+                "vcpus": 1
+            },
+            "hostId": "2091634baaccdc4c5a1d57069c833e402921df696b7f970791b12ec6",
+            "host_status": "UP",
+            "id": "569f39f9-7c76-42a1-9c2d-8394e2638a6e",
+            "image": {
+                "id": "70a599e0-31e7-49b7-b260-868f441e862b",
+                "links": [
+                    {
+                        "href": "http://openstack.example.com/6f70656e737461636b20342065766572/images/70a599e0-31e7-49b7-b260-868f441e862b",
+                        "rel": "bookmark"
+                    }
+                ]
+            },
+            "key_name": null,
+            "links": [
+                {
+                    "href": "http://openstack.example.com/v2.1/6f70656e737461636b20342065766572/servers/569f39f9-7c76-42a1-9c2d-8394e2638a6d",
+                    "rel": "self"
+                },
+                {
+                    "href": "http://openstack.example.com/6f70656e737461636b20342065766572/servers/569f39f9-7c76-42a1-9c2d-8394e2638a6d",
+                    "rel": "bookmark"
+                }
+            ],
+            "locked": false,
+            "metadata": {
+                "My Server Name": "Apache1"
+            },
+            "name": "new-server-test",
+            "os-extended-volumes:volumes_attached": [],
+            "progress": 0,
+            "security_groups": [
+                {
+                    "name": "default"
+                }
+            ],
+            "status": "ACTIVE",
+            "tags": [],
+            "tenant_id": "6f70656e737461636b20342065766572",
+            "trusted_image_certificates": [
+                "0b5d2c72-12cc-4ba6-a8d7-3ff5cc1d8cb8",
+                "674736e3-f25c-405c-8362-bbf991e0ce0a"
+            ],
+            "updated": "2018-10-10T15:49:09Z",
+            "user_id": "admin"
+        }
+    ]
+}
diff --git a/doc/api_samples/servers/v2.66/servers-list-with-changes-before.json b/doc/api_samples/servers/v2.66/servers-list-with-changes-before.json
new file mode 100644
index 00000000000..d86d5ea9485
--- /dev/null
+++ b/doc/api_samples/servers/v2.66/servers-list-with-changes-before.json
@@ -0,0 +1,18 @@
+{
+    "servers": [
+        {
+            "id": "6e3a87e6-a133-452e-86e1-a31291c1b1c8",
+            "links": [
+                {
+                    "href": "http://openstack.example.com/v2.1/6f70656e737461636b20342065766572/servers/6e3a87e6-a133-452e-86e1-a31291c1b1c8",
+                    "rel": "self"
+                },
+                {
+                    "href": "http://openstack.example.com/6f70656e737461636b20342065766572/servers/6e3a87e6-a133-452e-86e1-a31291c1b1c8",
+                    "rel": "bookmark"
+                }
+            ],
+            "name": "new-server-test"
+        }
+    ]
+}
diff --git a/doc/api_samples/servers/v2.67/server-create-req.json b/doc/api_samples/servers/v2.67/server-create-req.json
new file mode 100644
index 00000000000..d8cd28f80c8
--- /dev/null
+++ b/doc/api_samples/servers/v2.67/server-create-req.json
@@ -0,0 +1,19 @@
+{ + "server" : { + "name" : "bfv-server-with-volume-type", + "flavorRef" : "http://openstack.example.com/flavors/1", + "networks" : [{ + "uuid" : "3cb9bc59-5699-4588-a4b1-b87f96708bc6", + "tag": "nic1" + }], + "block_device_mapping_v2": [{ + "uuid": "70a599e0-31e7-49b7-b260-868f441e862b", + "source_type": "image", + "destination_type": "volume", + "boot_index": 0, + "volume_size": "1", + "tag": "disk1", + "volume_type": "lvm-1" + }] + } +} diff --git a/doc/api_samples/servers/v2.67/server-create-resp.json b/doc/api_samples/servers/v2.67/server-create-resp.json new file mode 100644 index 00000000000..dd0bb9f2284 --- /dev/null +++ b/doc/api_samples/servers/v2.67/server-create-resp.json @@ -0,0 +1,22 @@ +{ + "server": { + "OS-DCF:diskConfig": "AUTO", + "adminPass": "S5wqy9sPYUvU", + "id": "97108291-2fd7-4dc2-a909-eaae0306a6a9", + "links": [ + { + "href": "http://openstack.example.com/v2.1/6f70656e737461636b20342065766572/servers/97108291-2fd7-4dc2-a909-eaae0306a6a9", + "rel": "self" + }, + { + "href": "http://openstack.example.com/6f70656e737461636b20342065766572/servers/97108291-2fd7-4dc2-a909-eaae0306a6a9", + "rel": "bookmark" + } + ], + "security_groups": [ + { + "name": "default" + } + ] + } +} \ No newline at end of file diff --git a/doc/api_samples/servers/v2.69/server-create-req.json b/doc/api_samples/servers/v2.69/server-create-req.json new file mode 100644 index 00000000000..ae72809bb84 --- /dev/null +++ b/doc/api_samples/servers/v2.69/server-create-req.json @@ -0,0 +1,20 @@ +{ + "server" : { + "accessIPv4": "1.2.3.4", + "accessIPv6": "80fe::", + "name" : "new-server-test", + "imageRef" : "70a599e0-31e7-49b7-b260-868f441e862b", + "flavorRef" : "http://openstack.example.com/flavors/1", + "OS-DCF:diskConfig": "AUTO", + "metadata" : { + "My Server Name" : "Apache1" + }, + "security_groups": [ + { + "name": "default" + } + ], + "user_data" : "IyEvYmluL2Jhc2gKL2Jpbi9zdQplY2hvICJJIGFtIGluIHlvdSEiCg==", + "networks": "auto" + } +} \ No newline at end of file diff --git a/doc/api_samples/servers/v2.69/server-create-resp.json b/doc/api_samples/servers/v2.69/server-create-resp.json new file mode 100644 index 00000000000..a5aa94d21c5 --- /dev/null +++ b/doc/api_samples/servers/v2.69/server-create-resp.json @@ -0,0 +1,22 @@ +{ + "server": { + "OS-DCF:diskConfig": "AUTO", + "adminPass": "mqtDAwb2y7Zh", + "id": "6f81aefe-472a-49d8-ba8d-758a5082c7e5", + "links": [ + { + "href": "http://openstack.example.com/v2.1/6f70656e737461636b20342065766572/servers/6f81aefe-472a-49d8-ba8d-758a5082c7e5", + "rel": "self" + }, + { + "href": "http://openstack.example.com/6f70656e737461636b20342065766572/servers/6f81aefe-472a-49d8-ba8d-758a5082c7e5", + "rel": "bookmark" + } + ], + "security_groups": [ + { + "name": "default" + } + ] + } +} \ No newline at end of file diff --git a/doc/api_samples/servers/v2.69/server-get-resp.json b/doc/api_samples/servers/v2.69/server-get-resp.json new file mode 100644 index 00000000000..981cd23f8c0 --- /dev/null +++ b/doc/api_samples/servers/v2.69/server-get-resp.json @@ -0,0 +1,39 @@ +{ + "server": { + "OS-EXT-AZ:availability_zone": "UNKNOWN", + "OS-EXT-STS:power_state": 0, + "created": "2018-12-03T21:06:18Z", + "flavor": { + "disk": 1, + "ephemeral": 0, + "extra_specs": {}, + "original_name": "m1.tiny", + "ram": 512, + "swap": 0, + "vcpus": 1 + }, + "id": "33748c23-38dd-4f70-b774-522fc69e7b67", + "image": { + "id": "70a599e0-31e7-49b7-b260-868f441e862b", + "links": [ + { + "href": 
"http://openstack.example.com/6f70656e737461636b20342065766572/images/70a599e0-31e7-49b7-b260-868f441e862b", + "rel": "bookmark" + } + ] + }, + "status": "UNKNOWN", + "tenant_id": "6f70656e737461636b20342065766572", + "user_id": "admin", + "links": [ + { + "href": "http://openstack.example.com/v2.1/6f70656e737461636b20342065766572/servers/33748c23-38dd-4f70-b774-522fc69e7b67", + "rel": "self" + }, + { + "href": "http://openstack.example.com/6f70656e737461636b20342065766572/servers/33748c23-38dd-4f70-b774-522fc69e7b67", + "rel": "bookmark" + } + ] + } +} diff --git a/doc/api_samples/servers/v2.69/servers-details-resp.json b/doc/api_samples/servers/v2.69/servers-details-resp.json new file mode 100644 index 00000000000..83ad414943c --- /dev/null +++ b/doc/api_samples/servers/v2.69/servers-details-resp.json @@ -0,0 +1,20 @@ +{ + "servers": [ + { + "created": "2018-12-03T21:06:18Z", + "id": "b6b0410f-b65f-4473-855e-5d82a71759e0", + "status": "UNKNOWN", + "tenant_id": "6f70656e737461636b20342065766572", + "links": [ + { + "href": "http://openstack.example.com/v2.1/6f70656e737461636b20342065766572/servers/b6b0410f-b65f-4473-855e-5d82a71759e0", + "rel": "self" + }, + { + "href": "http://openstack.example.com/6f70656e737461636b20342065766572/servers/b6b0410f-b65f-4473-855e-5d82a71759e0", + "rel": "bookmark" + } + ] + } + ] +} diff --git a/doc/api_samples/servers/v2.69/servers-list-resp.json b/doc/api_samples/servers/v2.69/servers-list-resp.json new file mode 100644 index 00000000000..5a5c988a857 --- /dev/null +++ b/doc/api_samples/servers/v2.69/servers-list-resp.json @@ -0,0 +1,18 @@ +{ + "servers": [ + { + "id": "2e136db7-b4a4-4815-8a00-25d9bfe59617", + "status": "UNKNOWN", + "links": [ + { + "href": "http://openstack.example.com/v2.1/6f70656e737461636b20342065766572/servers/2e136db7-b4a4-4815-8a00-25d9bfe59617", + "rel": "self" + }, + { + "href": "http://openstack.example.com/6f70656e737461636b20342065766572/servers/2e136db7-b4a4-4815-8a00-25d9bfe59617", + "rel": "bookmark" + } + ] + } + ] +} \ No newline at end of file diff --git a/doc/api_samples/servers/v2.71/server-action-rebuild-resp.json b/doc/api_samples/servers/v2.71/server-action-rebuild-resp.json new file mode 100644 index 00000000000..16dd0a10301 --- /dev/null +++ b/doc/api_samples/servers/v2.71/server-action-rebuild-resp.json @@ -0,0 +1,65 @@ +{ + "server": { + "OS-DCF:diskConfig": "AUTO", + "accessIPv4": "1.2.3.4", + "accessIPv6": "80fe::", + "addresses": { + "private": [ + { + "addr": "192.168.1.30", + "version": 4 + } + ] + }, + "adminPass": "seekr3t", + "created": "2019-02-28T03:16:19Z", + "description": null, + "flavor": { + "disk": 1, + "ephemeral": 0, + "extra_specs": {}, + "original_name": "m1.tiny", + "ram": 512, + "swap": 0, + "vcpus": 1 + }, + "hostId": "2091634baaccdc4c5a1d57069c833e402921df696b7f970791b12ec6", + "id": "36b2afd5-1684-4d18-a49c-915bf0f5344c", + "image": { + "id": "70a599e0-31e7-49b7-b260-868f441e862b", + "links": [ + { + "href": "http://openstack.example.com/6f70656e737461636b20342065766572/images/70a599e0-31e7-49b7-b260-868f441e862b", + "rel": "bookmark" + } + ] + }, + "key_name": null, + "links": [ + { + "href": "http://openstack.example.com/v2.1/6f70656e737461636b20342065766572/servers/36b2afd5-1684-4d18-a49c-915bf0f5344c", + "rel": "self" + }, + { + "href": "http://openstack.example.com/6f70656e737461636b20342065766572/servers/36b2afd5-1684-4d18-a49c-915bf0f5344c", + "rel": "bookmark" + } + ], + "locked": false, + "metadata": { + "meta_var": "meta_val" + }, + "name": "foobar", + "progress": 0, + 
"server_groups": [ + "f3d86fe6-4246-4be8-b87c-eb894626c741" + ], + "status": "ACTIVE", + "tags": [], + "tenant_id": "6f70656e737461636b20342065766572", + "trusted_image_certificates": null, + "updated": "2019-02-28T03:16:20Z", + "user_data": "ZWNobyAiaGVsbG8gd29ybGQi", + "user_id": "admin" + } +} diff --git a/doc/api_samples/servers/v2.71/server-action-rebuild.json b/doc/api_samples/servers/v2.71/server-action-rebuild.json new file mode 100644 index 00000000000..f1431a05062 --- /dev/null +++ b/doc/api_samples/servers/v2.71/server-action-rebuild.json @@ -0,0 +1,14 @@ +{ + "rebuild" : { + "accessIPv4" : "1.2.3.4", + "accessIPv6" : "80fe::", + "OS-DCF:diskConfig": "AUTO", + "imageRef" : "70a599e0-31e7-49b7-b260-868f441e862b", + "name" : "foobar", + "adminPass" : "seekr3t", + "metadata" : { + "meta_var" : "meta_val" + }, + "user_data": "ZWNobyAiaGVsbG8gd29ybGQi" + } +} \ No newline at end of file diff --git a/doc/api_samples/servers/v2.71/server-create-req.json b/doc/api_samples/servers/v2.71/server-create-req.json new file mode 100644 index 00000000000..b5a9b238aca --- /dev/null +++ b/doc/api_samples/servers/v2.71/server-create-req.json @@ -0,0 +1,23 @@ +{ + "server" : { + "accessIPv4": "1.2.3.4", + "accessIPv6": "80fe::", + "name" : "new-server-test", + "imageRef" : "70a599e0-31e7-49b7-b260-868f441e862b", + "flavorRef" : "1", + "OS-DCF:diskConfig": "AUTO", + "metadata" : { + "My Server Name" : "Apache1" + }, + "security_groups": [ + { + "name": "default" + } + ], + "user_data" : "IyEvYmluL2Jhc2gKL2Jpbi9zdQplY2hvICJJIGFtIGluIHlvdSEiCg==", + "networks": "auto" + }, + "OS-SCH-HNT:scheduler_hints": { + "group": "f3d86fe6-4246-4be8-b87c-eb894626c741" + } +} \ No newline at end of file diff --git a/doc/api_samples/servers/v2.71/server-create-resp.json b/doc/api_samples/servers/v2.71/server-create-resp.json new file mode 100644 index 00000000000..7ebe2e20a2d --- /dev/null +++ b/doc/api_samples/servers/v2.71/server-create-resp.json @@ -0,0 +1,22 @@ +{ + "server": { + "OS-DCF:diskConfig": "AUTO", + "adminPass": "DB2bQBhxvq8a", + "id": "84e2b49d-39a9-4d32-9100-e62161c236db", + "links": [ + { + "href": "http://openstack.example.com/v2.1/6f70656e737461636b20342065766572/servers/84e2b49d-39a9-4d32-9100-e62161c236db", + "rel": "self" + }, + { + "href": "http://openstack.example.com/6f70656e737461636b20342065766572/servers/84e2b49d-39a9-4d32-9100-e62161c236db", + "rel": "bookmark" + } + ], + "security_groups": [ + { + "name": "default" + } + ] + } +} \ No newline at end of file diff --git a/doc/api_samples/servers/v2.71/server-get-down-cell-resp.json b/doc/api_samples/servers/v2.71/server-get-down-cell-resp.json new file mode 100644 index 00000000000..9dded66a72e --- /dev/null +++ b/doc/api_samples/servers/v2.71/server-get-down-cell-resp.json @@ -0,0 +1,42 @@ +{ + "server": { + "OS-EXT-AZ:availability_zone": "UNKNOWN", + "OS-EXT-STS:power_state": 0, + "created": "2019-02-28T03:16:19Z", + "flavor": { + "disk": 1, + "ephemeral": 0, + "extra_specs": {}, + "original_name": "m1.tiny", + "ram": 512, + "swap": 0, + "vcpus": 1 + }, + "id": "2669556b-b4a3-41f1-a0c1-f9c7ff75e53c", + "image": { + "id": "70a599e0-31e7-49b7-b260-868f441e862b", + "links": [ + { + "href": "http://openstack.example.com/6f70656e737461636b20342065766572/images/70a599e0-31e7-49b7-b260-868f441e862b", + "rel": "bookmark" + } + ] + }, + "server_groups": [ + "f3d86fe6-4246-4be8-b87c-eb894626c741" + ], + "status": "UNKNOWN", + "tenant_id": "6f70656e737461636b20342065766572", + "user_id": "admin", + "links": [ + { + "href": 
"http://openstack.example.com/v2.1/6f70656e737461636b20342065766572/servers/2669556b-b4a3-41f1-a0c1-f9c7ff75e53c", + "rel": "self" + }, + { + "href": "http://openstack.example.com/6f70656e737461636b20342065766572/servers/2669556b-b4a3-41f1-a0c1-f9c7ff75e53c", + "rel": "bookmark" + } + ] + } +} diff --git a/doc/api_samples/servers/v2.71/server-get-resp.json b/doc/api_samples/servers/v2.71/server-get-resp.json new file mode 100644 index 00000000000..72e893e2e0f --- /dev/null +++ b/doc/api_samples/servers/v2.71/server-get-resp.json @@ -0,0 +1,89 @@ +{ + "server": { + "OS-DCF:diskConfig": "AUTO", + "OS-EXT-AZ:availability_zone": "nova", + "OS-EXT-SRV-ATTR:host": "compute", + "OS-EXT-SRV-ATTR:hostname": "new-server-test", + "OS-EXT-SRV-ATTR:hypervisor_hostname": "fake-mini", + "OS-EXT-SRV-ATTR:instance_name": "instance-00000001", + "OS-EXT-SRV-ATTR:kernel_id": "", + "OS-EXT-SRV-ATTR:launch_index": 0, + "OS-EXT-SRV-ATTR:ramdisk_id": "", + "OS-EXT-SRV-ATTR:reservation_id": "r-0scisg0g", + "OS-EXT-SRV-ATTR:root_device_name": "/dev/sda", + "OS-EXT-SRV-ATTR:user_data": "IyEvYmluL2Jhc2gKL2Jpbi9zdQplY2hvICJJIGFtIGluIHlvdSEiCg==", + "OS-EXT-STS:power_state": 1, + "OS-EXT-STS:task_state": null, + "OS-EXT-STS:vm_state": "active", + "OS-SRV-USG:launched_at": "2019-02-28T03:16:19.600768", + "OS-SRV-USG:terminated_at": null, + "accessIPv4": "1.2.3.4", + "accessIPv6": "80fe::", + "addresses": { + "private": [ + { + "OS-EXT-IPS-MAC:mac_addr": "00:0c:29:0d:11:74", + "OS-EXT-IPS:type": "fixed", + "addr": "192.168.1.30", + "version": 4 + } + ] + }, + "config_drive": "", + "created": "2019-02-28T03:16:18Z", + "description": null, + "flavor": { + "disk": 1, + "ephemeral": 0, + "extra_specs": {}, + "original_name": "m1.tiny", + "ram": 512, + "swap": 0, + "vcpus": 1 + }, + "hostId": "2091634baaccdc4c5a1d57069c833e402921df696b7f970791b12ec6", + "host_status": "UP", + "id": "84e2b49d-39a9-4d32-9100-e62161c236db", + "image": { + "id": "70a599e0-31e7-49b7-b260-868f441e862b", + "links": [ + { + "href": "http://openstack.example.com/6f70656e737461636b20342065766572/images/70a599e0-31e7-49b7-b260-868f441e862b", + "rel": "bookmark" + } + ] + }, + "key_name": null, + "links": [ + { + "href": "http://openstack.example.com/v2.1/6f70656e737461636b20342065766572/servers/84e2b49d-39a9-4d32-9100-e62161c236db", + "rel": "self" + }, + { + "href": "http://openstack.example.com/6f70656e737461636b20342065766572/servers/84e2b49d-39a9-4d32-9100-e62161c236db", + "rel": "bookmark" + } + ], + "locked": false, + "metadata": { + "My Server Name": "Apache1" + }, + "name": "new-server-test", + "os-extended-volumes:volumes_attached": [], + "progress": 0, + "security_groups": [ + { + "name": "default" + } + ], + "server_groups": [ + "f3d86fe6-4246-4be8-b87c-eb894626c741" + ], + "status": "ACTIVE", + "tags": [], + "tenant_id": "6f70656e737461636b20342065766572", + "trusted_image_certificates": null, + "updated": "2019-02-28T03:16:19Z", + "user_id": "admin" + } +} \ No newline at end of file diff --git a/doc/api_samples/servers/v2.71/server-groups-post-req.json b/doc/api_samples/servers/v2.71/server-groups-post-req.json new file mode 100644 index 00000000000..bbdf2ff4c84 --- /dev/null +++ b/doc/api_samples/servers/v2.71/server-groups-post-req.json @@ -0,0 +1,6 @@ +{ + "server_group": { + "name": "test", + "policy": "affinity" + } +} \ No newline at end of file diff --git a/doc/api_samples/servers/v2.71/server-groups-post-resp.json b/doc/api_samples/servers/v2.71/server-groups-post-resp.json new file mode 100644 index 00000000000..99b9c98f2d2 --- 
/dev/null +++ b/doc/api_samples/servers/v2.71/server-groups-post-resp.json @@ -0,0 +1,11 @@ +{ + "server_group": { + "id": "f3d86fe6-4246-4be8-b87c-eb894626c741", + "members": [], + "name": "test", + "policy": "affinity", + "project_id": "6f70656e737461636b20342065766572", + "rules": {}, + "user_id": "admin" + } +} \ No newline at end of file diff --git a/doc/api_samples/servers/v2.71/server-update-req.json b/doc/api_samples/servers/v2.71/server-update-req.json new file mode 100644 index 00000000000..3b3995d51e4 --- /dev/null +++ b/doc/api_samples/servers/v2.71/server-update-req.json @@ -0,0 +1,9 @@ +{ + "server": { + "accessIPv4": "1.2.3.4", + "accessIPv6": "80fe::", + "OS-DCF:diskConfig": "AUTO", + "name": "new-server-test", + "description": "Sample description" + } +} \ No newline at end of file diff --git a/doc/api_samples/servers/v2.71/server-update-resp.json b/doc/api_samples/servers/v2.71/server-update-resp.json new file mode 100644 index 00000000000..408f0bea4a3 --- /dev/null +++ b/doc/api_samples/servers/v2.71/server-update-resp.json @@ -0,0 +1,62 @@ +{ + "server": { + "OS-DCF:diskConfig": "AUTO", + "accessIPv4": "1.2.3.4", + "accessIPv6": "80fe::", + "addresses": { + "private": [ + { + "addr": "192.168.1.30", + "version": 4 + } + ] + }, + "created": "2019-02-28T03:16:19Z", + "description": "Sample description", + "flavor": { + "disk": 1, + "ephemeral": 0, + "extra_specs": {}, + "original_name": "m1.tiny", + "ram": 512, + "swap": 0, + "vcpus": 1 + }, + "hostId": "2091634baaccdc4c5a1d57069c833e402921df696b7f970791b12ec6", + "id": "60e840f8-dd17-476b-bd1d-33785066c496", + "image": { + "id": "70a599e0-31e7-49b7-b260-868f441e862b", + "links": [ + { + "href": "http://openstack.example.com/6f70656e737461636b20342065766572/images/70a599e0-31e7-49b7-b260-868f441e862b", + "rel": "bookmark" + } + ] + }, + "links": [ + { + "href": "http://openstack.example.com/v2.1/6f70656e737461636b20342065766572/servers/60e840f8-dd17-476b-bd1d-33785066c496", + "rel": "self" + }, + { + "href": "http://openstack.example.com/6f70656e737461636b20342065766572/servers/60e840f8-dd17-476b-bd1d-33785066c496", + "rel": "bookmark" + } + ], + "locked": false, + "metadata": { + "My Server Name": "Apache1" + }, + "name": "new-server-test", + "progress": 0, + "server_groups": [ + "f3d86fe6-4246-4be8-b87c-eb894626c741" + ], + "status": "ACTIVE", + "tags": [], + "tenant_id": "6f70656e737461636b20342065766572", + "trusted_image_certificates": null, + "updated": "2019-02-28T03:16:19Z", + "user_id": "admin" + } +} diff --git a/doc/api_samples/servers/v2.73/lock-server-with-reason.json b/doc/api_samples/servers/v2.73/lock-server-with-reason.json new file mode 100644 index 00000000000..c307fb39bf7 --- /dev/null +++ b/doc/api_samples/servers/v2.73/lock-server-with-reason.json @@ -0,0 +1,3 @@ +{ + "lock": {"locked_reason": "I don't want to work"} +} \ No newline at end of file diff --git a/doc/api_samples/servers/v2.73/server-action-rebuild-resp.json b/doc/api_samples/servers/v2.73/server-action-rebuild-resp.json new file mode 100644 index 00000000000..d6be9e95e14 --- /dev/null +++ b/doc/api_samples/servers/v2.73/server-action-rebuild-resp.json @@ -0,0 +1,64 @@ +{ + "server": { + "OS-DCF:diskConfig": "AUTO", + "accessIPv4": "1.2.3.4", + "accessIPv6": "80fe::", + "addresses": { + "private": [ + { + "addr": "192.168.1.30", + "version": 4 + } + ] + }, + "adminPass": "seekr3t", + "created": "2019-04-23T17:10:22Z", + "description": null, + "flavor": { + "disk": 1, + "ephemeral": 0, + "extra_specs": {}, + "original_name": "m1.tiny", + 
"ram": 512, + "swap": 0, + "vcpus": 1 + }, + "hostId": "2091634baaccdc4c5a1d57069c833e402921df696b7f970791b12ec6", + "id": "0c37a84a-c757-4f22-8c7f-0bf8b6970886", + "image": { + "id": "70a599e0-31e7-49b7-b260-868f441e862b", + "links": [ + { + "href": "http://openstack.example.com/6f70656e737461636b20342065766572/images/70a599e0-31e7-49b7-b260-868f441e862b", + "rel": "bookmark" + } + ] + }, + "key_name": null, + "links": [ + { + "href": "http://openstack.example.com/v2.1/6f70656e737461636b20342065766572/servers/0c37a84a-c757-4f22-8c7f-0bf8b6970886", + "rel": "self" + }, + { + "href": "http://openstack.example.com/6f70656e737461636b20342065766572/servers/0c37a84a-c757-4f22-8c7f-0bf8b6970886", + "rel": "bookmark" + } + ], + "locked": false, + "locked_reason": null, + "metadata": { + "meta_var": "meta_val" + }, + "name": "foobar", + "progress": 0, + "server_groups": [], + "status": "ACTIVE", + "tags": [], + "tenant_id": "6f70656e737461636b20342065766572", + "trusted_image_certificates": null, + "updated": "2019-04-23T17:10:24Z", + "user_data": "ZWNobyAiaGVsbG8gd29ybGQi", + "user_id": "admin" + } +} \ No newline at end of file diff --git a/doc/api_samples/servers/v2.73/server-action-rebuild.json b/doc/api_samples/servers/v2.73/server-action-rebuild.json new file mode 100644 index 00000000000..f1431a05062 --- /dev/null +++ b/doc/api_samples/servers/v2.73/server-action-rebuild.json @@ -0,0 +1,14 @@ +{ + "rebuild" : { + "accessIPv4" : "1.2.3.4", + "accessIPv6" : "80fe::", + "OS-DCF:diskConfig": "AUTO", + "imageRef" : "70a599e0-31e7-49b7-b260-868f441e862b", + "name" : "foobar", + "adminPass" : "seekr3t", + "metadata" : { + "meta_var" : "meta_val" + }, + "user_data": "ZWNobyAiaGVsbG8gd29ybGQi" + } +} \ No newline at end of file diff --git a/doc/api_samples/servers/v2.73/server-create-req.json b/doc/api_samples/servers/v2.73/server-create-req.json new file mode 100644 index 00000000000..c8ae2eac974 --- /dev/null +++ b/doc/api_samples/servers/v2.73/server-create-req.json @@ -0,0 +1,20 @@ +{ + "server" : { + "accessIPv4": "1.2.3.4", + "accessIPv6": "80fe::", + "name" : "new-server-test", + "imageRef" : "70a599e0-31e7-49b7-b260-868f441e862b", + "flavorRef" : "1", + "OS-DCF:diskConfig": "AUTO", + "metadata" : { + "My Server Name" : "Apache1" + }, + "security_groups": [ + { + "name": "default" + } + ], + "user_data" : "IyEvYmluL2Jhc2gKL2Jpbi9zdQplY2hvICJJIGFtIGluIHlvdSEiCg==", + "networks": "auto" + } +} \ No newline at end of file diff --git a/doc/api_samples/servers/v2.73/server-create-resp.json b/doc/api_samples/servers/v2.73/server-create-resp.json new file mode 100644 index 00000000000..d5ff5974d9c --- /dev/null +++ b/doc/api_samples/servers/v2.73/server-create-resp.json @@ -0,0 +1,22 @@ +{ + "server": { + "OS-DCF:diskConfig": "AUTO", + "adminPass": "kJTmMkszoB6A", + "id": "ae10adbb-9b5e-4667-9cc5-05ebdc80a941", + "links": [ + { + "href": "http://openstack.example.com/v2.1/6f70656e737461636b20342065766572/servers/ae10adbb-9b5e-4667-9cc5-05ebdc80a941", + "rel": "self" + }, + { + "href": "http://openstack.example.com/6f70656e737461636b20342065766572/servers/ae10adbb-9b5e-4667-9cc5-05ebdc80a941", + "rel": "bookmark" + } + ], + "security_groups": [ + { + "name": "default" + } + ] + } +} \ No newline at end of file diff --git a/doc/api_samples/servers/v2.73/server-get-resp.json b/doc/api_samples/servers/v2.73/server-get-resp.json new file mode 100644 index 00000000000..edd30317f57 --- /dev/null +++ b/doc/api_samples/servers/v2.73/server-get-resp.json @@ -0,0 +1,88 @@ +{ + "server": { + 
"OS-DCF:diskConfig": "AUTO", + "OS-EXT-AZ:availability_zone": "nova", + "OS-EXT-SRV-ATTR:host": "compute", + "OS-EXT-SRV-ATTR:hostname": "new-server-test", + "OS-EXT-SRV-ATTR:hypervisor_hostname": "fake-mini", + "OS-EXT-SRV-ATTR:instance_name": "instance-00000001", + "OS-EXT-SRV-ATTR:kernel_id": "", + "OS-EXT-SRV-ATTR:launch_index": 0, + "OS-EXT-SRV-ATTR:ramdisk_id": "", + "OS-EXT-SRV-ATTR:reservation_id": "r-t61j9da6", + "OS-EXT-SRV-ATTR:root_device_name": "/dev/sda", + "OS-EXT-SRV-ATTR:user_data": "IyEvYmluL2Jhc2gKL2Jpbi9zdQplY2hvICJJIGFtIGluIHlvdSEiCg==", + "OS-EXT-STS:power_state": 1, + "OS-EXT-STS:task_state": null, + "OS-EXT-STS:vm_state": "active", + "OS-SRV-USG:launched_at": "2019-04-23T15:19:10.855016", + "OS-SRV-USG:terminated_at": null, + "accessIPv4": "1.2.3.4", + "accessIPv6": "80fe::", + "addresses": { + "private": [ + { + "OS-EXT-IPS-MAC:mac_addr": "00:0c:29:0d:11:74", + "OS-EXT-IPS:type": "fixed", + "addr": "192.168.1.30", + "version": 4 + } + ] + }, + "config_drive": "", + "created": "2019-04-23T15:19:09Z", + "description": null, + "flavor": { + "disk": 1, + "ephemeral": 0, + "extra_specs": {}, + "original_name": "m1.tiny", + "ram": 512, + "swap": 0, + "vcpus": 1 + }, + "hostId": "2091634baaccdc4c5a1d57069c833e402921df696b7f970791b12ec6", + "host_status": "UP", + "id": "0e12087a-7c87-476a-8f84-7398e991cecc", + "image": { + "id": "70a599e0-31e7-49b7-b260-868f441e862b", + "links": [ + { + "href": "http://openstack.example.com/6f70656e737461636b20342065766572/images/70a599e0-31e7-49b7-b260-868f441e862b", + "rel": "bookmark" + } + ] + }, + "key_name": null, + "links": [ + { + "href": "http://openstack.example.com/v2.1/6f70656e737461636b20342065766572/servers/0e12087a-7c87-476a-8f84-7398e991cecc", + "rel": "self" + }, + { + "href": "http://openstack.example.com/6f70656e737461636b20342065766572/servers/0e12087a-7c87-476a-8f84-7398e991cecc", + "rel": "bookmark" + } + ], + "locked": true, + "locked_reason": "I don't want to work", + "metadata": { + "My Server Name": "Apache1" + }, + "name": "new-server-test", + "os-extended-volumes:volumes_attached": [], + "progress": 0, + "security_groups": [ + { + "name": "default" + } + ], + "server_groups": [], + "status": "ACTIVE", + "tags": [], + "tenant_id": "6f70656e737461636b20342065766572", + "trusted_image_certificates": null, + "updated": "2019-04-23T15:19:11Z", + "user_id": "admin" + } +} diff --git a/doc/api_samples/servers/v2.73/server-update-req.json b/doc/api_samples/servers/v2.73/server-update-req.json new file mode 100644 index 00000000000..3b3995d51e4 --- /dev/null +++ b/doc/api_samples/servers/v2.73/server-update-req.json @@ -0,0 +1,9 @@ +{ + "server": { + "accessIPv4": "1.2.3.4", + "accessIPv6": "80fe::", + "OS-DCF:diskConfig": "AUTO", + "name": "new-server-test", + "description": "Sample description" + } +} \ No newline at end of file diff --git a/doc/api_samples/servers/v2.73/server-update-resp.json b/doc/api_samples/servers/v2.73/server-update-resp.json new file mode 100644 index 00000000000..b99333e902d --- /dev/null +++ b/doc/api_samples/servers/v2.73/server-update-resp.json @@ -0,0 +1,61 @@ +{ + "server": { + "OS-DCF:diskConfig": "AUTO", + "accessIPv4": "1.2.3.4", + "accessIPv6": "80fe::", + "addresses": { + "private": [ + { + "addr": "192.168.1.30", + "version": 4 + } + ] + }, + "created": "2019-04-23T17:37:48Z", + "description": "Sample description", + "flavor": { + "disk": 1, + "ephemeral": 0, + "extra_specs": {}, + "original_name": "m1.tiny", + "ram": 512, + "swap": 0, + "vcpus": 1 + }, + "hostId": 
"2091634baaccdc4c5a1d57069c833e402921df696b7f970791b12ec6", + "id": "f9a6c4fe-28e0-48a9-b02c-164e4d04d0b2", + "image": { + "id": "70a599e0-31e7-49b7-b260-868f441e862b", + "links": [ + { + "href": "http://openstack.example.com/6f70656e737461636b20342065766572/images/70a599e0-31e7-49b7-b260-868f441e862b", + "rel": "bookmark" + } + ] + }, + "links": [ + { + "href": "http://openstack.example.com/v2.1/6f70656e737461636b20342065766572/servers/f9a6c4fe-28e0-48a9-b02c-164e4d04d0b2", + "rel": "self" + }, + { + "href": "http://openstack.example.com/6f70656e737461636b20342065766572/servers/f9a6c4fe-28e0-48a9-b02c-164e4d04d0b2", + "rel": "bookmark" + } + ], + "locked": false, + "locked_reason": null, + "metadata": { + "My Server Name": "Apache1" + }, + "name": "new-server-test", + "progress": 0, + "server_groups": [], + "status": "ACTIVE", + "tags": [], + "tenant_id": "6f70656e737461636b20342065766572", + "trusted_image_certificates": null, + "updated": "2019-04-23T17:37:48Z", + "user_id": "admin" + } +} \ No newline at end of file diff --git a/doc/api_samples/servers/v2.73/servers-details-resp.json b/doc/api_samples/servers/v2.73/servers-details-resp.json new file mode 100644 index 00000000000..98fcc913063 --- /dev/null +++ b/doc/api_samples/servers/v2.73/servers-details-resp.json @@ -0,0 +1,89 @@ +{ + "servers": [ + { + "OS-DCF:diskConfig": "AUTO", + "OS-EXT-AZ:availability_zone": "nova", + "OS-EXT-SRV-ATTR:host": "compute", + "OS-EXT-SRV-ATTR:hostname": "new-server-test", + "OS-EXT-SRV-ATTR:hypervisor_hostname": "fake-mini", + "OS-EXT-SRV-ATTR:instance_name": "instance-00000001", + "OS-EXT-SRV-ATTR:kernel_id": "", + "OS-EXT-SRV-ATTR:launch_index": 0, + "OS-EXT-SRV-ATTR:ramdisk_id": "", + "OS-EXT-SRV-ATTR:reservation_id": "r-l0i0clt2", + "OS-EXT-SRV-ATTR:root_device_name": "/dev/sda", + "OS-EXT-SRV-ATTR:user_data": "IyEvYmluL2Jhc2gKL2Jpbi9zdQplY2hvICJJIGFtIGluIHlvdSEiCg==", + "OS-EXT-STS:power_state": 1, + "OS-EXT-STS:task_state": null, + "OS-EXT-STS:vm_state": "active", + "OS-SRV-USG:launched_at": "2019-04-23T15:19:15.317839", + "OS-SRV-USG:terminated_at": null, + "accessIPv4": "1.2.3.4", + "accessIPv6": "80fe::", + "addresses": { + "private": [ + { + "OS-EXT-IPS-MAC:mac_addr": "00:0c:29:0d:11:74", + "OS-EXT-IPS:type": "fixed", + "addr": "192.168.1.30", + "version": 4 + } + ] + }, + "config_drive": "", + "created": "2019-04-23T15:19:14Z", + "description": null, + "flavor": { + "disk": 1, + "ephemeral": 0, + "extra_specs": {}, + "original_name": "m1.tiny", + "ram": 512, + "swap": 0, + "vcpus": 1 + }, + "hostId": "2091634baaccdc4c5a1d57069c833e402921df696b7f970791b12ec6", + "host_status": "UP", + "id": "2ce4c5b3-2866-4972-93ce-77a2ea46a7f9", + "image": { + "id": "70a599e0-31e7-49b7-b260-868f441e862b", + "links": [ + { + "href": "http://openstack.example.com/6f70656e737461636b20342065766572/images/70a599e0-31e7-49b7-b260-868f441e862b", + "rel": "bookmark" + } + ] + }, + "key_name": null, + "links": [ + { + "href": "http://openstack.example.com/v2.1/6f70656e737461636b20342065766572/servers/2ce4c5b3-2866-4972-93ce-77a2ea46a7f9", + "rel": "self" + }, + { + "href": "http://openstack.example.com/6f70656e737461636b20342065766572/servers/2ce4c5b3-2866-4972-93ce-77a2ea46a7f9", + "rel": "bookmark" + } + ], + "locked": true, + "locked_reason": "I don't want to work", + "metadata": { + "My Server Name": "Apache1" + }, + "name": "new-server-test", + "os-extended-volumes:volumes_attached": [], + "progress": 0, + "security_groups": [ + { + "name": "default" + } + ], + "status": "ACTIVE", + "tags": [], + 
"tenant_id": "6f70656e737461636b20342065766572", + "trusted_image_certificates": null, + "updated": "2019-04-23T15:19:15Z", + "user_id": "admin" + } + ] +} \ No newline at end of file diff --git a/doc/api_samples/servers/v2.74/server-create-req-with-host-and-node.json b/doc/api_samples/servers/v2.74/server-create-req-with-host-and-node.json new file mode 100644 index 00000000000..43552ed6385 --- /dev/null +++ b/doc/api_samples/servers/v2.74/server-create-req-with-host-and-node.json @@ -0,0 +1,23 @@ +{ + "server" : { + "adminPass": "MySecretPass", + "accessIPv4": "1.2.3.4", + "accessIPv6": "80fe::", + "name" : "new-server-test", + "imageRef" : "70a599e0-31e7-49b7-b260-868f441e862b", + "flavorRef" : "6", + "OS-DCF:diskConfig": "AUTO", + "metadata" : { + "My Server Name" : "Apache1" + }, + "security_groups": [ + { + "name": "default" + } + ], + "user_data" : "IyEvYmluL2Jhc2gKL2Jpbi9zdQplY2hvICJJIGFtIGluIHlvdSEiCg==", + "networks": "auto", + "host": "openstack-node-01", + "hypervisor_hostname": "openstack-node-01" + } +} \ No newline at end of file diff --git a/doc/api_samples/servers/v2.74/server-create-req-with-only-host.json b/doc/api_samples/servers/v2.74/server-create-req-with-only-host.json new file mode 100644 index 00000000000..aa0dc613b12 --- /dev/null +++ b/doc/api_samples/servers/v2.74/server-create-req-with-only-host.json @@ -0,0 +1,22 @@ +{ + "server" : { + "adminPass": "MySecretPass", + "accessIPv4": "1.2.3.4", + "accessIPv6": "80fe::", + "name" : "new-server-test", + "imageRef" : "70a599e0-31e7-49b7-b260-868f441e862b", + "flavorRef" : "6", + "OS-DCF:diskConfig": "AUTO", + "metadata" : { + "My Server Name" : "Apache1" + }, + "security_groups": [ + { + "name": "default" + } + ], + "user_data" : "IyEvYmluL2Jhc2gKL2Jpbi9zdQplY2hvICJJIGFtIGluIHlvdSEiCg==", + "networks": "auto", + "host": "openstack-node-01" + } +} \ No newline at end of file diff --git a/doc/api_samples/servers/v2.74/server-create-req-with-only-node.json b/doc/api_samples/servers/v2.74/server-create-req-with-only-node.json new file mode 100644 index 00000000000..ab9ec85350f --- /dev/null +++ b/doc/api_samples/servers/v2.74/server-create-req-with-only-node.json @@ -0,0 +1,22 @@ +{ + "server" : { + "adminPass": "MySecretPass", + "accessIPv4": "1.2.3.4", + "accessIPv6": "80fe::", + "name" : "new-server-test", + "imageRef" : "70a599e0-31e7-49b7-b260-868f441e862b", + "flavorRef" : "6", + "OS-DCF:diskConfig": "AUTO", + "metadata" : { + "My Server Name" : "Apache1" + }, + "security_groups": [ + { + "name": "default" + } + ], + "user_data" : "IyEvYmluL2Jhc2gKL2Jpbi9zdQplY2hvICJJIGFtIGluIHlvdSEiCg==", + "networks": "auto", + "hypervisor_hostname": "openstack-node-01" + } +} \ No newline at end of file diff --git a/doc/api_samples/servers/v2.74/server-create-resp.json b/doc/api_samples/servers/v2.74/server-create-resp.json new file mode 100644 index 00000000000..7ebe2e20a2d --- /dev/null +++ b/doc/api_samples/servers/v2.74/server-create-resp.json @@ -0,0 +1,22 @@ +{ + "server": { + "OS-DCF:diskConfig": "AUTO", + "adminPass": "DB2bQBhxvq8a", + "id": "84e2b49d-39a9-4d32-9100-e62161c236db", + "links": [ + { + "href": "http://openstack.example.com/v2.1/6f70656e737461636b20342065766572/servers/84e2b49d-39a9-4d32-9100-e62161c236db", + "rel": "self" + }, + { + "href": "http://openstack.example.com/6f70656e737461636b20342065766572/servers/84e2b49d-39a9-4d32-9100-e62161c236db", + "rel": "bookmark" + } + ], + "security_groups": [ + { + "name": "default" + } + ] + } +} \ No newline at end of file diff --git 
a/doc/api_samples/servers/v2.75/server-action-rebuild-resp.json b/doc/api_samples/servers/v2.75/server-action-rebuild-resp.json new file mode 100644 index 00000000000..a4421b85e89 --- /dev/null +++ b/doc/api_samples/servers/v2.75/server-action-rebuild-resp.json @@ -0,0 +1,89 @@ +{ + "server": { + "OS-DCF:diskConfig": "AUTO", + "OS-EXT-AZ:availability_zone": "us-west", + "OS-EXT-SRV-ATTR:host": "compute", + "OS-EXT-SRV-ATTR:hostname": "new-server-test", + "OS-EXT-SRV-ATTR:hypervisor_hostname": "fake-mini", + "OS-EXT-SRV-ATTR:instance_name": "instance-00000001", + "OS-EXT-SRV-ATTR:kernel_id": "", + "OS-EXT-SRV-ATTR:launch_index": 0, + "OS-EXT-SRV-ATTR:ramdisk_id": "", + "OS-EXT-SRV-ATTR:reservation_id": "r-t61j9da6", + "OS-EXT-SRV-ATTR:root_device_name": "/dev/sda", + "OS-EXT-STS:power_state": 1, + "OS-EXT-STS:task_state": null, + "OS-EXT-STS:vm_state": "active", + "OS-SRV-USG:launched_at": "2019-04-23T15:19:10.855016", + "OS-SRV-USG:terminated_at": null, + "accessIPv4": "1.2.3.4", + "accessIPv6": "80fe::", + "addresses": { + "private": [ + { + "OS-EXT-IPS-MAC:mac_addr": "00:0c:29:0d:11:74", + "OS-EXT-IPS:type": "fixed", + "addr": "192.168.1.30", + "version": 4 + } + ] + }, + "adminPass": "seekr3t", + "config_drive": "", + "created": "2019-04-23T17:10:22Z", + "description": null, + "flavor": { + "disk": 1, + "ephemeral": 0, + "extra_specs": {}, + "original_name": "m1.tiny", + "ram": 512, + "swap": 0, + "vcpus": 1 + }, + "hostId": "2091634baaccdc4c5a1d57069c833e402921df696b7f970791b12ec6", + "host_status": "UP", + "id": "0c37a84a-c757-4f22-8c7f-0bf8b6970886", + "image": { + "id": "70a599e0-31e7-49b7-b260-868f441e862b", + "links": [ + { + "href": "http://openstack.example.com/6f70656e737461636b20342065766572/images/70a599e0-31e7-49b7-b260-868f441e862b", + "rel": "bookmark" + } + ] + }, + "key_name": null, + "links": [ + { + "href": "http://openstack.example.com/v2.1/6f70656e737461636b20342065766572/servers/0c37a84a-c757-4f22-8c7f-0bf8b6970886", + "rel": "self" + }, + { + "href": "http://openstack.example.com/6f70656e737461636b20342065766572/servers/0c37a84a-c757-4f22-8c7f-0bf8b6970886", + "rel": "bookmark" + } + ], + "locked": false, + "locked_reason": null, + "metadata": { + "meta_var": "meta_val" + }, + "name": "foobar", + "os-extended-volumes:volumes_attached": [], + "progress": 0, + "security_groups": [ + { + "name": "default" + } + ], + "server_groups": [], + "status": "ACTIVE", + "tags": [], + "tenant_id": "6f70656e737461636b20342065766572", + "trusted_image_certificates": null, + "updated": "2019-04-23T17:10:24Z", + "user_data": "ZWNobyAiaGVsbG8gd29ybGQi", + "user_id": "admin" + } +} diff --git a/doc/api_samples/servers/v2.75/server-action-rebuild.json b/doc/api_samples/servers/v2.75/server-action-rebuild.json new file mode 100644 index 00000000000..f1431a05062 --- /dev/null +++ b/doc/api_samples/servers/v2.75/server-action-rebuild.json @@ -0,0 +1,14 @@ +{ + "rebuild" : { + "accessIPv4" : "1.2.3.4", + "accessIPv6" : "80fe::", + "OS-DCF:diskConfig": "AUTO", + "imageRef" : "70a599e0-31e7-49b7-b260-868f441e862b", + "name" : "foobar", + "adminPass" : "seekr3t", + "metadata" : { + "meta_var" : "meta_val" + }, + "user_data": "ZWNobyAiaGVsbG8gd29ybGQi" + } +} \ No newline at end of file diff --git a/doc/api_samples/servers/v2.75/server-update-req.json b/doc/api_samples/servers/v2.75/server-update-req.json new file mode 100644 index 00000000000..1341355ce52 --- /dev/null +++ b/doc/api_samples/servers/v2.75/server-update-req.json @@ -0,0 +1,9 @@ +{ + "server": { + "accessIPv4": "1.2.3.4", + 
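
These v2.75 samples differ from their earlier counterparts mainly in what comes back: starting with microversion 2.75, ``PUT /servers/{id}`` and the rebuild action return the full server representation rather than a reduced view. A sketch under the same placeholder assumptions as the previous snippet (``requests`` library, made-up endpoint, token and server ID):

    # Illustrative sketch only; endpoint, token and server ID are placeholders.
    import requests

    COMPUTE = "http://openstack.example.com/v2.1/6f70656e737461636b20342065766572"
    HEADERS = {
        "X-Auth-Token": "<keystone-token>",
        "X-OpenStack-Nova-API-Version": "2.75",
    }
    server_id = "324dfb7d-f4a9-419a-9a19-237df04b443b"

    body = {"server": {"name": "new-server-test",
                       "description": "Sample description"}}
    server = requests.put(
        f"{COMPUTE}/servers/{server_id}", json=body, headers=HEADERS,
    ).json()["server"]

    # Before 2.75 this response carried only a subset of fields; from 2.75
    # onwards attributes such as host_status and OS-EXT-* are included too.
    print(server["status"], server.get("host_status"))
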
"accessIPv6": "80fe::", + "OS-DCF:diskConfig": "AUTO", + "name": "new-server-test", + "description": "Sample description" + } +} diff --git a/doc/api_samples/servers/v2.75/server-update-resp.json b/doc/api_samples/servers/v2.75/server-update-resp.json new file mode 100644 index 00000000000..0fc5cf237bd --- /dev/null +++ b/doc/api_samples/servers/v2.75/server-update-resp.json @@ -0,0 +1,88 @@ +{ + "server": { + "OS-DCF:diskConfig": "AUTO", + "OS-EXT-AZ:availability_zone": "us-west", + "OS-EXT-SRV-ATTR:host": "compute", + "OS-EXT-SRV-ATTR:hostname": "new-server-test", + "OS-EXT-SRV-ATTR:hypervisor_hostname": "fake-mini", + "OS-EXT-SRV-ATTR:instance_name": "instance-00000001", + "OS-EXT-SRV-ATTR:kernel_id": "", + "OS-EXT-SRV-ATTR:launch_index": 0, + "OS-EXT-SRV-ATTR:ramdisk_id": "", + "OS-EXT-SRV-ATTR:reservation_id": "r-t61j9da6", + "OS-EXT-SRV-ATTR:root_device_name": "/dev/sda", + "OS-EXT-SRV-ATTR:user_data": "IyEvYmluL2Jhc2gKL2Jpbi9zdQplY2hvICJJIGFtIGluIHlvdSEiCg==", + "OS-EXT-STS:power_state": 1, + "OS-EXT-STS:task_state": null, + "OS-EXT-STS:vm_state": "active", + "OS-SRV-USG:launched_at": "2019-04-23T15:19:10.855016", + "OS-SRV-USG:terminated_at": null, + "accessIPv4": "1.2.3.4", + "accessIPv6": "80fe::", + "addresses": { + "private": [ + { + "OS-EXT-IPS-MAC:mac_addr": "00:0c:29:0d:11:74", + "OS-EXT-IPS:type": "fixed", + "addr": "192.168.1.30", + "version": 4 + } + ] + }, + "config_drive": "", + "created": "2012-12-02T02:11:57Z", + "description": "Sample description", + "flavor": { + "disk": 1, + "ephemeral": 0, + "extra_specs": {}, + "original_name": "m1.tiny", + "ram": 512, + "swap": 0, + "vcpus": 1 + }, + "hostId": "6e84af987b4e7ec1c039b16d21f508f4a505672bd94fb0218b668d07", + "host_status": "UP", + "id": "324dfb7d-f4a9-419a-9a19-237df04b443b", + "image": { + "id": "70a599e0-31e7-49b7-b260-868f441e862b", + "links": [ + { + "href": "http://openstack.example.com/6f70656e737461636b20342065766572/images/70a599e0-31e7-49b7-b260-868f441e862b", + "rel": "bookmark" + } + ] + }, + "key_name": null, + "links": [ + { + "href": "http://openstack.example.com/v2/6f70656e737461636b20342065766572/servers/324dfb7d-f4a9-419a-9a19-237df04b443b", + "rel": "self" + }, + { + "href": "http://openstack.example.com/6f70656e737461636b20342065766572/servers/324dfb7d-f4a9-419a-9a19-237df04b443b", + "rel": "bookmark" + } + ], + "locked": false, + "locked_reason": null, + "metadata": { + "My Server Name": "Apache1" + }, + "name": "new-server-test", + "os-extended-volumes:volumes_attached": [], + "progress": 0, + "security_groups": [ + { + "name": "default" + } + ], + "server_groups": [], + "status": "ACTIVE", + "tags": [], + "tenant_id": "6f70656e737461636b20342065766572", + "trusted_image_certificates": null, + "updated": "2012-12-02T02:11:58Z", + "user_id": "admin" + } +} diff --git a/doc/api_samples/servers/v2.9/server-get-resp.json b/doc/api_samples/servers/v2.9/server-get-resp.json index c92ff85fd58..25ac6ae3052 100644 --- a/doc/api_samples/servers/v2.9/server-get-resp.json +++ b/doc/api_samples/servers/v2.9/server-get-resp.json @@ -5,8 +5,8 @@ "addresses": { "private": [ { - "addr": "192.168.0.3", - "OS-EXT-IPS-MAC:mac_addr": "aa:bb:cc:dd:ee:ff", + "addr": "192.168.1.30", + "OS-EXT-IPS-MAC:mac_addr": "00:0c:29:0d:11:74", "OS-EXT-IPS:type": "fixed", "version": 4 } @@ -50,7 +50,7 @@ "name": "new-server-test", "config_drive": "", "OS-DCF:diskConfig": "AUTO", - "OS-EXT-AZ:availability_zone": "nova", + "OS-EXT-AZ:availability_zone": "us-west", "OS-EXT-SRV-ATTR:host": "b8b357f7100d4391828f2177c922ef93", 
"OS-EXT-SRV-ATTR:hypervisor_hostname": "fake-mini", "OS-EXT-SRV-ATTR:instance_name": "instance-00000001", @@ -85,7 +85,7 @@ "status": "ACTIVE", "tenant_id": "6f70656e737461636b20342065766572", "updated": "2013-09-03T04:01:33Z", - "user_id": "fake", + "user_id": "admin", "locked": false } } diff --git a/doc/api_samples/servers/v2.9/servers-details-resp.json b/doc/api_samples/servers/v2.9/servers-details-resp.json index 0ca874f3332..84cb44c1b27 100644 --- a/doc/api_samples/servers/v2.9/servers-details-resp.json +++ b/doc/api_samples/servers/v2.9/servers-details-resp.json @@ -6,8 +6,8 @@ "addresses": { "private": [ { - "addr": "192.168.0.3", - "OS-EXT-IPS-MAC:mac_addr": "aa:bb:cc:dd:ee:ff", + "addr": "192.168.1.30", + "OS-EXT-IPS-MAC:mac_addr": "00:0c:29:0d:11:74", "OS-EXT-IPS:type": "fixed", "version": 4 } @@ -51,7 +51,7 @@ "name": "new-server-test", "config_drive": "", "OS-DCF:diskConfig": "AUTO", - "OS-EXT-AZ:availability_zone": "nova", + "OS-EXT-AZ:availability_zone": "us-west", "OS-EXT-SRV-ATTR:host": "c3f14e9812ad496baf92ccfb3c61e15f", "OS-EXT-SRV-ATTR:hypervisor_hostname": "fake-mini", "OS-EXT-SRV-ATTR:instance_name": "instance-00000001", @@ -86,7 +86,7 @@ "status": "ACTIVE", "tenant_id": "6f70656e737461636b20342065766572", "updated": "2013-09-03T04:01:32Z", - "user_id": "fake", + "user_id": "admin", "locked": false } ], diff --git a/doc/api_samples/servers/v2.90/server-action-rebuild-resp.json b/doc/api_samples/servers/v2.90/server-action-rebuild-resp.json new file mode 100644 index 00000000000..d701b55c0a0 --- /dev/null +++ b/doc/api_samples/servers/v2.90/server-action-rebuild-resp.json @@ -0,0 +1,80 @@ +{ + "server": { + "OS-DCF:diskConfig": "AUTO", + "OS-EXT-AZ:availability_zone": "us-west", + "OS-EXT-SRV-ATTR:hostname": "updated-hostname", + "OS-EXT-STS:power_state": 1, + "OS-EXT-STS:task_state": null, + "OS-EXT-STS:vm_state": "active", + "OS-SRV-USG:launched_at": "2021-08-19T15:16:22.177882", + "OS-SRV-USG:terminated_at": null, + "accessIPv4": "1.2.3.4", + "accessIPv6": "80fe::", + "addresses": { + "private": [ + { + "OS-EXT-IPS-MAC:mac_addr": "00:0c:29:0d:11:74", + "OS-EXT-IPS:type": "fixed", + "addr": "192.168.1.30", + "version": 4 + } + ] + }, + "adminPass": "seekr3t", + "config_drive": "", + "created": "2019-04-23T17:10:22Z", + "description": null, + "flavor": { + "disk": 1, + "ephemeral": 0, + "extra_specs": {}, + "original_name": "m1.tiny", + "ram": 512, + "swap": 0, + "vcpus": 1 + }, + "hostId": "2091634baaccdc4c5a1d57069c833e402921df696b7f970791b12ec6", + "id": "0c37a84a-c757-4f22-8c7f-0bf8b6970886", + "image": { + "id": "70a599e0-31e7-49b7-b260-868f441e862b", + "links": [ + { + "href": "http://openstack.example.com/6f70656e737461636b20342065766572/images/70a599e0-31e7-49b7-b260-868f441e862b", + "rel": "bookmark" + } + ] + }, + "key_name": null, + "links": [ + { + "href": "http://openstack.example.com/v2.1/6f70656e737461636b20342065766572/servers/0c37a84a-c757-4f22-8c7f-0bf8b6970886", + "rel": "self" + }, + { + "href": "http://openstack.example.com/6f70656e737461636b20342065766572/servers/0c37a84a-c757-4f22-8c7f-0bf8b6970886", + "rel": "bookmark" + } + ], + "locked": false, + "locked_reason": null, + "metadata": { + "meta_var": "meta_val" + }, + "name": "foobar", + "os-extended-volumes:volumes_attached": [], + "progress": 0, + "security_groups": [ + { + "name": "default" + } + ], + "server_groups": [], + "status": "ACTIVE", + "tags": [], + "tenant_id": "6f70656e737461636b20342065766572", + "trusted_image_certificates": null, + "updated": "2019-04-23T17:10:24Z", + 
"user_data": "ZWNobyAiaGVsbG8gd29ybGQi", + "user_id": "fake" + } +} diff --git a/doc/api_samples/servers/v2.90/server-action-rebuild.json b/doc/api_samples/servers/v2.90/server-action-rebuild.json new file mode 100644 index 00000000000..32148a45be3 --- /dev/null +++ b/doc/api_samples/servers/v2.90/server-action-rebuild.json @@ -0,0 +1,15 @@ +{ + "rebuild" : { + "accessIPv4" : "1.2.3.4", + "accessIPv6" : "80fe::", + "OS-DCF:diskConfig": "AUTO", + "imageRef" : "70a599e0-31e7-49b7-b260-868f441e862b", + "name" : "foobar", + "adminPass" : "seekr3t", + "hostname": "custom-hostname", + "metadata" : { + "meta_var" : "meta_val" + }, + "user_data": "ZWNobyAiaGVsbG8gd29ybGQi" + } +} diff --git a/doc/api_samples/servers/v2.90/server-create-req.json b/doc/api_samples/servers/v2.90/server-create-req.json new file mode 100644 index 00000000000..c0818fd5262 --- /dev/null +++ b/doc/api_samples/servers/v2.90/server-create-req.json @@ -0,0 +1,30 @@ +{ + "server" : { + "accessIPv4": "1.2.3.4", + "accessIPv6": "80fe::", + "name" : "new-server-test", + "imageRef" : "70a599e0-31e7-49b7-b260-868f441e862b", + "flavorRef" : "1", + "availability_zone": "us-west", + "OS-DCF:diskConfig": "AUTO", + "hostname": "custom-hostname", + "metadata" : { + "My Server Name" : "Apache1" + }, + "personality": [ + { + "path": "/etc/banner.txt", + "contents": "ICAgICAgDQoiQSBjbG91ZCBkb2VzIG5vdCBrbm93IHdoeSBp dCBtb3ZlcyBpbiBqdXN0IHN1Y2ggYSBkaXJlY3Rpb24gYW5k IGF0IHN1Y2ggYSBzcGVlZC4uLkl0IGZlZWxzIGFuIGltcHVs c2lvbi4uLnRoaXMgaXMgdGhlIHBsYWNlIHRvIGdvIG5vdy4g QnV0IHRoZSBza3kga25vd3MgdGhlIHJlYXNvbnMgYW5kIHRo ZSBwYXR0ZXJucyBiZWhpbmQgYWxsIGNsb3VkcywgYW5kIHlv dSB3aWxsIGtub3csIHRvbywgd2hlbiB5b3UgbGlmdCB5b3Vy c2VsZiBoaWdoIGVub3VnaCB0byBzZWUgYmV5b25kIGhvcml6 b25zLiINCg0KLVJpY2hhcmQgQmFjaA==" + } + ], + "security_groups": [ + { + "name": "default" + } + ], + "user_data" : "IyEvYmluL2Jhc2gKL2Jpbi9zdQplY2hvICJJIGFtIGluIHlvdSEiCg==" + }, + "OS-SCH-HNT:scheduler_hints": { + "same_host": "48e6a9f6-30af-47e0-bc04-acaed113bb4e" + } +} diff --git a/doc/api_samples/servers/v2.90/server-create-resp.json b/doc/api_samples/servers/v2.90/server-create-resp.json new file mode 100644 index 00000000000..f50e29dd8be --- /dev/null +++ b/doc/api_samples/servers/v2.90/server-create-resp.json @@ -0,0 +1,22 @@ +{ + "server": { + "OS-DCF:diskConfig": "AUTO", + "adminPass": "6NpUwoz2QDRN", + "id": "f5dc173b-6804-445a-a6d8-c705dad5b5eb", + "links": [ + { + "href": "http://openstack.example.com/v2/6f70656e737461636b20342065766572/servers/f5dc173b-6804-445a-a6d8-c705dad5b5eb", + "rel": "self" + }, + { + "href": "http://openstack.example.com/6f70656e737461636b20342065766572/servers/f5dc173b-6804-445a-a6d8-c705dad5b5eb", + "rel": "bookmark" + } + ], + "security_groups": [ + { + "name": "default" + } + ] + } +} diff --git a/doc/api_samples/servers/v2.90/server-get-resp.json b/doc/api_samples/servers/v2.90/server-get-resp.json new file mode 100644 index 00000000000..063bdbce78b --- /dev/null +++ b/doc/api_samples/servers/v2.90/server-get-resp.json @@ -0,0 +1,81 @@ +{ + "server": { + "accessIPv4": "1.2.3.4", + "accessIPv6": "80fe::", + "addresses": { + "private": [ + { + "addr": "192.168.1.30", + "OS-EXT-IPS-MAC:mac_addr": "00:0c:29:0d:11:74", + "OS-EXT-IPS:type": "fixed", + "version": 4 + } + ] + }, + "created": "2013-09-03T04:01:32Z", + "description": null, + "locked": false, + "locked_reason": null, + "flavor": { + "disk": 1, + "ephemeral": 0, + "extra_specs": {}, + "original_name": "m1.tiny", + "ram": 512, + "swap": 0, + "vcpus": 1 + }, + "hostId": 
"92154fab69d5883ba2c8622b7e65f745dd33257221c07af363c51b29", + "id": "0e44cc9c-e052-415d-afbf-469b0d384170", + "image": { + "id": "70a599e0-31e7-49b7-b260-868f441e862b", + "links": [ + { + "href": "http://openstack.example.com/6f70656e737461636b20342065766572/images/70a599e0-31e7-49b7-b260-868f441e862b", + "rel": "bookmark" + } + ] + }, + "key_name": null, + "links": [ + { + "href": "http://openstack.example.com/v2/6f70656e737461636b20342065766572/servers/0e44cc9c-e052-415d-afbf-469b0d384170", + "rel": "self" + }, + { + "href": "http://openstack.example.com/6f70656e737461636b20342065766572/servers/0e44cc9c-e052-415d-afbf-469b0d384170", + "rel": "bookmark" + } + ], + "metadata": { + "My Server Name": "Apache1" + }, + "name": "new-server-test", + "config_drive": "", + "OS-DCF:diskConfig": "AUTO", + "OS-EXT-AZ:availability_zone": "nova", + "OS-EXT-SRV-ATTR:hostname": "custom-hostname", + "OS-EXT-STS:power_state": 1, + "OS-EXT-STS:task_state": null, + "OS-EXT-STS:vm_state": "active", + "os-extended-volumes:volumes_attached": [ + {"id": "volume_id1", "delete_on_termination": false}, + {"id": "volume_id2", "delete_on_termination": false} + ], + "OS-SRV-USG:launched_at": "2013-09-23T13:37:00.880302", + "OS-SRV-USG:terminated_at": null, + "progress": 0, + "security_groups": [ + { + "name": "default" + } + ], + "server_groups": [], + "status": "ACTIVE", + "tags": [], + "tenant_id": "6f70656e737461636b20342065766572", + "trusted_image_certificates": null, + "updated": "2013-09-03T04:01:33Z", + "user_id": "fake" + } +} diff --git a/doc/api_samples/servers/v2.90/server-update-req.json b/doc/api_samples/servers/v2.90/server-update-req.json new file mode 100644 index 00000000000..348f926cdf8 --- /dev/null +++ b/doc/api_samples/servers/v2.90/server-update-req.json @@ -0,0 +1,8 @@ +{ + "server": { + "accessIPv4": "4.3.2.1", + "accessIPv6": "80fe::", + "OS-DCF:diskConfig": "AUTO", + "hostname" : "new-server-hostname" + } +} diff --git a/doc/api_samples/servers/v2.90/server-update-resp.json b/doc/api_samples/servers/v2.90/server-update-resp.json new file mode 100644 index 00000000000..0dcba8328e7 --- /dev/null +++ b/doc/api_samples/servers/v2.90/server-update-resp.json @@ -0,0 +1,78 @@ +{ + "server": { + "accessIPv4": "1.2.3.4", + "accessIPv6": "80fe::", + "addresses": { + "private": [ + { + "addr": "192.168.1.30", + "OS-EXT-IPS-MAC:mac_addr": "00:0c:29:0d:11:74", + "OS-EXT-IPS:type": "fixed", + "version": 4 + } + ] + }, + "created": "2013-09-03T04:01:32Z", + "description": null, + "locked": false, + "locked_reason": null, + "flavor": { + "disk": 1, + "ephemeral": 0, + "extra_specs": {}, + "original_name": "m1.tiny", + "ram": 512, + "swap": 0, + "vcpus": 1 + }, + "hostId": "92154fab69d5883ba2c8622b7e65f745dd33257221c07af363c51b29", + "id": "0e44cc9c-e052-415d-afbf-469b0d384170", + "image": { + "id": "70a599e0-31e7-49b7-b260-868f441e862b", + "links": [ + { + "href": "http://openstack.example.com/6f70656e737461636b20342065766572/images/70a599e0-31e7-49b7-b260-868f441e862b", + "rel": "bookmark" + } + ] + }, + "key_name": null, + "links": [ + { + "href": "http://openstack.example.com/v2/6f70656e737461636b20342065766572/servers/0e44cc9c-e052-415d-afbf-469b0d384170", + "rel": "self" + }, + { + "href": "http://openstack.example.com/6f70656e737461636b20342065766572/servers/0e44cc9c-e052-415d-afbf-469b0d384170", + "rel": "bookmark" + } + ], + "metadata": { + "My Server Name": "Apache1" + }, + "name": "new-server-test", + "config_drive": "", + "OS-DCF:diskConfig": "AUTO", + "OS-EXT-AZ:availability_zone": "us-west", + 
"OS-EXT-SRV-ATTR:hostname": "new-server-hostname", + "OS-EXT-STS:power_state": 1, + "OS-EXT-STS:task_state": null, + "OS-EXT-STS:vm_state": "active", + "os-extended-volumes:volumes_attached": [], + "OS-SRV-USG:launched_at": "2013-09-23T13:37:00.880302", + "OS-SRV-USG:terminated_at": null, + "progress": 0, + "security_groups": [ + { + "name": "default" + } + ], + "server_groups": [], + "status": "ACTIVE", + "tags": [], + "tenant_id": "6f70656e737461636b20342065766572", + "trusted_image_certificates": null, + "updated": "2013-09-03T04:01:33Z", + "user_id": "fake" + } +} diff --git a/doc/api_samples/servers/v2.90/servers-details-resp.json b/doc/api_samples/servers/v2.90/servers-details-resp.json new file mode 100644 index 00000000000..14cb7708eb7 --- /dev/null +++ b/doc/api_samples/servers/v2.90/servers-details-resp.json @@ -0,0 +1,88 @@ +{ + "servers": [ + { + "accessIPv4": "1.2.3.4", + "accessIPv6": "80fe::", + "addresses": { + "private": [ + { + "addr": "192.168.1.30", + "OS-EXT-IPS-MAC:mac_addr": "00:0c:29:0d:11:74", + "OS-EXT-IPS:type": "fixed", + "version": 4 + } + ] + }, + "created": "2013-09-03T04:01:32Z", + "description": "", + "flavor": { + "disk": 1, + "ephemeral": 0, + "extra_specs": {}, + "original_name": "m1.tiny", + "ram": 512, + "swap": 0, + "vcpus": 1 + }, + "hostId": "bcf92836fc9ed4203a75cb0337afc7f917d2be504164b995c2334b25", + "id": "f5dc173b-6804-445a-a6d8-c705dad5b5eb", + "image": { + "id": "70a599e0-31e7-49b7-b260-868f441e862b", + "links": [ + { + "href": "http://openstack.example.com/6f70656e737461636b20342065766572/images/70a599e0-31e7-49b7-b260-868f441e862b", + "rel": "bookmark" + } + ] + }, + "key_name": null, + "links": [ + { + "href": "http://openstack.example.com/v2/6f70656e737461636b20342065766572/servers/f5dc173b-6804-445a-a6d8-c705dad5b5eb", + "rel": "self" + }, + { + "href": "http://openstack.example.com/6f70656e737461636b20342065766572/servers/f5dc173b-6804-445a-a6d8-c705dad5b5eb", + "rel": "bookmark" + } + ], + "metadata": { + "My Server Name": "Apache1" + }, + "name": "new-server-test", + "config_drive": "", + "locked": false, + "locked_reason": "", + "OS-DCF:diskConfig": "AUTO", + "OS-EXT-AZ:availability_zone": "nova", + "OS-EXT-SRV-ATTR:hostname": "custom-hostname", + "OS-EXT-STS:power_state": 1, + "OS-EXT-STS:task_state": null, + "OS-EXT-STS:vm_state": "active", + "os-extended-volumes:volumes_attached": [ + {"id": "volume_id1", "delete_on_termination": false}, + {"id": "volume_id2", "delete_on_termination": false} + ], + "OS-SRV-USG:launched_at": "2013-09-23T13:53:12.774549", + "OS-SRV-USG:terminated_at": null, + "progress": 0, + "security_groups": [ + { + "name": "default" + } + ], + "status": "ACTIVE", + "tags": [], + "tenant_id": "6f70656e737461636b20342065766572", + "trusted_image_certificates": null, + "updated": "2013-09-03T04:01:32Z", + "user_id": "fake" + } + ], + "servers_links": [ + { + "href": "http://openstack.example.com/v2.1/6f70656e737461636b20342065766572/servers/detail?limit=1&marker=f5dc173b-6804-445a-a6d8-c705dad5b5eb", + "rel": "next" + } + ] +} diff --git a/doc/api_samples/servers/v2.90/servers-list-resp.json b/doc/api_samples/servers/v2.90/servers-list-resp.json new file mode 100644 index 00000000000..799ef9ba44b --- /dev/null +++ b/doc/api_samples/servers/v2.90/servers-list-resp.json @@ -0,0 +1,24 @@ +{ + "servers": [ + { + "id": "22c91117-08de-4894-9aa9-6ef382400985", + "links": [ + { + "href": "http://openstack.example.com/v2/6f70656e737461636b20342065766572/servers/22c91117-08de-4894-9aa9-6ef382400985", + "rel": "self" + }, + { 
+ "href": "http://openstack.example.com/6f70656e737461636b20342065766572/servers/22c91117-08de-4894-9aa9-6ef382400985", + "rel": "bookmark" + } + ], + "name": "new-server-test" + } + ], + "servers_links": [ + { + "href": "http://openstack.example.com/v2.1/6f70656e737461636b20342065766572/servers?limit=1&marker=22c91117-08de-4894-9aa9-6ef382400985", + "rel": "next" + } + ] +} \ No newline at end of file diff --git a/doc/api_samples/versions/v21-version-get-resp.json b/doc/api_samples/versions/v21-version-get-resp.json index 7e2434ee332..f976225f9c4 100644 --- a/doc/api_samples/versions/v21-version-get-resp.json +++ b/doc/api_samples/versions/v21-version-get-resp.json @@ -19,7 +19,7 @@ } ], "status": "CURRENT", - "version": "2.65", + "version": "2.90", "min_version": "2.1", "updated": "2013-07-23T11:33:21Z" } diff --git a/doc/api_samples/versions/versions-get-resp.json b/doc/api_samples/versions/versions-get-resp.json index 8fb2d2430cf..327dbd82d66 100644 --- a/doc/api_samples/versions/versions-get-resp.json +++ b/doc/api_samples/versions/versions-get-resp.json @@ -22,7 +22,7 @@ } ], "status": "CURRENT", - "version": "2.65", + "version": "2.90", "min_version": "2.1", "updated": "2013-07-23T11:33:21Z" } diff --git a/doc/api_schemas/config_drive.json b/doc/api_schemas/config_drive.json new file mode 100644 index 00000000000..d4ba5e7d267 --- /dev/null +++ b/doc/api_schemas/config_drive.json @@ -0,0 +1,30 @@ +{ + "anyOf": [ + { + "type": "object", + "properties": { + "meta_data": { + "type": "object" + }, + "network_data": { + "type": "object" + }, + "user_data": { + "type": [ + "object", + "array", + "string", + "null" + ] + } + }, + "additionalProperties": false + }, + { + "type": [ + "string", + "null" + ] + } + ] +} diff --git a/doc/api_schemas/network_data.json b/doc/api_schemas/network_data.json new file mode 100644 index 00000000000..f980973d753 --- /dev/null +++ b/doc/api_schemas/network_data.json @@ -0,0 +1,580 @@ +{ + "$schema": "http://openstack.org/nova/network_data.json#", + "id": "http://openstack.org/nova/network_data.json", + "type": "object", + "title": "OpenStack Nova network metadata schema", + "description": "Schema of Nova instance network configuration information", + "required": [ + "links", + "networks", + "services" + ], + "properties": { + "links": { + "$id": "#/properties/links", + "type": "array", + "title": "L2 interfaces settings", + "items": { + "$id": "#/properties/links/items", + "oneOf": [ + { + "$ref": "#/definitions/l2_link" + }, + { + "$ref": "#/definitions/l2_bond" + }, + { + "$ref": "#/definitions/l2_vlan" + } + ] + } + }, + "networks": { + "$id": "#/properties/networks", + "type": "array", + "title": "L3 networks", + "items": { + "$id": "#/properties/networks/items", + "oneOf": [ + { + "$ref": "#/definitions/l3_ipv4_network" + }, + { + "$ref": "#/definitions/l3_ipv6_network" + } + ] + } + }, + "services": { + "$ref": "#/definitions/services" + } + }, + "definitions": { + "l2_address": { + "$id": "#/definitions/l2_address", + "type": "string", + "pattern": "(?i)^([0-9A-F]{2}[:-]){5}([0-9A-F]{2})$", + "title": "L2 interface address", + "examples": [ + "fa:16:3e:9c:bf:3d" + ] + }, + "l2_id": { + "$id": "#/definitions/l2_id", + "type": "string", + "title": "L2 interface ID", + "examples": [ + "eth0" + ] + }, + "l2_mtu": { + "$id": "#/definitions/l2_mtu", + "title": "L2 interface MTU", + "anyOf": [ + { + "type": "number", + "minimum": 1, + "maximum": 65535 + }, + { + "type": "null" + } + ], + "examples": [ + 1500 + ] + }, + "l2_vif_id": { + "$id": 
"#/definitions/l2_vif_id", + "type": "string", + "title": "Virtual interface ID", + "examples": [ + "cd9f6d46-4a3a-43ab-a466-994af9db96fc" + ] + }, + "l2_link": { + "$id": "#/definitions/l2_link", + "type": "object", + "title": "L2 interface configuration settings", + "required": [ + "ethernet_mac_address", + "id", + "type" + ], + "properties": { + "id": { + "$ref": "#/definitions/l2_id" + }, + "ethernet_mac_address": { + "$ref": "#/definitions/l2_address" + }, + "mtu": { + "$ref": "#/definitions/l2_mtu" + }, + "type": { + "$id": "#/definitions/l2_link/properties/type", + "type": "string", + "enum": [ + "bridge", + "dvs", + "hw_veb", + "hyperv", + "ovs", + "tap", + "vhostuser", + "vif", + "phy" + ], + "title": "Interface type", + "examples": [ + "bridge" + ] + }, + "vif_id": { + "$ref": "#/definitions/l2_vif_id" + } + } + }, + "l2_bond": { + "$id": "#/definitions/l2_bond", + "type": "object", + "title": "L2 bonding interface configuration settings", + "required": [ + "ethernet_mac_address", + "id", + "type", + "bond_mode", + "bond_links" + ], + "properties": { + "id": { + "$ref": "#/definitions/l2_id" + }, + "ethernet_mac_address": { + "$ref": "#/definitions/l2_address" + }, + "mtu": { + "$ref": "#/definitions/l2_mtu" + }, + "type": { + "$id": "#/definitions/l2_bond/properties/type", + "type": "string", + "enum": [ + "bond" + ], + "title": "Interface type", + "examples": [ + "bond" + ] + }, + "vif_id": { + "$ref": "#/definitions/l2_vif_id" + }, + "bond_mode": { + "$id": "#/definitions/bond/properties/bond_mode", + "type": "string", + "title": "Port bonding type", + "enum": [ + "802.3ad", + "balance-rr", + "active-backup", + "balance-xor", + "broadcast", + "balance-tlb", + "balance-alb" + ], + "examples": [ + "802.3ad" + ] + }, + "bond_links": { + "$id": "#/definitions/bond/properties/bond_links", + "type": "array", + "title": "Port bonding links", + "items": { + "$id": "#/definitions/bond/properties/bond_links/items", + "type": "string" + } + } + } + }, + "l2_vlan": { + "$id": "#/definitions/l2_vlan", + "type": "object", + "title": "L2 VLAN interface configuration settings", + "required": [ + "vlan_mac_address", + "id", + "type", + "vlan_link", + "vlan_id" + ], + "properties": { + "id": { + "$ref": "#/definitions/l2_id" + }, + "vlan_mac_address": { + "$ref": "#/definitions/l2_address" + }, + "mtu": { + "$ref": "#/definitions/l2_mtu" + }, + "type": { + "$id": "#/definitions/l2_vlan/properties/type", + "type": "string", + "enum": [ + "vlan" + ], + "title": "VLAN interface type", + "examples": [ + "vlan" + ] + }, + "vif_id": { + "$ref": "#/definitions/l2_vif_id" + }, + "vlan_id": { + "$id": "#/definitions/l2_vlan/properties/vlan_id", + "type": "integer", + "title": "VLAN ID" + }, + "vlan_link": { + "$id": "#/definitions/l2_vlan/properties/vlan_link", + "type": "string", + "title": "VLAN link name" + } + } + }, + "l3_id": { + "$id": "#/definitions/l3_id", + "type": "string", + "title": "Network name", + "examples": [ + "network0" + ] + }, + "l3_link": { + "$id": "#/definitions/l3_link", + "type": "string", + "title": "L2 network link to use for L3 interface", + "examples": [ + "99e88329-f20d-4741-9593-25bf07847b16" + ] + }, + "l3_network_id": { + "$id": "#/definitions/l3_network_id", + "type": "string", + "title": "Network ID", + "examples": [ + "99e88329-f20d-4741-9593-25bf07847b16" + ] + }, + "l3_ipv4_type": { + "$id": "#/definitions/l3_ipv4_type", + "type": "string", + "enum": [ + "ipv4", + "ipv4_dhcp" + ], + "title": "L3 IPv4 network type", + "examples": [ + "ipv4_dhcp" + ] + }, + 
"l3_ipv6_type": { + "$id": "#/definitions/l3_ipv6_type", + "type": "string", + "enum": [ + "ipv6", + "ipv6_dhcp", + "ipv6_slaac" + ], + "title": "L3 IPv6 network type", + "examples": [ + "ipv6_dhcp" + ] + }, + "l3_ipv4_host": { + "$id": "#/definitions/l3_ipv4_host", + "type": "string", + "pattern": "^(?:(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\\.){3}(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)$", + "title": "L3 IPv4 host address", + "examples": [ + "192.168.81.99" + ] + }, + "l3_ipv6_host": { + "$id": "#/definitions/l3_ipv6_host", + "type": "string", + "pattern": "^(([0-9a-fA-F]{1,4}:){7,7}[0-9a-fA-F]{1,4}|([0-9a-fA-F]{1,4}:){1,7}:|([0-9a-fA-F]{1,4}:){1,6}:[0-9a-fA-F]{1,4}|([0-9a-fA-F]{1,4}:){1,5}(:[0-9a-fA-F]{1,4}){1,2}|([0-9a-fA-F]{1,4}:){1,4}(:[0-9a-fA-F]{1,4}){1,3}|([0-9a-fA-F]{1,4}:){1,3}(:[0-9a-fA-F]{1,4}){1,4}|([0-9a-fA-F]{1,4}:){1,2}(:[0-9a-fA-F]{1,4}){1,5}|[0-9a-fA-F]{1,4}:((:[0-9a-fA-F]{1,4}){1,6})|:((:[0-9a-fA-F]{1,4}){1,7}|:)|fe80:(:[0-9a-fA-F]{0,4}){0,4}%[0-9a-zA-Z]{1,}|::(ffff(:0{1,4}){0,1}:){0,1}((25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9])\\.){3,3}(25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9])|([0-9a-fA-F]{1,4}:){1,4}:((25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9])\\.){3,3}(25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9]))(/[0-9]{1,2})?$", + "title": "L3 IPv6 host address", + "examples": [ + "2001:db8:3:4::192.168.81.99" + ] + }, + "l3_ipv4_netmask": { + "$id": "#/definitions/l3_ipv4_netmask", + "type": "string", + "pattern": "^(254|252|248|240|224|192|128|0)\\.0\\.0\\.0|255\\.(254|252|248|240|224|192|128|0)\\.0\\.0|255\\.255\\.(254|252|248|240|224|192|128|0)\\.0|255\\.255\\.255\\.(254|252|248|240|224|192|128|0)$", + "title": "L3 IPv4 network mask", + "examples": [ + "255.255.252.0" + ] + }, + "l3_ipv6_netmask": { + "$id": "#/definitions/l3_ipv6_netmask", + "type": "string", + "pattern": "^(([0-9a-fA-F]{1,4}:){7,7}[0-9a-fA-F]{1,4}|([0-9a-fA-F]{1,4}:){1,7}:|([0-9a-fA-F]{1,4}:){1,6}:[0-9a-fA-F]{1,4}|([0-9a-fA-F]{1,4}:){1,5}(:[0-9a-fA-F]{1,4}){1,2}|([0-9a-fA-F]{1,4}:){1,4}(:[0-9a-fA-F]{1,4}){1,3}|([0-9a-fA-F]{1,4}:){1,3}(:[0-9a-fA-F]{1,4}){1,4}|([0-9a-fA-F]{1,4}:){1,2}(:[0-9a-fA-F]{1,4}){1,5}|[0-9a-fA-F]{1,4}:((:[0-9a-fA-F]{1,4}){1,6})|:((:[0-9a-fA-F]{1,4}){1,7})|(::))$", + "title": "L3 IPv6 network mask", + "examples": [ + "ffff:ffff:ffff:ffff::" + ] + }, + "l3_ipv4_nw": { + "$id": "#/definitions/l3_ipv4_nw", + "type": "string", + "pattern": "^(([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])\\.){3}([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])$", + "title": "L3 IPv4 network address", + "examples": [ + "0.0.0.0" + ] + }, + "l3_ipv6_nw": { + "$id": "#/definitions/l3_ipv6_nw", + "type": "string", + "pattern": "^(([0-9a-fA-F]{1,4}:){7,7}[0-9a-fA-F]{1,4}|([0-9a-fA-F]{1,4}:){1,7}:|([0-9a-fA-F]{1,4}:){1,6}:[0-9a-fA-F]{1,4}|([0-9a-fA-F]{1,4}:){1,5}(:[0-9a-fA-F]{1,4}){1,2}|([0-9a-fA-F]{1,4}:){1,4}(:[0-9a-fA-F]{1,4}){1,3}|([0-9a-fA-F]{1,4}:){1,3}(:[0-9a-fA-F]{1,4}){1,4}|([0-9a-fA-F]{1,4}:){1,2}(:[0-9a-fA-F]{1,4}){1,5}|[0-9a-fA-F]{1,4}:((:[0-9a-fA-F]{1,4}){1,6})|:((:[0-9a-fA-F]{1,4}){1,7})|(::))$", + "title": "L3 IPv6 network address", + "examples": [ + "8000::" + ] + }, + "l3_ipv4_gateway": { + "$id": "#/definitions/l3_ipv4_gateway", + "type": "string", + "pattern": "^(?:(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\\.){3}(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)$", + "title": "L3 IPv4 gateway address", + "examples": [ + "192.168.200.1" + ] + }, + "l3_ipv6_gateway": { + "$id": "#/definitions/l3_ipv6_gateway", + "type": "string", + "pattern": 
"^(([0-9a-fA-F]{1,4}:){7,7}[0-9a-fA-F]{1,4}|([0-9a-fA-F]{1,4}:){1,7}:|([0-9a-fA-F]{1,4}:){1,6}:[0-9a-fA-F]{1,4}|([0-9a-fA-F]{1,4}:){1,5}(:[0-9a-fA-F]{1,4}){1,2}|([0-9a-fA-F]{1,4}:){1,4}(:[0-9a-fA-F]{1,4}){1,3}|([0-9a-fA-F]{1,4}:){1,3}(:[0-9a-fA-F]{1,4}){1,4}|([0-9a-fA-F]{1,4}:){1,2}(:[0-9a-fA-F]{1,4}){1,5}|[0-9a-fA-F]{1,4}:((:[0-9a-fA-F]{1,4}){1,6})|:((:[0-9a-fA-F]{1,4}){1,7}|:)|fe80:(:[0-9a-fA-F]{0,4}){0,4}%[0-9a-zA-Z]{1,}|::(ffff(:0{1,4}){0,1}:){0,1}((25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9])\\.){3,3}(25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9])|([0-9a-fA-F]{1,4}:){1,4}:((25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9])\\.){3,3}(25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9]))$", + "title": "L3 IPv6 gateway address", + "examples": [ + "2001:db8:3:4::192.168.81.99" + ] + }, + "l3_ipv4_network_route": { + "$id": "#/definitions/l3_ipv4_network_route", + "type": "object", + "title": "L3 IPv4 routing configuration item", + "required": [ + "gateway", + "netmask", + "network" + ], + "properties": { + "network": { + "$ref": "#/definitions/l3_ipv4_nw" + }, + "netmask": { + "$ref": "#/definitions/l3_ipv4_netmask" + }, + "gateway": { + "$ref": "#/definitions/l3_ipv4_gateway" + }, + "services": { + "$ref": "#/definitions/ipv4_services" + } + } + }, + "l3_ipv6_network_route": { + "$id": "#/definitions/l3_ipv6_network_route", + "type": "object", + "title": "L3 IPv6 routing configuration item", + "required": [ + "gateway", + "netmask", + "network" + ], + "properties": { + "network": { + "$ref": "#/definitions/l3_ipv6_nw" + }, + "netmask": { + "$ref": "#/definitions/l3_ipv6_netmask" + }, + "gateway": { + "$ref": "#/definitions/l3_ipv6_gateway" + }, + "services": { + "$ref": "#/definitions/ipv6_services" + } + } + }, + "l3_ipv4_network": { + "$id": "#/definitions/l3_ipv4_network", + "type": "object", + "title": "L3 IPv4 network configuration", + "required": [ + "id", + "link", + "network_id", + "type" + ], + "properties": { + "id": { + "$ref": "#/definitions/l3_id" + }, + "link": { + "$ref": "#/definitions/l3_link" + }, + "network_id": { + "$ref": "#/definitions/l3_network_id" + }, + "type": { + "$ref": "#/definitions/l3_ipv4_type" + }, + "ip_address": { + "$ref": "#/definitions/l3_ipv4_host" + }, + "netmask": { + "$ref": "#/definitions/l3_ipv4_netmask" + }, + "routes": { + "$id": "#/definitions/l3_ipv4_network/routes", + "type": "array", + "title": "L3 IPv4 network routes", + "items": { + "$ref": "#/definitions/l3_ipv4_network_route" + } + } + } + }, + "l3_ipv6_network": { + "$id": "#/definitions/l3_ipv6_network", + "type": "object", + "title": "L3 IPv6 network configuration", + "required": [ + "id", + "link", + "network_id", + "type" + ], + "properties": { + "id": { + "$ref": "#/definitions/l3_id" + }, + "link": { + "$ref": "#/definitions/l3_link" + }, + "network_id": { + "$ref": "#/definitions/l3_network_id" + }, + "type": { + "$ref": "#/definitions/l3_ipv6_type" + }, + "ip_address": { + "$ref": "#/definitions/l3_ipv6_host" + }, + "netmask": { + "$ref": "#/definitions/l3_ipv6_netmask" + }, + "routes": { + "$id": "#/definitions/properties/l3_ipv6_network/routes", + "type": "array", + "title": "L3 IPv6 network routes", + "items": { + "$ref": "#/definitions/l3_ipv6_network_route" + } + } + } + }, + "ipv4_service": { + "$id": "#/definitions/ipv4_service", + "type": "object", + "title": "Service on a IPv4 network", + "required": [ + "address", + "type" + ], + "properties": { + "address": { + "$ref": "#/definitions/l3_ipv4_host" + }, + "type": { + "$id": "#/definitions/ipv4_service/properties/type", + "type": "string", + "enum": [ 
+ "dns" + ], + "title": "Service type", + "examples": [ + "dns" + ] + } + } + }, + "ipv6_service": { + "$id": "#/definitions/ipv6_service", + "type": "object", + "title": "Service on a IPv6 network", + "required": [ + "address", + "type" + ], + "properties": { + "address": { + "$ref": "#/definitions/l3_ipv6_host" + }, + "type": { + "$id": "#/definitions/ipv4_service/properties/type", + "type": "string", + "enum": [ + "dns" + ], + "title": "Service type", + "examples": [ + "dns" + ] + } + } + }, + "ipv4_services": { + "$id": "#/definitions/ipv4_services", + "type": "array", + "title": "Network services on IPv4 network", + "items": { + "$id": "#/definitions/ipv4_services/items", + "$ref": "#/definitions/ipv4_service" + } + }, + "ipv6_services": { + "$id": "#/definitions/ipv6_services", + "type": "array", + "title": "Network services on IPv6 network", + "items": { + "$id": "#/definitions/ipv6_services/items", + "$ref": "#/definitions/ipv6_service" + } + }, + "services": { + "$id": "#/definitions/services", + "type": "array", + "title": "Network services", + "items": { + "$id": "#/definitions/services/items", + "anyOf": [ + { + "$ref": "#/definitions/ipv4_service" + }, + { + "$ref": "#/definitions/ipv6_service" + } + ] + } + } + } +} diff --git a/doc/ext/extra_specs.py b/doc/ext/extra_specs.py new file mode 100644 index 00000000000..534f5fa969e --- /dev/null +++ b/doc/ext/extra_specs.py @@ -0,0 +1,239 @@ +# Copyright 2020, Red Hat, Inc. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +"""Display extra specs in documentation. + +Provides a single directive that can be used to list all extra specs validators +and, thus, document all extra specs that nova recognizes and supports. +""" + +import typing as ty + +from docutils import nodes +from docutils.parsers import rst +from docutils.parsers.rst import directives +from docutils import statemachine +from sphinx import addnodes +from sphinx import directives as sphinx_directives +from sphinx import domains +from sphinx import roles +from sphinx.util import logging +from sphinx.util import nodes as sphinx_nodes + +from nova.api.validation.extra_specs import base +from nova.api.validation.extra_specs import validators + +LOG = logging.getLogger(__name__) + + +class ExtraSpecXRefRole(roles.XRefRole): + """Cross reference a extra spec. + + Example:: + + :nova:extra-spec:`hw:cpu_policy` + """ + + def __init__(self): + super(ExtraSpecXRefRole, self).__init__( + warn_dangling=True, + ) + + def process_link(self, env, refnode, has_explicit_title, title, target): + # The anchor for the extra spec link is the extra spec name + return target, target + + +class ExtraSpecDirective(sphinx_directives.ObjectDescription): + """Document an individual extra spec. + + Accepts one required argument - the extra spec name, including the group. + + Example:: + + .. 
extra-spec:: hw:cpu_policy + """ + + def handle_signature(self, sig, signode): + """Transform an option description into RST nodes.""" + # Insert a node into the output showing the extra spec name + signode += addnodes.desc_name(sig, sig) + signode['allnames'] = [sig] + return sig + + def add_target_and_index(self, firstname, sig, signode): + cached_options = self.env.domaindata['nova']['extra_specs'] + signode['ids'].append(sig) + self.state.document.note_explicit_target(signode) + # Store the location of the option definition for later use in + # resolving cross-references + cached_options[sig] = self.env.docname + + +def _indent(text, count=1): + if not text: + return text + + padding = ' ' * (4 * count) + return padding + text + + +def _format_validator_group_help( + validators: ty.Dict[str, base.ExtraSpecValidator], + summary: bool, +): + """Generate reStructuredText snippets for a group of validators.""" + for validator in validators.values(): + for line in _format_validator_help(validator, summary): + yield line + + +def _format_validator_help( + validator: base.ExtraSpecValidator, + summary: bool, +): + """Generate reStucturedText snippets for the provided validator. + + :param validator: A validator to document. + :type validator: nova.api.validation.extra_specs.base.ExtraSpecValidator + """ + yield f'.. nova:extra-spec:: {validator.name}' + yield '' + + # NOTE(stephenfin): We don't print the pattern, if present, since it's too + # internal. Instead, the description should provide this information in a + # human-readable format + yield _indent(f':Type: {validator.value["type"].__name__}') + + if validator.value.get('min') is not None: + yield _indent(f':Min: {validator.value["min"]}') + + if validator.value.get('max') is not None: + yield _indent(f':Max: {validator.value["max"]}') + + yield '' + + if not summary: + for line in validator.description.splitlines(): + yield _indent(line) + + yield '' + + if validator.deprecated: + yield _indent('.. warning::') + yield _indent( + 'This extra spec has been deprecated and should not be used.', 2 + ) + yield '' + + +class ExtraSpecGroupDirective(rst.Directive): + """Document extra specs belonging to the specified group. + + Accepts one optional argument - the extra spec group - and one option - + whether to show a summary view only (omit descriptions). Example:: + + .. 
extra-specs:: hw_rng + :summary: + """ + + required_arguments = 0 + optional_arguments = 1 + option_spec = { + 'summary': directives.flag, + } + has_content = False + + def run(self): + result = statemachine.ViewList() + source_name = self.state.document.current_source + + group = self.arguments[0] if self.arguments else None + summary = self.options.get('summary', False) + + if group: + group_validators = { + n.split(':', 1)[1]: v for n, v in validators.VALIDATORS.items() + if ':' in n and n.split(':', 1)[0].split('{')[0] == group + } + else: + group_validators = { + n: v for n, v in validators.VALIDATORS.items() + if ':' not in n + } + + if not group_validators: + LOG.warning("No validators found for group '%s'", group or '') + + for count, line in enumerate( + _format_validator_group_help(group_validators, summary) + ): + result.append(line, source_name, count) + LOG.debug('%5d%s%s', count, ' ' if line else '', line) + + node = nodes.section() + node.document = self.state.document + + sphinx_nodes.nested_parse_with_titles(self.state, result, node) + + return node.children + + +class NovaDomain(domains.Domain): + """nova domain.""" + name = 'nova' + label = 'nova' + object_types = { + 'configoption': domains.ObjType( + 'extra spec', 'spec', + ), + } + directives = { + 'extra-spec': ExtraSpecDirective, + } + roles = { + 'extra-spec': ExtraSpecXRefRole(), + } + initial_data = { + 'extra_specs': {}, + } + + def resolve_xref( + self, env, fromdocname, builder, typ, target, node, contnode, + ): + """Resolve cross-references""" + if typ == 'extra-spec': + return sphinx_nodes.make_refnode( + builder, + fromdocname, + env.domaindata['nova']['extra_specs'][target], + target, + contnode, + target, + ) + return None + + def merge_domaindata(self, docnames, otherdata): + for target, docname in otherdata['extra_specs'].items(): + if docname in docnames: + self.data['extra_specs'][target] = docname + + +def setup(app): + app.add_domain(NovaDomain) + app.add_directive('extra-specs', ExtraSpecGroupDirective) + return { + 'parallel_read_safe': True, + 'parallel_write_safe': True, + } diff --git a/doc/ext/feature_matrix.py b/doc/ext/feature_matrix.py index 4934e5894f1..62f4ec0b943 100644 --- a/doc/ext/feature_matrix.py +++ b/doc/ext/feature_matrix.py @@ -20,10 +20,8 @@ """ +import configparser import re -import sys - -from six.moves import configparser from docutils import nodes from docutils.parsers import rst @@ -159,16 +157,12 @@ def _load_feature_matrix(self): :returns: Matrix instance """ - # SafeConfigParser was deprecated in Python 3.2 - if sys.version_info >= (3, 2): - cfg = configparser.ConfigParser() - else: - cfg = configparser.SafeConfigParser() + cfg = configparser.ConfigParser() env = self.state.document.settings.env filename = self.arguments[0] rel_fpath, fpath = env.relfn2path(filename) with open(fpath) as fp: - cfg.readfp(fp) + cfg.read_file(fp) # This ensures that the docs are rebuilt whenever the # .ini file changes @@ -576,4 +570,8 @@ def _create_notes_paragraph(self, notes): def setup(app): app.add_directive('feature_matrix', FeatureMatrixDirective) - app.add_stylesheet('feature-matrix.css') + app.add_css_file('feature-matrix.css') + return { + 'parallel_read_safe': True, + 'parallel_write_safe': True, + } diff --git a/doc/ext/versioned_notifications.py b/doc/ext/versioned_notifications.py index 7972b872770..244e0783b8e 100644 --- a/doc/ext/versioned_notifications.py +++ b/doc/ext/versioned_notifications.py @@ -61,6 +61,10 @@ def _import_all_notification_packages(self): 
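
One subtlety in ``ExtraSpecGroupDirective.run`` above is the group filter: the text before the first ``:`` in a validator name is treated as the group, and any ``{...}`` suffix is stripped so that parameterised groups still match their base name. A toy illustration of that rule, with made-up validator names rather than nova's real registry:

    # Toy illustration of the group-matching comprehension above; the
    # validator names and values here are made up.
    SAMPLE_VALIDATORS = {
        "hw:cpu_policy": "<validator>",
        "hw:mem_page_size": "<validator>",
        "trait{group}:trait": "<validator>",
        "hide_hypervisor_id": "<validator>",  # ungrouped: no ':' in the name
    }

    def group_members(group):
        return {
            name.split(":", 1)[1]: value
            for name, value in SAMPLE_VALIDATORS.items()
            if ":" in name and name.split(":", 1)[0].split("{")[0] == group
        }

    print(group_members("hw"))     # {'cpu_policy': ..., 'mem_page_size': ...}
    print(group_members("trait"))  # matches via the 'trait{group}' prefix
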
pkgutil.iter_modules(nova.notifications.objects.__path__)))) def _collect_notifications(self): + # If you do not see your notification sample showing up in the docs + # be sure that the sample filename matches what is registered on the + # versioned notification object class using the + # @base.notification_sample decorator. self._import_all_notification_packages() base.NovaObjectRegistry.register_notification_objects() notifications = {} @@ -157,5 +161,9 @@ def _build_markup(self, notifications): def setup(app): - app.add_directive('versioned_notifications', - VersionedNotificationDirective) + app.add_directive( + 'versioned_notifications', VersionedNotificationDirective) + return { + 'parallel_read_safe': True, + 'parallel_write_safe': True, + } diff --git a/doc/notification_samples/aggregate-cache_images-end.json b/doc/notification_samples/aggregate-cache_images-end.json new file mode 100644 index 00000000000..4c41e0add2f --- /dev/null +++ b/doc/notification_samples/aggregate-cache_images-end.json @@ -0,0 +1,11 @@ +{ + "priority": "INFO", + "payload": { + "$ref": "common_payloads/AggregatePayload.json#", + "nova_object.data": { + "hosts": ["compute"] + } + }, + "event_type": "aggregate.cache_images.end", + "publisher_id": "nova-api:fake-mini" +} diff --git a/doc/notification_samples/aggregate-cache_images-progress.json b/doc/notification_samples/aggregate-cache_images-progress.json new file mode 100644 index 00000000000..f5eaee34476 --- /dev/null +++ b/doc/notification_samples/aggregate-cache_images-progress.json @@ -0,0 +1,20 @@ +{ + "priority": "INFO", + "payload": { + "nova_object.version": "1.0", + "nova_object.namespace": "nova", + "nova_object.name": "AggregateCachePayload", + "nova_object.data": { + "name": "my-aggregate", + "uuid": "788608ec-ebdc-45c5-bc7f-e5f24ab92c80", + "host": "compute", + "total": 1, + "index": 1, + "images_cached": ["155d900f-4e14-4e4c-a73d-069cbf4541e6"], + "images_failed": [], + "id": 1 + } + }, + "event_type": "aggregate.cache_images.progress", + "publisher_id": "nova-conductor:fake-mini" +} diff --git a/doc/notification_samples/aggregate-cache_images-start.json b/doc/notification_samples/aggregate-cache_images-start.json new file mode 100644 index 00000000000..98f38c97664 --- /dev/null +++ b/doc/notification_samples/aggregate-cache_images-start.json @@ -0,0 +1,11 @@ +{ + "priority": "INFO", + "payload": { + "$ref": "common_payloads/AggregatePayload.json#", + "nova_object.data": { + "hosts": ["compute"] + } + }, + "event_type": "aggregate.cache_images.start", + "publisher_id": "nova-api:fake-mini" +} diff --git a/doc/notification_samples/common_payloads/BandwidthPayload.json b/doc/notification_samples/common_payloads/BandwidthPayload.json deleted file mode 100644 index dd1733c464f..00000000000 --- a/doc/notification_samples/common_payloads/BandwidthPayload.json +++ /dev/null @@ -1,10 +0,0 @@ -{ - "nova_object.data": { - "network_name": "private-network", - "out_bytes": 0, - "in_bytes": 0 - }, - "nova_object.name": "BandwidthPayload", - "nova_object.namespace": "nova", - "nova_object.version": "1.0" -} diff --git a/doc/notification_samples/common_payloads/ComputeTaskPayload.json b/doc/notification_samples/common_payloads/ComputeTaskPayload.json new file mode 100644 index 00000000000..cecddffb6fe --- /dev/null +++ b/doc/notification_samples/common_payloads/ComputeTaskPayload.json @@ -0,0 +1,25 @@ +{ + "nova_object.version": "1.0", + "nova_object.namespace": "nova", + "nova_object.name": "ComputeTaskPayload", + "nova_object.data": { + "instance_uuid": 
"d5e6a7b7-80e5-4166-85a3-cd6115201082", + "reason": {"$ref": "ExceptionPayload.json#"}, + "request_spec": { + "$ref": "RequestSpecPayload.json#", + "nova_object.data": { + "flavor": { + "nova_object.data": { + "extra_specs": { + "hw:numa_cpus.0": "0", + "hw:numa_mem.0": "512", + "hw:numa_nodes": "1" + } + } + }, + "numa_topology": {"$ref": "InstanceNUMATopologyPayload.json#"} + } + }, + "state": "error" + } +} diff --git a/doc/notification_samples/common_payloads/ExceptionPayload.json b/doc/notification_samples/common_payloads/ExceptionPayload.json new file mode 100644 index 00000000000..c9dd8150473 --- /dev/null +++ b/doc/notification_samples/common_payloads/ExceptionPayload.json @@ -0,0 +1,12 @@ +{ + "nova_object.version": "1.1", + "nova_object.namespace": "nova", + "nova_object.name": "ExceptionPayload", + "nova_object.data": { + "function_name": "_schedule_instances", + "module_name": "nova.conductor.manager", + "exception": "NoValidHost", + "exception_message": "No valid host was found. There are not enough hosts available.", + "traceback": "Traceback (most recent call last):\n File \"nova/conductor/manager.py\", line ..." + } +} diff --git a/doc/notification_samples/common_payloads/ImageMetaPayload.json b/doc/notification_samples/common_payloads/ImageMetaPayload.json new file mode 100644 index 00000000000..5ea3ed7f5ad --- /dev/null +++ b/doc/notification_samples/common_payloads/ImageMetaPayload.json @@ -0,0 +1,28 @@ +{ + "nova_object.namespace": "nova", + "nova_object.data": { + "checksum": null, + "container_format": "raw", + "created_at": "2011-01-01T01:02:03Z", + "direct_url": null, + "disk_format": "raw", + "id": "155d900f-4e14-4e4c-a73d-069cbf4541e6", + "min_disk": 0, + "min_ram": 0, + "name": "fakeimage123456", + "owner": null, + "properties": {"$ref":"ImageMetaPropsPayload.json#"}, + "protected": false, + "size": 25165824, + "status": "active", + "tags": [ + "tag1", + "tag2" + ], + "updated_at": "2011-01-01T01:02:03Z", + "virtual_size": null, + "visibility": "public" + }, + "nova_object.name": "ImageMetaPayload", + "nova_object.version": "1.0" +} diff --git a/doc/notification_samples/common_payloads/ImageMetaPropsPayload.json b/doc/notification_samples/common_payloads/ImageMetaPropsPayload.json new file mode 100644 index 00000000000..ef9d49647db --- /dev/null +++ b/doc/notification_samples/common_payloads/ImageMetaPropsPayload.json @@ -0,0 +1,8 @@ +{ + "nova_object.namespace": "nova", + "nova_object.data": { + "hw_architecture": "x86_64" + }, + "nova_object.name": "ImageMetaPropsPayload", + "nova_object.version": "1.8" +} diff --git a/doc/notification_samples/common_payloads/InstanceActionPayload.json b/doc/notification_samples/common_payloads/InstanceActionPayload.json index c0886a65939..4906f1428ed 100644 --- a/doc/notification_samples/common_payloads/InstanceActionPayload.json +++ b/doc/notification_samples/common_payloads/InstanceActionPayload.json @@ -5,5 +5,5 @@ }, "nova_object.name":"InstanceActionPayload", "nova_object.namespace":"nova", - "nova_object.version":"1.7" + "nova_object.version":"1.8" } diff --git a/doc/notification_samples/common_payloads/InstanceActionRebuildPayload.json b/doc/notification_samples/common_payloads/InstanceActionRebuildPayload.json index 35cf2646478..2d05adadedf 100644 --- a/doc/notification_samples/common_payloads/InstanceActionRebuildPayload.json +++ b/doc/notification_samples/common_payloads/InstanceActionRebuildPayload.json @@ -9,5 +9,5 @@ ] }, "nova_object.name": "InstanceActionRebuildPayload", - "nova_object.version": "1.8" + 
"nova_object.version": "1.9" } diff --git a/doc/notification_samples/common_payloads/InstanceActionRescuePayload.json b/doc/notification_samples/common_payloads/InstanceActionRescuePayload.json index e70dd84b9f8..69703722256 100644 --- a/doc/notification_samples/common_payloads/InstanceActionRescuePayload.json +++ b/doc/notification_samples/common_payloads/InstanceActionRescuePayload.json @@ -4,5 +4,5 @@ "rescue_image_ref": "a2459075-d96c-40d5-893e-577ff92e721c" }, "nova_object.name": "InstanceActionRescuePayload", - "nova_object.version": "1.2" + "nova_object.version": "1.3" } diff --git a/doc/notification_samples/common_payloads/InstanceActionResizePrepPayload.json b/doc/notification_samples/common_payloads/InstanceActionResizePrepPayload.json index cf7146b125b..9c32576d69e 100644 --- a/doc/notification_samples/common_payloads/InstanceActionResizePrepPayload.json +++ b/doc/notification_samples/common_payloads/InstanceActionResizePrepPayload.json @@ -27,5 +27,5 @@ "task_state": "resize_prep" }, "nova_object.name": "InstanceActionResizePrepPayload", - "nova_object.version": "1.2" + "nova_object.version": "1.3" } diff --git a/doc/notification_samples/common_payloads/InstanceActionSnapshotPayload.json b/doc/notification_samples/common_payloads/InstanceActionSnapshotPayload.json index 371e1de3ae0..d0dd7b7f5b5 100644 --- a/doc/notification_samples/common_payloads/InstanceActionSnapshotPayload.json +++ b/doc/notification_samples/common_payloads/InstanceActionSnapshotPayload.json @@ -5,5 +5,5 @@ }, "nova_object.name":"InstanceActionSnapshotPayload", "nova_object.namespace":"nova", - "nova_object.version":"1.8" + "nova_object.version":"1.9" } diff --git a/doc/notification_samples/common_payloads/InstanceActionVolumePayload.json b/doc/notification_samples/common_payloads/InstanceActionVolumePayload.json index 289fd3218ed..50108e82157 100644 --- a/doc/notification_samples/common_payloads/InstanceActionVolumePayload.json +++ b/doc/notification_samples/common_payloads/InstanceActionVolumePayload.json @@ -5,5 +5,5 @@ }, "nova_object.name": "InstanceActionVolumePayload", "nova_object.namespace": "nova", - "nova_object.version": "1.5" + "nova_object.version": "1.6" } \ No newline at end of file diff --git a/doc/notification_samples/common_payloads/InstanceActionVolumeSwapPayload.json b/doc/notification_samples/common_payloads/InstanceActionVolumeSwapPayload.json index e0445b4375b..ac56306a742 100644 --- a/doc/notification_samples/common_payloads/InstanceActionVolumeSwapPayload.json +++ b/doc/notification_samples/common_payloads/InstanceActionVolumeSwapPayload.json @@ -6,5 +6,5 @@ }, "nova_object.name": "InstanceActionVolumeSwapPayload", "nova_object.namespace": "nova", - "nova_object.version": "1.7" + "nova_object.version": "1.8" } diff --git a/doc/notification_samples/common_payloads/InstanceCreatePayload.json b/doc/notification_samples/common_payloads/InstanceCreatePayload.json index 3586c9166ed..c7e6adc981a 100644 --- a/doc/notification_samples/common_payloads/InstanceCreatePayload.json +++ b/doc/notification_samples/common_payloads/InstanceCreatePayload.json @@ -20,8 +20,9 @@ "trusted_image_certificates": [ "cert-id-1", "cert-id-2" - ] + ], + "instance_name": "instance-00000001" }, "nova_object.name":"InstanceCreatePayload", - "nova_object.version": "1.10" + "nova_object.version": "1.12" } diff --git a/doc/notification_samples/common_payloads/InstanceExistsPayload.json b/doc/notification_samples/common_payloads/InstanceExistsPayload.json index 735a8ce206f..d045286feb0 100644 --- 
a/doc/notification_samples/common_payloads/InstanceExistsPayload.json +++ b/doc/notification_samples/common_payloads/InstanceExistsPayload.json @@ -2,11 +2,9 @@ "$ref": "InstancePayload.json", "nova_object.data":{ "audit_period": {"$ref": "AuditPeriodPayload.json#"}, - "bandwidth": [ - {"$ref": "BandwidthPayload.json#"} - ] + "bandwidth": [] }, "nova_object.name":"InstanceExistsPayload", "nova_object.namespace":"nova", - "nova_object.version":"1.1" + "nova_object.version":"1.2" } diff --git a/doc/notification_samples/common_payloads/InstanceNUMACellPayload.json b/doc/notification_samples/common_payloads/InstanceNUMACellPayload.json new file mode 100644 index 00000000000..221d0d1b9f1 --- /dev/null +++ b/doc/notification_samples/common_payloads/InstanceNUMACellPayload.json @@ -0,0 +1,17 @@ +{ + "nova_object.version": "1.2", + "nova_object.namespace": "nova", + "nova_object.name": "InstanceNUMACellPayload", + "nova_object.data": { + "cpu_pinning_raw": null, + "cpu_policy": null, + "cpu_thread_policy": null, + "cpu_topology": null, + "cpuset": [0], + "pcpuset": [], + "cpuset_reserved": null, + "id": 0, + "memory": 512, + "pagesize": null + } +} diff --git a/doc/notification_samples/common_payloads/InstanceNUMATopologyPayload.json b/doc/notification_samples/common_payloads/InstanceNUMATopologyPayload.json new file mode 100644 index 00000000000..cf28b2f4332 --- /dev/null +++ b/doc/notification_samples/common_payloads/InstanceNUMATopologyPayload.json @@ -0,0 +1,12 @@ +{ + "nova_object.version": "1.0", + "nova_object.namespace": "nova", + "nova_object.name": "InstanceNUMATopologyPayload", + "nova_object.data": { + "cells": [ + {"$ref": "InstanceNUMACellPayload.json#"} + ], + "emulator_threads_policy": null, + "instance_uuid": "75cab9f7-57e2-4bd1-984f-a0383d9ee60e" + } +} diff --git a/doc/notification_samples/common_payloads/InstancePCIRequestsPayload.json b/doc/notification_samples/common_payloads/InstancePCIRequestsPayload.json new file mode 100644 index 00000000000..3ab04139f1c --- /dev/null +++ b/doc/notification_samples/common_payloads/InstancePCIRequestsPayload.json @@ -0,0 +1,9 @@ +{ + "nova_object.version": "1.0", + "nova_object.namespace": "nova", + "nova_object.name": "InstancePCIRequestsPayload", + "nova_object.data":{ + "instance_uuid": "d5e6a7b7-80e5-4166-85a3-cd6115201082", + "requests": [] + } +} diff --git a/doc/notification_samples/common_payloads/InstancePayload.json b/doc/notification_samples/common_payloads/InstancePayload.json index f92c6b43b08..9053ba76d26 100644 --- a/doc/notification_samples/common_payloads/InstancePayload.json +++ b/doc/notification_samples/common_payloads/InstancePayload.json @@ -37,9 +37,10 @@ "uuid":"178b0921-8f85-4257-88b6-2e743b5a975c", "request_id": "req-5b6c791d-5709-4f36-8fbe-c3e02869e35d", "action_initiator_user": "fake", - "action_initiator_project": "6f70656e737461636b20342065766572" + "action_initiator_project": "6f70656e737461636b20342065766572", + "locked_reason": null }, "nova_object.name":"InstancePayload", "nova_object.namespace":"nova", - "nova_object.version":"1.7" + "nova_object.version":"1.8" } diff --git a/doc/notification_samples/common_payloads/InstanceUpdatePayload.json b/doc/notification_samples/common_payloads/InstanceUpdatePayload.json index 39072fa1b74..2c1c63d7a65 100644 --- a/doc/notification_samples/common_payloads/InstanceUpdatePayload.json +++ b/doc/notification_samples/common_payloads/InstanceUpdatePayload.json @@ -29,5 +29,5 @@ }, "nova_object.name": "InstanceUpdatePayload", "nova_object.namespace": "nova", - 
"nova_object.version": "1.8" + "nova_object.version": "1.9" } \ No newline at end of file diff --git a/doc/notification_samples/common_payloads/IpPayload.json b/doc/notification_samples/common_payloads/IpPayload.json index d1f108e8171..bf651c27962 100644 --- a/doc/notification_samples/common_payloads/IpPayload.json +++ b/doc/notification_samples/common_payloads/IpPayload.json @@ -8,7 +8,7 @@ "port_uuid": "ce531f90-199f-48c0-816c-13e38010b442", "meta": {}, "version": 4, - "label": "private-network", + "label": "private", "device_name": "tapce531f90-19" } } diff --git a/doc/notification_samples/common_payloads/RequestSpecPayload.json b/doc/notification_samples/common_payloads/RequestSpecPayload.json new file mode 100644 index 00000000000..3301c18c589 --- /dev/null +++ b/doc/notification_samples/common_payloads/RequestSpecPayload.json @@ -0,0 +1,24 @@ +{ + "nova_object.namespace": "nova", + "nova_object.data": { + "availability_zone": null, + "flavor": {"$ref": "FlavorPayload.json#"}, + "ignore_hosts": null, + "image": {"$ref": "ImageMetaPayload.json#"}, + "instance_uuid": "d5e6a7b7-80e5-4166-85a3-cd6115201082", + "num_instances": 1, + "numa_topology": null, + "pci_requests": {"$ref": "InstancePCIRequestsPayload.json#"}, + "project_id": "6f70656e737461636b20342065766572", + "scheduler_hints": {}, + "security_groups": ["default"], + "force_hosts": null, + "force_nodes": null, + "instance_group": null, + "requested_destination": null, + "retry": null, + "user_id": "fake" + }, + "nova_object.name": "RequestSpecPayload", + "nova_object.version": "1.1" +} diff --git a/doc/notification_samples/compute_task-build_instances-error.json b/doc/notification_samples/compute_task-build_instances-error.json new file mode 100644 index 00000000000..e904e8c1981 --- /dev/null +++ b/doc/notification_samples/compute_task-build_instances-error.json @@ -0,0 +1,6 @@ +{ + "event_type": "compute_task.build_instances.error", + "payload": {"$ref":"common_payloads/ComputeTaskPayload.json#"}, + "priority": "ERROR", + "publisher_id": "nova-conductor:fake-mini" +} diff --git a/doc/notification_samples/compute_task-migrate_server-error.json b/doc/notification_samples/compute_task-migrate_server-error.json new file mode 100644 index 00000000000..848b4da37f9 --- /dev/null +++ b/doc/notification_samples/compute_task-migrate_server-error.json @@ -0,0 +1,11 @@ +{ + "event_type": "compute_task.migrate_server.error", + "payload": { + "$ref":"common_payloads/ComputeTaskPayload.json#", + "nova_object.data":{ + "state": "active" + } + }, + "priority": "ERROR", + "publisher_id": "nova-conductor:fake-mini" +} diff --git a/doc/notification_samples/compute_task-rebuild_server-error.json b/doc/notification_samples/compute_task-rebuild_server-error.json new file mode 100644 index 00000000000..398600560b7 --- /dev/null +++ b/doc/notification_samples/compute_task-rebuild_server-error.json @@ -0,0 +1,8 @@ +{ + "event_type": "compute_task.rebuild_server.error", + "payload": { + "$ref": "common_payloads/ComputeTaskPayload.json#" + }, + "priority": "ERROR", + "publisher_id": "nova-conductor:fake-mini" +} diff --git a/doc/notification_samples/flavor-update.json b/doc/notification_samples/flavor-update.json index e6af0d70c57..9b2a719f5fd 100644 --- a/doc/notification_samples/flavor-update.json +++ b/doc/notification_samples/flavor-update.json @@ -11,8 +11,7 @@ "disabled": false, "vcpus": 2, "extra_specs": { - "key1": "value1", - "key2": "value2" + "hw:numa_nodes": "2" }, "projects": ["fake_tenant"], "swap": 0, diff --git 
a/doc/notification_samples/instance-delete-end_compute_down.json b/doc/notification_samples/instance-delete-end_compute_down.json new file mode 100644 index 00000000000..d346095eb34 --- /dev/null +++ b/doc/notification_samples/instance-delete-end_compute_down.json @@ -0,0 +1,15 @@ +{ + "event_type":"instance.delete.end", + "payload":{ + "$ref":"common_payloads/InstanceActionPayload.json#", + "nova_object.data":{ + "block_devices":[], + "deleted_at":"2012-10-29T13:42:11Z", + "ip_addresses":[], + "state":"deleted", + "terminated_at":"2012-10-29T13:42:11Z" + } + }, + "priority":"INFO", + "publisher_id":"nova-api:fake-mini" +} diff --git a/doc/notification_samples/instance-delete-end_not_scheduled.json b/doc/notification_samples/instance-delete-end_not_scheduled.json new file mode 100644 index 00000000000..1fd3c6959f8 --- /dev/null +++ b/doc/notification_samples/instance-delete-end_not_scheduled.json @@ -0,0 +1,20 @@ +{ + "event_type":"instance.delete.end", + "payload":{ + "$ref":"common_payloads/InstanceActionPayload.json#", + "nova_object.data":{ + "availability_zone": null, + "block_devices":[], + "deleted_at":"2012-10-29T13:42:11Z", + "host":null, + "ip_addresses":[], + "launched_at":null, + "node":null, + "power_state":"pending", + "state":"deleted", + "terminated_at":"2012-10-29T13:42:11Z" + } + }, + "priority":"INFO", + "publisher_id":"nova-api:fake-mini" +} diff --git a/doc/notification_samples/instance-delete-start_compute_down.json b/doc/notification_samples/instance-delete-start_compute_down.json new file mode 100644 index 00000000000..e3ceaf56691 --- /dev/null +++ b/doc/notification_samples/instance-delete-start_compute_down.json @@ -0,0 +1,11 @@ +{ + "event_type":"instance.delete.start", + "payload":{ + "$ref":"common_payloads/InstanceActionPayload.json#", + "nova_object.data":{ + "task_state":"deleting" + } + }, + "priority":"INFO", + "publisher_id":"nova-api:fake-mini" +} diff --git a/doc/notification_samples/instance-delete-start_not_scheduled.json b/doc/notification_samples/instance-delete-start_not_scheduled.json new file mode 100644 index 00000000000..60597f9e851 --- /dev/null +++ b/doc/notification_samples/instance-delete-start_not_scheduled.json @@ -0,0 +1,19 @@ +{ + "event_type":"instance.delete.start", + "payload":{ + "$ref":"common_payloads/InstanceActionPayload.json#", + "nova_object.data":{ + "availability_zone": null, + "block_devices":[], + "host":null, + "ip_addresses":[], + "launched_at":null, + "node":null, + "power_state":"pending", + "state":"error", + "task_state":"deleting" + } + }, + "priority":"INFO", + "publisher_id":"nova-api:fake-mini" +} diff --git a/doc/notification_samples/instance-evacuate.json b/doc/notification_samples/instance-evacuate.json index c3251182c41..57f603de093 100644 --- a/doc/notification_samples/instance-evacuate.json +++ b/doc/notification_samples/instance-evacuate.json @@ -5,7 +5,6 @@ "nova_object.data": { "host": "host2", "node": "host2", - "power_state": "pending", "task_state": "rebuilding", "action_initiator_user": "admin" } diff --git a/doc/notification_samples/instance-interface_attach-end.json b/doc/notification_samples/instance-interface_attach-end.json index 273cd96f0e0..8fb0ffb6f63 100644 --- a/doc/notification_samples/instance-interface_attach-end.json +++ b/doc/notification_samples/instance-interface_attach-end.json @@ -11,7 +11,7 @@ "device_name": "tapce531f90-19", "address": "192.168.1.3", "version": 4, - "label": "private-network", + "label": "private", "port_uuid": "ce531f90-199f-48c0-816c-13e38010b442", "mac": 
"fa:16:3e:4c:2c:30", "meta": {} @@ -25,7 +25,7 @@ "device_name": "tap88dae9fa-0d", "address": "192.168.1.30", "version": 4, - "label": "private-network", + "label": "private", "port_uuid": "88dae9fa-0dc6-49e3-8c29-3abc41e99ac9", "mac": "00:0c:29:0d:11:74", "meta": {} diff --git a/doc/notification_samples/instance-interface_detach-start.json b/doc/notification_samples/instance-interface_detach-start.json index 6de3067dfe6..591b34c58a5 100644 --- a/doc/notification_samples/instance-interface_detach-start.json +++ b/doc/notification_samples/instance-interface_detach-start.json @@ -10,7 +10,7 @@ "device_name": "tapce531f90-19", "address": "192.168.1.3", "version": 4, - "label": "private-network", + "label": "private", "port_uuid": "ce531f90-199f-48c0-816c-13e38010b442", "mac": "fa:16:3e:4c:2c:30", "meta": {} @@ -24,7 +24,7 @@ "device_name": "tap88dae9fa-0d", "address": "192.168.1.30", "version": 4, - "label": "private-network", + "label": "private", "port_uuid": "88dae9fa-0dc6-49e3-8c29-3abc41e99ac9", "mac": "00:0c:29:0d:11:74", "meta": {} diff --git a/doc/notification_samples/instance-live_migration_post_dest-end.json b/doc/notification_samples/instance-live_migration_post_dest-end.json index fe3e08bf6ed..e98d2dabd7c 100644 --- a/doc/notification_samples/instance-live_migration_post_dest-end.json +++ b/doc/notification_samples/instance-live_migration_post_dest-end.json @@ -5,7 +5,6 @@ "nova_object.data":{ "host": "host2", "node": "host2", - "power_state": "pending", "action_initiator_user": "admin" } }, diff --git a/doc/notification_samples/instance-live_migration_rollback-start.json b/doc/notification_samples/instance-live_migration_rollback-start.json index 5bffa6057e6..148958d3d59 100644 --- a/doc/notification_samples/instance-live_migration_rollback-start.json +++ b/doc/notification_samples/instance-live_migration_rollback-start.json @@ -3,7 +3,8 @@ "payload":{ "$ref":"common_payloads/InstanceActionPayload.json#", "nova_object.data":{ - "action_initiator_user": "admin" + "action_initiator_user": "admin", + "task_state": "migrating" } }, "priority":"INFO", diff --git a/doc/notification_samples/instance-live_migration_rollback_dest-end.json b/doc/notification_samples/instance-live_migration_rollback_dest-end.json index fe36e55be32..745c6990cae 100644 --- a/doc/notification_samples/instance-live_migration_rollback_dest-end.json +++ b/doc/notification_samples/instance-live_migration_rollback_dest-end.json @@ -3,7 +3,8 @@ "payload": { "$ref": "common_payloads/InstanceActionPayload.json#", "nova_object.data": { - "action_initiator_user": "admin" + "action_initiator_user": "admin", + "task_state": "migrating" } }, "priority": "INFO", diff --git a/doc/notification_samples/instance-live_migration_rollback_dest-start.json b/doc/notification_samples/instance-live_migration_rollback_dest-start.json index 422f7914d35..32858d32526 100644 --- a/doc/notification_samples/instance-live_migration_rollback_dest-start.json +++ b/doc/notification_samples/instance-live_migration_rollback_dest-start.json @@ -3,7 +3,8 @@ "payload": { "$ref": "common_payloads/InstanceActionPayload.json#", "nova_object.data": { - "action_initiator_user": "admin" + "action_initiator_user": "admin", + "task_state": "migrating" } }, "priority": "INFO", diff --git a/doc/notification_samples/instance-lock-with-reason.json b/doc/notification_samples/instance-lock-with-reason.json new file mode 100644 index 00000000000..45a6847f54f --- /dev/null +++ b/doc/notification_samples/instance-lock-with-reason.json @@ -0,0 +1,12 @@ +{ + 
"event_type":"instance.lock", + "payload":{ + "$ref": "common_payloads/InstanceActionPayload.json#", + "nova_object.data":{ + "locked":true, + "locked_reason":"global warming" + } + }, + "priority":"INFO", + "publisher_id":"nova-api:fake-mini" +} diff --git a/doc/notification_samples/instance-lock.json b/doc/notification_samples/instance-lock.json index d542d947360..568f68b99fe 100644 --- a/doc/notification_samples/instance-lock.json +++ b/doc/notification_samples/instance-lock.json @@ -3,7 +3,8 @@ "payload":{ "$ref": "common_payloads/InstanceActionPayload.json#", "nova_object.data":{ - "locked":true + "locked":true, + "locked_reason": null } }, "priority":"INFO", diff --git a/doc/notification_samples/instance-shelve_offload-end.json b/doc/notification_samples/instance-shelve_offload-end.json index d7a563557cf..05df5ddd617 100644 --- a/doc/notification_samples/instance-shelve_offload-end.json +++ b/doc/notification_samples/instance-shelve_offload-end.json @@ -3,6 +3,7 @@ "payload":{ "$ref": "common_payloads/InstanceActionPayload.json#", "nova_object.data":{ + "availability_zone": null, "state": "shelved_offloaded", "power_state": "shutdown", "host": null, diff --git a/doc/notification_samples/instance-soft_delete-end.json b/doc/notification_samples/instance-soft_delete-end.json index 632a18369d0..eb25097b686 100644 --- a/doc/notification_samples/instance-soft_delete-end.json +++ b/doc/notification_samples/instance-soft_delete-end.json @@ -8,5 +8,5 @@ } }, "priority":"INFO", - "publisher_id":"nova-compute:compute" + "publisher_id":"nova-compute:fake-mini" } diff --git a/doc/notification_samples/instance-soft_delete-start.json b/doc/notification_samples/instance-soft_delete-start.json index 8557860aa81..d60e3dfd002 100644 --- a/doc/notification_samples/instance-soft_delete-start.json +++ b/doc/notification_samples/instance-soft_delete-start.json @@ -8,5 +8,5 @@ } }, "priority":"INFO", - "publisher_id":"nova-compute:compute" + "publisher_id":"nova-compute:fake-mini" } diff --git a/doc/notification_samples/libvirt-connect-error.json b/doc/notification_samples/libvirt-connect-error.json new file mode 100644 index 00000000000..1d29ac5fa5a --- /dev/null +++ b/doc/notification_samples/libvirt-connect-error.json @@ -0,0 +1,25 @@ +{ + "event_type": "libvirt.connect.error", + "payload": { + "nova_object.data": { + "reason": { + "nova_object.data": { + "exception": "libvirtError", + "exception_message": "Sample exception for versioned notification test.", + "function_name": "_get_connection", + "module_name": "nova.virt.libvirt.host", + "traceback": "Traceback (most recent call last):\n File \"nova/virt/libvirt/host.py\", line ..." 
+ }, + "nova_object.name": "ExceptionPayload", + "nova_object.namespace": "nova", + "nova_object.version": "1.1" + }, + "ip": "10.0.2.15" + }, + "nova_object.name": "LibvirtErrorPayload", + "nova_object.namespace": "nova", + "nova_object.version": "1.0" + }, + "priority": "ERROR", + "publisher_id": "nova-compute:compute" +} diff --git a/doc/notification_samples/scheduler-select_destinations-end.json b/doc/notification_samples/scheduler-select_destinations-end.json new file mode 100644 index 00000000000..76535710f1a --- /dev/null +++ b/doc/notification_samples/scheduler-select_destinations-end.json @@ -0,0 +1,6 @@ +{ + "priority": "INFO", + "payload": {"$ref": "common_payloads/RequestSpecPayload.json#"}, + "event_type": "scheduler.select_destinations.end", + "publisher_id": "nova-scheduler:fake-mini" +} diff --git a/doc/notification_samples/scheduler-select_destinations-start.json b/doc/notification_samples/scheduler-select_destinations-start.json new file mode 100644 index 00000000000..b5cacc141ba --- /dev/null +++ b/doc/notification_samples/scheduler-select_destinations-start.json @@ -0,0 +1,6 @@ +{ + "priority": "INFO", + "payload": {"$ref": "common_payloads/RequestSpecPayload.json#"}, + "event_type": "scheduler.select_destinations.start", + "publisher_id": "nova-scheduler:fake-mini" +} diff --git a/doc/notification_samples/volume-usage.json b/doc/notification_samples/volume-usage.json new file mode 100644 index 00000000000..03b89d34d6b --- /dev/null +++ b/doc/notification_samples/volume-usage.json @@ -0,0 +1,22 @@ +{ + "event_type": "volume.usage", + "payload": { + "nova_object.data": { + "availability_zone": "nova", + "instance_uuid": "88fde343-13a8-4047-84fb-2657d5e702f9", + "last_refreshed": "2012-10-29T13:42:11Z", + "project_id": "6f70656e737461636b20342065766572", + "read_bytes": 0, + "reads": 0, + "user_id": "fake", + "volume_id": "a07f71dc-8151-4e7d-a0cc-cd24a3f11113", + "write_bytes": 0, + "writes": 0 + }, + "nova_object.name": "VolumeUsagePayload", + "nova_object.namespace": "nova", + "nova_object.version": "1.0" + }, + "priority": "INFO", + "publisher_id": "nova-compute:compute" +} diff --git a/doc/requirements.txt b/doc/requirements.txt index fc1af5c4c94..df112fe733f 100644 --- a/doc/requirements.txt +++ b/doc/requirements.txt @@ -1,15 +1,16 @@ # The order of packages is significant, because pip processes them in the order # of appearance. Changing the order has an impact on the overall integration # process, which may cause wedges in the gate later. -sphinx!=1.6.6,!=1.6.7,>=1.6.2 # BSD +sphinx>=2.0.0,!=2.1.0 # BSD sphinxcontrib-actdiag>=0.8.5 # BSD sphinxcontrib-seqdiag>=0.8.4 # BSD -sphinx-feature-classification>=0.2.0 # Apache-2.0 +sphinxcontrib-svg2pdfconverter>=0.1.0 # BSD +sphinx-feature-classification>=1.1.0 # Apache-2.0 os-api-ref>=1.4.0 # Apache-2.0 -openstackdocstheme>=1.19.0 # Apache-2.0 +openstackdocstheme>=2.2.0 # Apache-2.0 # releasenotes -reno>=2.5.0 # Apache-2.0 +reno>=3.1.0 # Apache-2.0 # redirect tests in docs whereto>=0.3.0 # Apache-2.0 diff --git a/doc/source/_extra/.htaccess b/doc/source/_extra/.htaccess index 529f1ba33e2..5d361fc91c9 100644 --- a/doc/source/_extra/.htaccess +++ b/doc/source/_extra/.htaccess @@ -1,13 +1,7 @@ -# The following is generated with: -# -# git log --follow --name-status --format='%H' 2d0dfc632f.. 
-- doc/source | \ -# grep ^R | grep .rst | cut -f2- | \ -# sed -e 's|doc/source/|redirectmatch 301 ^/nova/([^/]+)/|' -e 's|doc/source/|/nova/$1/|' -e 's/.rst/.html$/' -e 's/.rst/.html/' | \ -# sort - redirectmatch 301 ^/nova/([^/]+)/addmethod.openstackapi.html$ /nova/$1/contributor/api-2.html redirectmatch 301 ^/nova/([^/]+)/admin/flavors2.html$ /nova/$1/admin/flavors.html redirectmatch 301 ^/nova/([^/]+)/admin/numa.html$ /nova/$1/admin/cpu-topologies.html +redirectmatch 301 ^/nova/([^/]+)/admin/quotas2.html$ /nova/$1/admin/quotas.html redirectmatch 301 ^/nova/([^/]+)/aggregates.html$ /nova/$1/user/aggregates.html redirectmatch 301 ^/nova/([^/]+)/api_microversion_dev.html$ /nova/$1/contributor/microversions.html redirectmatch 301 ^/nova/([^/]+)/api_microversion_history.html$ /nova/$1/reference/api-microversion-history.html @@ -21,12 +15,12 @@ redirectmatch 301 ^/nova/([^/]+)/conductor.html$ /nova/$1/user/conductor.html redirectmatch 301 ^/nova/([^/]+)/development.environment.html$ /nova/$1/contributor/development-environment.html redirectmatch 301 ^/nova/([^/]+)/devref/api.html /nova/$1/contributor/api.html redirectmatch 301 ^/nova/([^/]+)/devref/cells.html /nova/$1/user/cells.html -redirectmatch 301 ^/nova/([^/]+)/devref/filter_scheduler.html /nova/$1/user/filter-scheduler.html +redirectmatch 301 ^/nova/([^/]+)/devref/filter_scheduler.html /nova/$1/admin/scheduling.html # catch all, if we hit something in devref assume it moved to # reference unless we have already triggered a hit above. redirectmatch 301 ^/nova/([^/]+)/devref/([^/]+).html /nova/$1/reference/$2.html redirectmatch 301 ^/nova/([^/]+)/feature_classification.html$ /nova/$1/user/feature-classification.html -redirectmatch 301 ^/nova/([^/]+)/filter_scheduler.html$ /nova/$1/user/filter-scheduler.html +redirectmatch 301 ^/nova/([^/]+)/filter_scheduler.html$ /nova/$1/admin/scheduling.html redirectmatch 301 ^/nova/([^/]+)/gmr.html$ /nova/$1/reference/gmr.html redirectmatch 301 ^/nova/([^/]+)/how_to_get_involved.html$ /nova/$1/contributor/how-to-get-involved.html redirectmatch 301 ^/nova/([^/]+)/i18n.html$ /nova/$1/reference/i18n.html @@ -38,10 +32,7 @@ redirectmatch 301 ^/nova/([^/]+)/man/nova-cells.html$ /nova/$1/cli/nova-cells.ht # this is gone and never coming back, indicate that to the end users redirectmatch 301 ^/nova/([^/]+)/man/nova-compute.html$ /nova/$1/cli/nova-compute.html redirectmatch 301 ^/nova/([^/]+)/man/nova-conductor.html$ /nova/$1/cli/nova-conductor.html -redirectmatch 301 ^/nova/([^/]+)/man/nova-console.html$ /nova/$1/cli/nova-console.html -redirectmatch 301 ^/nova/([^/]+)/man/nova-consoleauth.html$ /nova/$1/cli/nova-consoleauth.html redirectmatch 301 ^/nova/([^/]+)/man/nova-dhcpbridge.html$ /nova/$1/cli/nova-dhcpbridge.html -redirectmatch 301 ^/nova/([^/]+)/man/nova-idmapshift.html$ /nova/$1/cli/nova-idmapshift.html redirectmatch 301 ^/nova/([^/]+)/man/nova-manage.html$ /nova/$1/cli/nova-manage.html redirectmatch 301 ^/nova/([^/]+)/man/nova-network.html$ /nova/$1/cli/nova-network.html redirectmatch 301 ^/nova/([^/]+)/man/nova-novncproxy.html$ /nova/$1/cli/nova-novncproxy.html @@ -50,7 +41,6 @@ redirectmatch 301 ^/nova/([^/]+)/man/nova-scheduler.html$ /nova/$1/cli/nova-sche redirectmatch 301 ^/nova/([^/]+)/man/nova-serialproxy.html$ /nova/$1/cli/nova-serialproxy.html redirectmatch 301 ^/nova/([^/]+)/man/nova-spicehtml5proxy.html$ /nova/$1/cli/nova-spicehtml5proxy.html redirectmatch 301 ^/nova/([^/]+)/man/nova-status.html$ /nova/$1/cli/nova-status.html -redirectmatch 301 ^/nova/([^/]+)/man/nova-xvpvncproxy.html$ 
/nova/$1/cli/nova-xvpvncproxy.html redirectmatch 301 ^/nova/([^/]+)/notifications.html$ /nova/$1/reference/notifications.html redirectmatch 301 ^/nova/([^/]+)/placement.html$ /nova/$1/user/placement.html redirectmatch 301 ^/nova/([^/]+)/placement_dev.html$ /nova/$1/contributor/placement.html @@ -72,8 +62,22 @@ redirectmatch 301 ^/nova/([^/]+)/testing/libvirt-numa.html$ /nova/$1/contributor redirectmatch 301 ^/nova/([^/]+)/testing/serial-console.html$ /nova/$1/contributor/testing/serial-console.html redirectmatch 301 ^/nova/([^/]+)/testing/zero-downtime-upgrade.html$ /nova/$1/contributor/testing/zero-downtime-upgrade.html redirectmatch 301 ^/nova/([^/]+)/threading.html$ /nova/$1/reference/threading.html -redirectmatch 301 ^/nova/([^/]+)/upgrade.html$ /nova/$1/user/upgrade.html -redirectmatch 301 ^/nova/([^/]+)/vendordata.html$ /nova/$1/user/vendordata.html +redirectmatch 301 ^/nova/([^/]+)/upgrade.html$ /nova/$1/admin/upgrades.html +redirectmatch 301 ^/nova/([^/]+)/user/aggregates.html$ /nova/$1/admin/aggregates.html +redirectmatch 301 ^/nova/([^/]+)/user/cellsv2_layout.html$ /nova/$1/user/cellsv2-layout.html +redirectmatch 301 ^/nova/([^/]+)/user/config-drive.html$ /nova/$1/user/metadata.html +redirectmatch 301 ^/nova/([^/]+)/user/filter-scheduler.html$ /nova/$1/admin/scheduling.html +redirectmatch 301 ^/nova/([^/]+)/user/metadata-service.html$ /nova/$1/user/metadata.html +redirectmatch 301 ^/nova/([^/]+)/user/placement.html$ /placement/$1/ +redirectmatch 301 ^/nova/([^/]+)/user/upgrade.html$ /nova/$1/admin/upgrades.html +redirectmatch 301 ^/nova/([^/]+)/user/user-data.html$ /nova/$1/user/metadata.html +redirectmatch 301 ^/nova/([^/]+)/user/vendordata.html$ /nova/$1/user/metadata.html +redirectmatch 301 ^/nova/([^/]+)/vendordata.html$ /nova/$1/user/metadata.html redirectmatch 301 ^/nova/([^/]+)/vmstates.html$ /nova/$1/reference/vm-states.html redirectmatch 301 ^/nova/([^/]+)/wsgi.html$ /nova/$1/user/wsgi.html -redirectmatch 301 ^/nova/([^/]+)/user/cellsv2_layout.html$ /nova/$1/user/cellsv2-layout.html +redirectmatch 301 ^/nova/([^/]+)/admin/adv-config.html$ /nova/$1/admin/index.html +redirectmatch 301 ^/nova/([^/]+)/admin/configuration/schedulers.html$ /nova/$1/admin/scheduling.html +redirectmatch 301 ^/nova/([^/]+)/admin/system-admin.html$ /nova/$1/admin/index.html +redirectmatch 301 ^/nova/([^/]+)/admin/port_with_resource_request.html$ /nova/$1/admin/ports-with-resource-requests.html +redirectmatch 301 ^/nova/([^/]+)/admin/manage-users.html$ /nova/$1/admin/arch.html +redirectmatch 301 ^/nova/([^/]+)/admin/mitigation-for-Intel-MDS-security-flaws.html /nova/$1/admin/cpu-models.html diff --git a/doc/source/figures/nova-weighting-hosts.png b/doc/source/_static/images/nova-weighting-hosts.png similarity index 100% rename from doc/source/figures/nova-weighting-hosts.png rename to doc/source/_static/images/nova-weighting-hosts.png diff --git a/doc/source/_static/images/traits-taxonomy.svg b/doc/source/_static/images/traits-taxonomy.svg new file mode 100644 index 00000000000..e05884a20f6 --- /dev/null +++ b/doc/source/_static/images/traits-taxonomy.svg @@ -0,0 +1,330 @@ +[traits-taxonomy.svg: 330 lines of added SVG/XML markup elided; only the "image/svg+xml" media-type string survived extraction] diff --git a/doc/source/figures/vmware-nova-driver-architecture.jpg b/doc/source/_static/images/vmware-nova-driver-architecture.jpg similarity index 100% rename from doc/source/figures/vmware-nova-driver-architecture.jpg rename to
doc/source/_static/images/vmware-nova-driver-architecture.jpg diff --git a/doc/source/figures/xenserver_architecture.png b/doc/source/_static/images/xenserver_architecture.png similarity index 100% rename from doc/source/figures/xenserver_architecture.png rename to doc/source/_static/images/xenserver_architecture.png diff --git a/doc/source/admin/admin-password-injection.rst b/doc/source/admin/admin-password-injection.rst index dbd0081da9d..278f7b02a8c 100644 --- a/doc/source/admin/admin-password-injection.rst +++ b/doc/source/admin/admin-password-injection.rst @@ -10,18 +10,9 @@ command. You can also view and set the admin password from the dashboard. .. rubric:: Password injection using the dashboard -By default, the dashboard will display the ``admin`` password and allow the -user to modify it. - -If you do not want to support password injection, disable the password fields -by editing the dashboard's ``local_settings.py`` file. - -.. code-block:: none - - OPENSTACK_HYPERVISOR_FEATURES = { - ... - 'can_set_password': False, - } +For password injection display in the dashboard, please refer to the +``can_set_password`` setting in the :horizon-doc:`Horizon doc +` .. rubric:: Password injection on libvirt-based hypervisors @@ -45,16 +36,13 @@ the ``/etc/shadow`` file inside the virtual machine instance. Users can only use :command:`ssh` to access the instance by using the admin password if the virtual machine image is a Linux distribution, and it has - been configured to allow users to use :command:`ssh` as the root user. This - is not the case for `Ubuntu cloud images `_ + been configured to allow users to use :command:`ssh` as the root user with + password authentication. This is not the case for + `Ubuntu cloud images `_ which, by default, do not allow users to use :command:`ssh` to access the - root account. - -.. rubric:: Password injection and XenAPI (XenServer/XCP) - -When using the XenAPI hypervisor back end, Compute uses the XenAPI agent to -inject passwords into guests. The virtual machine image must be configured with -the agent for password injection to work. + root account, or + `CentOS cloud images `_ which, by default, + do not allow :command:`ssh` access to the instance with a password. .. rubric:: Password injection and Windows images (all hypervisors) diff --git a/doc/source/admin/adv-config.rst b/doc/source/admin/adv-config.rst deleted file mode 100644 index 82e6be6b32d..00000000000 --- a/doc/source/admin/adv-config.rst +++ /dev/null @@ -1,30 +0,0 @@ -====================== -Advanced configuration -====================== - -OpenStack clouds run on platforms that differ greatly in the capabilities that -they provide. By default, the Compute service seeks to abstract the underlying -hardware that it runs on, rather than exposing specifics about the underlying -host platforms. This abstraction manifests itself in many ways. For example, -rather than exposing the types and topologies of CPUs running on hosts, the -service exposes a number of generic CPUs (virtual CPUs, or vCPUs) and allows -for overcommitting of these. In a similar manner, rather than exposing the -individual types of network devices available on hosts, generic -software-powered network ports are provided. These features are designed to -allow high resource utilization and allows the service to provide a generic -cost-effective and highly scalable cloud upon which to build applications. - -This abstraction is beneficial for most workloads.
However, there are some -workloads where determinism and per-instance performance are important, if not -vital. In these cases, instances can be expected to deliver near-native -performance. The Compute service provides features to improve individual -instance for these kind of workloads. - -.. toctree:: - :maxdepth: 2 - - pci-passthrough - cpu-topologies - huge-pages - virtual-gpu - file-backed-memory diff --git a/doc/source/admin/aggregates.rst b/doc/source/admin/aggregates.rst new file mode 100644 index 00000000000..621af8caa42 --- /dev/null +++ b/doc/source/admin/aggregates.rst @@ -0,0 +1,394 @@ +=============== +Host aggregates +=============== + +Host aggregates are a mechanism for partitioning hosts in an OpenStack cloud, +or a region of an OpenStack cloud, based on arbitrary characteristics. +Examples where an administrator may want to do this include where a group of +hosts have additional hardware or performance characteristics. + +Host aggregates started out as a way to use Xen hypervisor resource pools, but +have been generalized to provide a mechanism to allow administrators to assign +key-value pairs to groups of machines. Each node can have multiple aggregates, +each aggregate can have multiple key-value pairs, and the same key-value pair +can be assigned to multiple aggregates. This information can be used in the +scheduler to enable advanced scheduling, to set up Xen hypervisor resource +pools or to define logical groups for migration. + +Host aggregates are not explicitly exposed to users. Instead administrators map +flavors to host aggregates. Administrators do this by setting metadata on a +host aggregate, and matching flavor extra specifications. The scheduler then +endeavors to match user requests for instances of the given flavor to a host +aggregate with the same key-value pair in its metadata. Compute nodes can be in +more than one host aggregate. Weight multipliers can be controlled on a +per-aggregate basis by setting the desired ``xxx_weight_multiplier`` aggregate +metadata. + +Administrators are able to optionally expose a host aggregate as an +:term:`Availability Zone`. Availability zones are different from host +aggregates in that they are explicitly exposed to the user, and hosts can only +be in a single availability zone. Administrators can configure a default +availability zone where instances will be scheduled when the user fails to +specify one. For more information on how to do this, refer to +:doc:`/admin/availability-zones`. + + +.. _config-sch-for-aggs: + +Configure scheduler to support host aggregates +---------------------------------------------- + +One common use case for host aggregates is when you want to support scheduling +instances to a subset of compute hosts because they have a specific capability. +For example, you may want to allow users to request compute hosts that have SSD +drives if they need access to faster disk I/O, or access to compute hosts that +have GPU cards to take advantage of GPU-accelerated code. + +To configure the scheduler to support host aggregates, the +:oslo.config:option:`filter_scheduler.enabled_filters` configuration option +must contain the ``AggregateInstanceExtraSpecsFilter`` in addition to the other +filters used by the scheduler. Add the following line to ``nova.conf`` on the +host that runs the ``nova-scheduler`` service to enable host aggregates +filtering, as well as the other filters that are typically enabled: + +.. 
code-block:: ini + + [filter_scheduler] + enabled_filters=...,AggregateInstanceExtraSpecsFilter + +Example: Specify compute hosts with SSDs +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +This example configures the Compute service to enable users to request nodes +that have solid-state drives (SSDs). You create a ``fast-io`` host aggregate in +the ``nova`` availability zone and you add the ``ssd=true`` key-value pair to +the aggregate. Then, you add the ``node1`` and ``node2`` compute nodes to it. + +.. code-block:: console + + $ openstack aggregate create --zone nova fast-io + +-------------------+----------------------------+ + | Field | Value | + +-------------------+----------------------------+ + | availability_zone | nova | + | created_at | 2016-12-22T07:31:13.013466 | + | deleted | False | + | deleted_at | None | + | id | 1 | + | name | fast-io | + | updated_at | None | + +-------------------+----------------------------+ + + $ openstack aggregate set --property ssd=true 1 + +-------------------+----------------------------+ + | Field | Value | + +-------------------+----------------------------+ + | availability_zone | nova | + | created_at | 2016-12-22T07:31:13.000000 | + | deleted | False | + | deleted_at | None | + | hosts | [] | + | id | 1 | + | name | fast-io | + | properties | ssd='true' | + | updated_at | None | + +-------------------+----------------------------+ + + $ openstack aggregate add host 1 node1 + +-------------------+--------------------------------------------------+ + | Field | Value | + +-------------------+--------------------------------------------------+ + | availability_zone | nova | + | created_at | 2016-12-22T07:31:13.000000 | + | deleted | False | + | deleted_at | None | + | hosts | [u'node1'] | + | id | 1 | + | metadata | {u'ssd': u'true', u'availability_zone': u'nova'} | + | name | fast-io | + | updated_at | None | + +-------------------+--------------------------------------------------+ + + $ openstack aggregate add host 1 node2 + +-------------------+--------------------------------------------------+ + | Field | Value | + +-------------------+--------------------------------------------------+ + | availability_zone | nova | + | created_at | 2016-12-22T07:31:13.000000 | + | deleted | False | + | deleted_at | None | + | hosts | [u'node1', u'node2'] | + | id | 1 | + | metadata | {u'ssd': u'true', u'availability_zone': u'nova'} | + | name | fast-io | + | updated_at | None | + +-------------------+--------------------------------------------------+ + +Use the :command:`openstack flavor create` command to create a flavor called +``ssd.large`` with an ID of 6, 8 GB of RAM, an 80 GB root disk, and 4 vCPUs. + +.. code-block:: console + + $ openstack flavor create --id 6 --ram 8192 --disk 80 --vcpus 4 ssd.large + +----------------------------+-----------+ + | Field | Value | + +----------------------------+-----------+ + | OS-FLV-DISABLED:disabled | False | + | OS-FLV-EXT-DATA:ephemeral | 0 | + | disk | 80 | + | id | 6 | + | name | ssd.large | + | os-flavor-access:is_public | True | + | ram | 8192 | + | rxtx_factor | 1.0 | + | swap | | + | vcpus | 4 | + +----------------------------+-----------+ + +Once the flavor is created, specify one or more key-value pairs that match the +key-value pairs on the host aggregates with scope +``aggregate_instance_extra_specs``. In this case, that is the +``aggregate_instance_extra_specs:ssd=true`` key-value pair.
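+The value side of such a pair need not be a bare literal: the
+``AggregateInstanceExtraSpecsFilter`` also accepts the small operator syntax
+used for flavor extra specs (for example ``<or>``). The sketch below is only
+illustrative — the alternate spellings matched here are an assumption for the
+example, so check the scheduler filter documentation for the full syntax:
+
+.. code-block:: console
+
+   $ openstack flavor set --property \
+       aggregate_instance_extra_specs:ssd='<or> true <or> True' ssd.large
+
Setting a +key-value pair on a flavor is done using the :command:`openstack flavor set` +command. + +.. 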
code-block:: console + + $ openstack flavor set \ + --property aggregate_instance_extra_specs:ssd=true ssd.large + +Once it is set, you should see the ``extra_specs`` property of the +``ssd.large`` flavor populated with a key of ``ssd`` and a corresponding value +of ``true``. + +.. code-block:: console + + $ openstack flavor show ssd.large + +----------------------------+-------------------------------------------+ + | Field | Value | + +----------------------------+-------------------------------------------+ + | OS-FLV-DISABLED:disabled | False | + | OS-FLV-EXT-DATA:ephemeral | 0 | + | disk | 80 | + | id | 6 | + | name | ssd.large | + | os-flavor-access:is_public | True | + | properties | aggregate_instance_extra_specs:ssd='true' | + | ram | 8192 | + | rxtx_factor | 1.0 | + | swap | | + | vcpus | 4 | + +----------------------------+-------------------------------------------+ + +Now, when a user requests an instance with the ``ssd.large`` flavor, +the scheduler only considers hosts with the ``ssd=true`` key-value pair. +In this example, these are ``node1`` and ``node2``. + + +Aggregates in Placement +----------------------- + +Aggregates also exist in placement and are not the same thing as host +aggregates in nova. These aggregates are defined (purely) as groupings of +related resource providers. Since compute nodes in nova are represented in +placement as resource providers, they can be added to a placement aggregate as +well. For example, get the UUID of the compute node using :command:`openstack +hypervisor list` and add it to an aggregate in placement using +:command:`openstack resource provider aggregate set`. + +.. code-block:: console + + $ openstack --os-compute-api-version=2.53 hypervisor list + +--------------------------------------+---------------------+-----------------+-----------------+-------+ + | ID | Hypervisor Hostname | Hypervisor Type | Host IP | State | + +--------------------------------------+---------------------+-----------------+-----------------+-------+ + | 815a5634-86fb-4e1e-8824-8a631fee3e06 | node1 | QEMU | 192.168.1.123 | up | + +--------------------------------------+---------------------+-----------------+-----------------+-------+ + + $ openstack --os-placement-api-version=1.2 resource provider aggregate set \ + --aggregate df4c74f3-d2c4-4991-b461-f1a678e1d161 \ + 815a5634-86fb-4e1e-8824-8a631fee3e06 + +Some scheduling filter operations can be performed by placement for increased +speed and efficiency. + +.. note:: + + The nova-api service attempts (as of nova 18.0.0) to automatically mirror + the association of a compute host with an aggregate when an administrator + adds or removes a host to/from a nova host aggregate. This should alleviate + the need to manually create those association records in the placement API + using the ``openstack resource provider aggregate set`` CLI invocation. + + +.. _tenant-isolation-with-placement: + +Tenant Isolation with Placement +------------------------------- + +In order to use placement to isolate tenants, there must be placement +aggregates that match the membership and UUID of nova host aggregates that you +want to use for isolation. The same key pattern in aggregate metadata used by +the :ref:`AggregateMultiTenancyIsolation` filter controls this function, and is +enabled by setting +:oslo.config:option:`scheduler.limit_tenants_to_placement_aggregate` to +``True``. + +.. 
code-block:: console + + $ openstack --os-compute-api-version=2.53 aggregate create myagg + +-------------------+--------------------------------------+ + | Field | Value | + +-------------------+--------------------------------------+ + | availability_zone | None | + | created_at | 2018-03-29T16:22:23.175884 | + | deleted | False | + | deleted_at | None | + | id | 4 | + | name | myagg | + | updated_at | None | + | uuid | 019e2189-31b3-49e1-aff2-b220ebd91c24 | + +-------------------+--------------------------------------+ + + $ openstack --os-compute-api-version=2.53 aggregate add host myagg node1 + +-------------------+--------------------------------------+ + | Field | Value | + +-------------------+--------------------------------------+ + | availability_zone | None | + | created_at | 2018-03-29T16:22:23.175884 | + | deleted | False | + | deleted_at | None | + | hosts | [u'node1'] | + | id | 4 | + | name | myagg | + | updated_at | None | + | uuid | 019e2189-31b3-49e1-aff2-b220ebd91c24 | + +-------------------+--------------------------------------+ + + $ openstack project list -f value | grep 'demo' + 9691591f913949818a514f95286a6b90 demo + + $ openstack aggregate set \ + --property filter_tenant_id=9691591f913949818a514f95286a6b90 myagg + + $ openstack --os-placement-api-version=1.2 resource provider aggregate set \ + --aggregate 019e2189-31b3-49e1-aff2-b220ebd91c24 \ + 815a5634-86fb-4e1e-8824-8a631fee3e06 + +Note that the ``filter_tenant_id`` metadata key can be optionally suffixed +with any string for multiple tenants, such as ``filter_tenant_id3=$tenantid``. + + +Usage +----- + +Much of the configuration of host aggregates is driven from the API or +command-line clients. For example, to create a new aggregate and add hosts to +it using the :command:`openstack` client, run: + +.. code-block:: console + + $ openstack aggregate create my-aggregate + $ openstack aggregate add host my-aggregate my-host + +To list all aggregates and show information about a specific aggregate, run: + +.. code-block:: console + + $ openstack aggregate list + $ openstack aggregate show my-aggregate + +To set and unset a property on the aggregate, run: + +.. code-block:: console + + $ openstack aggregate set --property pinned=true my-aggregate + $ openstack aggregate unset --property pinned my-aggregate + +To rename the aggregate, run: + +.. code-block:: console + + $ openstack aggregate set --name my-awesome-aggregate my-aggregate + +To remove a host from an aggregate and delete the aggregate, run: + +.. code-block:: console + + $ openstack aggregate remove host my-aggregate my-host + $ openstack aggregate delete my-aggregate + +For more information, refer to the :python-openstackclient-doc:`OpenStack +Client documentation `. + + +Configuration +------------- + +In addition to CRUD operations enabled by the API and clients, the following +configuration options can be used to configure how host aggregates and the +related availability zones feature operate under the hood (a brief +``nova.conf`` sketch follows the list): + +- :oslo.config:option:`default_schedule_zone` +- :oslo.config:option:`scheduler.limit_tenants_to_placement_aggregate` +- :oslo.config:option:`cinder.cross_az_attach`
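+For illustration only, the options above could be combined in ``nova.conf`` on
+the API and scheduler hosts as follows (a hedged sketch; the values shown are
+examples, not recommendations):
+
+.. code-block:: ini
+
+   [DEFAULT]
+   default_schedule_zone = az1
+
+   [scheduler]
+   limit_tenants_to_placement_aggregate = True
+
+   [cinder]
+   cross_az_attach = False
+
+ +Finally, as discussed previously, there are a number of host aggregate-specific +scheduler filters. 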
These are: + +- :ref:`AggregateImagePropertiesIsolation` +- :ref:`AggregateInstanceExtraSpecsFilter` +- :ref:`AggregateIoOpsFilter` +- :ref:`AggregateMultiTenancyIsolation` +- :ref:`AggregateNumInstancesFilter` +- :ref:`AggregateTypeAffinityFilter` + +The following configuration options are applicable to the scheduler +configuration: + +- :oslo.config:option:`cpu_allocation_ratio` +- :oslo.config:option:`ram_allocation_ratio` +- :oslo.config:option:`filter_scheduler.max_instances_per_host` +- :oslo.config:option:`filter_scheduler.aggregate_image_properties_isolation_separator` +- :oslo.config:option:`filter_scheduler.aggregate_image_properties_isolation_namespace` + +.. _image-caching-aggregates: + +Image Caching +------------- + +Aggregates can be used as a way to target multiple compute nodes for the purpose of +requesting that images be pre-cached for performance reasons. + +.. note:: + + `Some of the virt drivers`_ provide image caching support, which improves performance + of second-and-later boots of the same image by keeping the base image in an on-disk + cache. This avoids the need to re-download the image from Glance, which reduces + network utilization and time-to-boot latency. Image pre-caching is the act of priming + that cache with images ahead of time to improve performance of the first boot. + +.. _Some of the virt drivers: https://docs.openstack.org/nova/latest/user/support-matrix.html#operation_cache_images + +Assuming an aggregate called ``my-aggregate`` where two images should +be pre-cached, running the following command will initiate the +request: + +.. code-block:: console + + $ nova aggregate-cache-images my-aggregate image1 image2 + +Note that image pre-caching happens asynchronously in a best-effort +manner. The images and aggregate provided are checked by the server +when the command is run, but the compute nodes are not checked to see +if they support image caching until the process runs. Progress and +results are logged by each compute, and the process sends +``aggregate.cache_images.start``, ``aggregate.cache_images.progress``, +and ``aggregate.cache_images.end`` notifications, which may be useful +for monitoring the operation externally. + +References +---------- + +- `Curse your bones, Availability Zones! (Openstack Summit Vancouver 2018) + `__ diff --git a/doc/source/admin/arch.rst b/doc/source/admin/arch.rst index c7fe4d28b3c..c141fabcbdf 100644 --- a/doc/source/admin/arch.rst +++ b/doc/source/admin/arch.rst @@ -43,7 +43,7 @@ Compute controls hypervisors through an API server. Selecting the best hypervisor to use can be difficult, and you must take budget, resource constraints, supported features, and required technical specifications into account. However, the majority of OpenStack development is done on systems -using KVM and Xen-based hypervisors. For a detailed list of features and +using KVM-based hypervisors. For a detailed list of features and support across different hypervisors, see :doc:`/user/support-matrix`. You can also orchestrate clouds using multiple hypervisors in different @@ -51,24 +51,25 @@ availability zones. 
Compute supports the following hypervisors: - :ironic-doc:`Baremetal <>` -- `Docker `__ - - `Hyper-V - `__ + `__ - `Kernel-based Virtual Machine (KVM) - `__ + `__ + +- `Linux Containers (LXC) `__ -- `Linux Containers (LXC) `__ +- `PowerVM `__ -- `Quick Emulator (QEMU) `__ +- `Quick Emulator (QEMU) `__ -- `User Mode Linux (UML) `__ +- `Virtuozzo `__ - `VMware vSphere `__ -- `Xen `__ + +- `zVM `__ For more information about hypervisors, see :doc:`/admin/configuration/hypervisors` @@ -77,6 +78,9 @@ section in the Nova Configuration Reference. Projects, users, and roles ~~~~~~~~~~~~~~~~~~~~~~~~~~ +To begin using Compute, you must create a user with the +:keystone-doc:`Identity service <>`. + The Compute system is designed to be used by different consumers in the form of projects on a shared system, and role-based access assignments. Roles control the actions that a user is allowed to perform. @@ -103,7 +107,7 @@ For projects, you can use quota controls to limit the: Roles control the actions a user is allowed to perform. By default, most actions do not require a particular role, but you can configure them by editing -the ``policy.json`` file for user roles. For example, a rule can be defined so +the ``policy.yaml`` file for user roles. For example, a rule can be defined so that a user must have the ``admin`` role in order to be able to allocate a public IP address. @@ -228,7 +232,7 @@ The displayed image attributes are: Virtual hardware templates are called ``flavors``. By default, these are configurable by admin users, however that behavior can be changed by redefining the access controls for ``compute_extension:flavormanage`` in -``/etc/nova/policy.json`` on the ``compute-api`` server. +``/etc/nova/policy.yaml`` on the ``compute-api`` server. For more information, refer to :doc:`/configuration/policy`. For a list of flavors that are available on your system: diff --git a/doc/source/admin/availability-zones.rst b/doc/source/admin/availability-zones.rst index dc4f8963344..678aff2c5a5 100644 --- a/doc/source/admin/availability-zones.rst +++ b/doc/source/admin/availability-zones.rst @@ -1,70 +1,284 @@ -========================================= -Select hosts where instances are launched -========================================= +================== +Availability Zones +================== -With the appropriate permissions, you can select which host instances are -launched on and which roles can boot instances on this host. +.. note:: -#. To select the host where instances are launched, use the - ``--availability-zone ZONE:HOST:NODE`` parameter on the :command:`openstack - server create` command. + This section provides deployment and admin-user usage information about the + availability zone feature. For end-user information about availability + zones, refer to the :doc:`user guide `. - For example: +Availability Zones are an end-user visible logical abstraction for partitioning +a cloud without knowing the physical infrastructure. Availability zones are not +modeled in the database; rather, they are defined by attaching specific +metadata information to an :doc:`aggregate ` The addition of +this specific metadata to an aggregate makes the aggregate visible from an +end-user perspective and consequently allows users to schedule instances to a +specific set of hosts, the ones belonging to the aggregate. - .. 
code-block:: console +However, despite their similarities, there are a few additional differences to +note when comparing availability zones and host aggregates: - $ openstack server create --image IMAGE --flavor m1.tiny \ - --key-name KEY --availability-zone ZONE:HOST:NODE \ - --nic net-id=UUID SERVER +- A host can be part of multiple aggregates but it can only be in one + availability zone. - .. note:: +- By default a host is part of a default availability zone even if it doesn't + belong to an aggregate. The name of this default availability zone can be + configured using the :oslo.config:option:`default_availability_zone` config + option; a configuration sketch appears at the end of this section. - HOST and NODE are optional parameters. In such cases, use the - ``--availability-zone ZONE::NODE``, ``--availability-zone ZONE:HOST`` or - ``--availability-zone ZONE``. + .. warning:: -#. To specify which roles can launch an instance on a specified host, enable - the ``create:forced_host`` option in the ``policy.json`` file. By default, - this option is enabled for only the admin role. If you see ``Forbidden (HTTP - 403)`` in return, then you are not using admin credentials. + The use of the default availability zone name in requests can be very + error-prone. Since the user can see the list of availability zones, they + have no way to know whether the default availability zone name (currently + ``nova``) is provided because a host belongs to an aggregate whose AZ + metadata key is set to ``nova``, or because there is at least one host + not belonging to any aggregate. Consequently, it is highly recommended + that users never request that an instance be booted in an + explicit AZ named ``nova`` and that operators never set the AZ metadata + for an aggregate to ``nova``. This can result in some problems due to the -#. To view the list of valid zones, use the :command:`openstack availability - zone list` command. + fact that the instance AZ information is explicitly attached to ``nova``, + which could break further move operations when either the host is moved + to another aggregate or when the user would like to migrate the instance. - .. code-block:: console + .. note:: - $ openstack availability zone list - +-----------+-------------+ - | Zone Name | Zone Status | - +-----------+-------------+ - | zone1 | available | - | zone2 | available | - +-----------+-------------+ + Availability zone names must NOT contain ``:`` since it is used by admin + users to specify hosts where instances are launched in server creation. + See `Using availability zones to select hosts`_ for more information. +In addition, other services, such as the :neutron-doc:`networking service <>` +and the :cinder-doc:`block storage service <>`, also provide an availability +zone feature. However, the implementation of these features differs vastly +between these different services. Consult the documentation for these other +services for more information on their implementation of this feature. -#. To view the list of valid compute hosts, use the :command:`openstack host - list` command. - .. code-block:: console
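+As a minimal illustration of the default zone naming discussed above (the zone
+name ``az-default`` is purely an example; the option lives in the ``[DEFAULT]``
+section of ``nova.conf``):
+
+.. code-block:: ini
+
+   [DEFAULT]
+   # Zone reported for hosts that do not belong to any AZ-mapped aggregate
+   default_availability_zone = az-default
+
.. 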
_availability-zones-with-placement: - $ openstack host list - +----------------+-------------+----------+ - | Host Name | Service | Zone | - +----------------+-------------+----------+ - | compute01 | compute | nova | - | compute02 | compute | nova | - +----------------+-------------+----------+ +Availability Zones with Placement +--------------------------------- +In order to use placement to honor availability zone requests, there must be +placement aggregates that match the membership and UUID of nova host aggregates +that you assign as availability zones. The same key in aggregate metadata used +by the `AvailabilityZoneFilter` filter controls this function, and is enabled by +setting :oslo.config:option:`scheduler.query_placement_for_availability_zone` +to ``True``. As of 24.0.0 (Xena), this is the default. -#. To view the list of valid compute nodes, use the :command:`openstack - hypervisor list` command. +.. code-block:: console - .. code-block:: console + $ openstack --os-compute-api-version=2.53 aggregate create myaz + +-------------------+--------------------------------------+ + | Field | Value | + +-------------------+--------------------------------------+ + | availability_zone | None | + | created_at | 2018-03-29T16:22:23.175884 | + | deleted | False | + | deleted_at | None | + | id | 4 | + | name | myaz | + | updated_at | None | + | uuid | 019e2189-31b3-49e1-aff2-b220ebd91c24 | + +-------------------+--------------------------------------+ - $ openstack hypervisor list - +----+---------------------+ - | ID | Hypervisor Hostname | - +----+---------------------+ - | 1 | server2 | - | 2 | server3 | - | 3 | server4 | - +----+---------------------+ + $ openstack --os-compute-api-version=2.53 aggregate add host myaz node1 + +-------------------+--------------------------------------+ + | Field | Value | + +-------------------+--------------------------------------+ + | availability_zone | None | + | created_at | 2018-03-29T16:22:23.175884 | + | deleted | False | + | deleted_at | None | + | hosts | [u'node1'] | + | id | 4 | + | name | myagg | + | updated_at | None | + | uuid | 019e2189-31b3-49e1-aff2-b220ebd91c24 | + +-------------------+--------------------------------------+ + + $ openstack aggregate set --property availability_zone=az002 myaz + + $ openstack --os-placement-api-version=1.2 resource provider aggregate set --aggregate 019e2189-31b3-49e1-aff2-b220ebd91c24 815a5634-86fb-4e1e-8824-8a631fee3e06 + +Without the above configuration, the `AvailabilityZoneFilter` filter must be +enabled in :oslo.config:option:`filter_scheduler.enabled_filters` to retain +proper behavior. + +Implications for moving servers +------------------------------- + +There are several ways to move a server to another host: evacuate, resize, +cold migrate, live migrate, and unshelve. Move operations typically go through +the scheduler to pick the target host *unless* a target host is specified and +the request forces the server to that host by bypassing the scheduler. Only +evacuate and live migrate can forcefully bypass the scheduler and move a +server to a specified host and even then it is highly recommended to *not* +force and bypass the scheduler. + +With respect to availability zones, a server is restricted to a zone if: + +1. The server was created in a specific zone with the ``POST /servers`` request + containing the ``availability_zone`` parameter. + +2. 
+
+Implications for moving servers
+-------------------------------
+
+There are several ways to move a server to another host: evacuate, resize,
+cold migrate, live migrate, and unshelve. Move operations typically go through
+the scheduler to pick the target host *unless* a target host is specified and
+the request forces the server to that host by bypassing the scheduler. Only
+evacuate and live migrate can forcefully bypass the scheduler and move a
+server to a specified host, and even then it is highly recommended *not* to
+force the host and bypass the scheduler.
+
+With respect to availability zones, a server is restricted to a zone if:
+
+1. The server was created in a specific zone with the ``POST /servers`` request
+   containing the ``availability_zone`` parameter.
+
+2. The server create request did not contain the ``availability_zone``
+   parameter but the API service is configured for
+   :oslo.config:option:`default_schedule_zone`, in which case the server will
+   be scheduled to that zone by default.
+
+3. The shelved offloaded server was unshelved by specifying the
+   ``availability_zone`` with the ``POST /servers/{server_id}/action`` request
+   using microversion 2.77 or greater.
+
+4. :oslo.config:option:`cinder.cross_az_attach` is False,
+   :oslo.config:option:`default_schedule_zone` is None, and the server is
+   created without an explicit zone but with pre-existing volume block device
+   mappings. In that case the server will be created in the same zone as the
+   volume(s) if the volume zone is not the same as
+   :oslo.config:option:`default_availability_zone`. See `Resource affinity`_
+   for details.
+
+If the server was not created in a specific zone then it is free to be moved
+to other zones, i.e. the :ref:`AvailabilityZoneFilter <AvailabilityZoneFilter>`
+is a no-op.
+
+Knowing this, it is dangerous to force a server to another host with evacuate
+or live migrate if the server is restricted to a zone and is then forced to
+move to a host in another zone, because that will create an inconsistency in
+the internal tracking of where that server should live and may require manually
+updating the database for that server. For example, if a user creates a server
+in zone A and then the admin force live migrates the server to zone B, and then
+the user resizes the server, the scheduler will try to move it back to zone A,
+which may or may not work, e.g. if the admin deleted or renamed zone A in the
+interim.
+
+Resource affinity
+~~~~~~~~~~~~~~~~~
+
+The :oslo.config:option:`cinder.cross_az_attach` configuration option can be
+used to restrict servers and the volumes attached to servers to the same
+availability zone.
+
+A typical use case for setting ``cross_az_attach=False`` is to enforce compute
+and block storage affinity, for example in a High Performance Compute cluster.
+
+By default ``cross_az_attach`` is True, meaning that the volumes attached to
+a server can be in a different availability zone than the server. If set to
+False, then when creating a server with pre-existing volumes or attaching a
+volume to a server, the server and volume zone must match, otherwise the
+request will fail. In addition, if the nova-compute service creates the volumes
+to attach to the server during server create, it will request that those
+volumes are created in the same availability zone as the server, which must
+exist in the block storage (cinder) service.
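+
+As a minimal sketch, enforcing this affinity is a single option on the API and
+compute nodes (the availability zone itself must exist in both nova and
+cinder):
+
+.. code-block:: ini
+
+   [cinder]
+   # Require servers and their volumes to be in the same availability zone
+   cross_az_attach = False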
+
+As noted in the `Implications for moving servers`_ section, forcefully moving
+a server to another zone could also break affinity with attached volumes.
+
+.. note::
+
+   ``cross_az_attach=False`` is not widely used nor tested extensively and
+   thus suffers from some known issues:
+
+   * `Bug 1694844 <https://bugs.launchpad.net/nova/+bug/1694844>`_. This is
+     fixed in the 21.0.0 (Ussuri) release by using the volume zone for the
+     server being created if the server is created without an explicit zone,
+     :oslo.config:option:`default_schedule_zone` is None, and the volume zone
+     does not match the value of
+     :oslo.config:option:`default_availability_zone`.
+   * `Bug 1781421 <https://bugs.launchpad.net/nova/+bug/1781421>`_
+
+
+.. _using-availability-zones-to-select-hosts:
+
+Using availability zones to select hosts
+----------------------------------------
+
+We can combine availability zones with a specific host and/or node to select
+where an instance is launched. For example:
+
+.. code-block:: console
+
+   $ openstack server create --availability-zone ZONE:HOST:NODE ... SERVER
+
+.. note::
+
+   It is possible to use ``ZONE``, ``ZONE:HOST``, and ``ZONE::NODE``.
+
+.. note::
+
+   This is an admin-only operation by default, though you can modify this
+   behavior using the ``os_compute_api:servers:create:forced_host`` rule in
+   ``policy.yaml``.
+
+However, as discussed `previously `_, when
+launching instances in this manner the scheduler filters are not run. For this
+reason, this behavior is considered legacy behavior and, starting with the 2.74
+microversion, it is now possible to specify a host or node explicitly. For
+example:
+
+.. code-block:: console
+
+   $ openstack --os-compute-api-version 2.74 server create \
+       --host HOST --hypervisor-hostname HYPERVISOR ... SERVER
+
+.. note::
+
+   This is an admin-only operation by default, though you can modify this
+   behavior using the ``compute:servers:create:requested_destination`` rule in
+   ``policy.yaml``.
+
+This avoids the need to explicitly select an availability zone and ensures the
+scheduler filters are not bypassed.
+
+
+Usage
+-----
+
+Creating an availability zone (AZ) is done by associating metadata with a
+:doc:`host aggregate </admin/aggregates>`. For this reason, the
+:command:`openstack` client provides the ability to create a host aggregate and
+associate it with an AZ in one command. For example, to create a new aggregate,
+associate it with an AZ in the process, and add a host to it using the
+:command:`openstack` client, run:
+
+.. code-block:: console
+
+   $ openstack aggregate create --zone my-availability-zone my-aggregate
+   $ openstack aggregate add host my-aggregate my-host
+
+.. note::
+
+   While it is possible to add a host to multiple host aggregates, it is not
+   possible to add it to multiple availability zones. Attempting to add a
+   host to multiple host aggregates associated with differing availability
+   zones will result in a failure.
+
+Alternatively, you can set this metadata manually for an existing host
+aggregate. For example:
+
+.. code-block:: console
+
+   $ openstack aggregate set \
+       --property availability_zone=my-availability-zone my-aggregate
+
+To list all host aggregates and show information about a specific aggregate, in
+order to determine which AZ the host aggregate(s) belong to, run:
+
+.. code-block:: console
+
+   $ openstack aggregate list --long
+   $ openstack aggregate show my-aggregate
+
+Finally, to disassociate a host aggregate from an availability zone, run:
+
+.. code-block:: console
+
+   $ openstack aggregate unset --property availability_zone my-aggregate
+
+
+Configuration
+-------------
+
+Refer to :doc:`/admin/aggregates` for information on configuring both host
+aggregates and availability zones.
diff --git a/doc/source/admin/cells.rst b/doc/source/admin/cells.rst
new file mode 100644
index 00000000000..92f336a4e40
--- /dev/null
+++ b/doc/source/admin/cells.rst
@@ -0,0 +1,92 @@
+==================
+CellsV2 Management
+==================
+
+This section describes the various recommended practices/tips for running and
+maintaining CellsV2 for admins and operators. For more details regarding the
+basic concept of CellsV2 and its layout please see the main :doc:`/user/cellsv2-layout`
+page.
+
+.. _handling-cell-failures:
+
+Handling cell failures
+----------------------
+
+For an explanation on how ``nova-api`` handles cell failures please see the
+`Handling Down Cells <https://docs.openstack.org/api-guide/compute/down_cells.html>`__
+section of the Compute API guide.
+Below, you can find some recommended practices and considerations for
+effectively tolerating cell failure situations.
+
+Configuration considerations
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Since a cell being reachable or not is determined through timeouts, it is
+suggested to provide suitable values for the following settings based on your
+requirements.
+
+#. :oslo.config:option:`database.max_retries` is 10 by default, meaning that
+   every time a cell becomes unreachable, nova retries the connection 10 times
+   before declaring the cell a "down" cell.
+#. :oslo.config:option:`database.retry_interval` is 10 seconds and
+   :oslo.config:option:`oslo_messaging_rabbit.rabbit_retry_interval` is 1 second
+   by default, meaning that every time a cell becomes unreachable, nova retries
+   every 10 seconds or every 1 second, depending on whether it is a database or
+   a message queue problem.
+#. Nova also has a timeout value called ``CELL_TIMEOUT``, which is hardcoded to
+   60 seconds; that is the total time the nova-api would wait before returning
+   partial results for the "down" cells.
+
+The values of the above settings will affect the time required for nova to
+decide if a cell is unreachable and then take the necessary actions like
+returning partial results.
+
+The operator can also control the results of certain actions like listing
+servers and services depending on the value of the
+:oslo.config:option:`api.list_records_by_skipping_down_cells` config option.
+If this is true, the results from the unreachable cells will be skipped; if it
+is false, the request will just fail with an API error in situations where
+partial constructs cannot be computed.
+
+Disabling down cells
+~~~~~~~~~~~~~~~~~~~~
+
+While the temporary outage in the infrastructure is being fixed, the affected
+cells can be disabled so that they are no longer scheduling candidates.
+To enable or disable a cell, use :command:`nova-manage cell_v2 update_cell
+--cell_uuid <cell_uuid> --disable`. See the :ref:`man-page-cells-v2` man page
+for details on command usage.
+
+Known issues
+~~~~~~~~~~~~
+
+1. **Services and Performance:** In case a cell is down during the startup of
+   nova services, there is a chance that the services hang because they cannot
+   connect to all the cell databases that might be required for certain
+   calculations and initializations. For example, if
+   :oslo.config:option:`upgrade_levels.compute` is set to ``auto``, then the
+   ``nova-api`` service hangs on startup if there is at least one unreachable
+   cell. This is because it needs to connect to all the cells to gather
+   information on each of the compute service's version to determine the
+   compute version cap to use. The current workaround is to pin
+   :oslo.config:option:`upgrade_levels.compute` to a particular version like
+   ``rocky`` so that the service can come up in such situations. See `bug
+   1815697 <https://bugs.launchpad.net/nova/+bug/1815697>`__ for more details.
+   Also note that, in general, during situations where cells are not reachable,
+   certain "slowness" may be experienced in operations requiring hitting all
+   the cells because of the aforementioned configurable timeout/retry values.
+
+.. _cells-counting-quotas:
+
+2. **Counting Quotas:** Another known issue is the current approach to counting
+   quotas, where we query each cell database to get the used resources and
+   aggregate them, which makes it sensitive to temporary cell outages.
+   While the cell is unavailable, we cannot count resource usage residing in
+   that cell database, and things behave as though more quota is available
+   than there actually is. That is, if a tenant has used all of their quota
+   and part of it is in cell A, and cell A goes offline temporarily, that
+   tenant will suddenly be able to allocate more resources than their limit
+   (and once cell A returns, the tenant will have more resources allocated
+   than their allowed quota).
+
+   .. note:: Starting in the Train (20.0.0) release, it is possible to
+             configure counting of quota usage from the placement service and
+             API database to make quota usage calculations resilient to down or
+             poor-performing cells in a multi-cell environment. See the
+             :doc:`quotas documentation </user/quotas>` for more details.
diff --git a/doc/source/admin/common/nova-show-usage-statistics-for-hosts-instances.rst b/doc/source/admin/common/nova-show-usage-statistics-for-hosts-instances.rst
index cab607bbdb3..ef4e1fcf00b 100644
--- a/doc/source/admin/common/nova-show-usage-statistics-for-hosts-instances.rst
+++ b/doc/source/admin/common/nova-show-usage-statistics-for-hosts-instances.rst
@@ -30,7 +30,6 @@ The following examples show the host usage statistics for a host called
    | devstack  | compute     | nova     |
    | devstack  | network     | internal |
    | devstack  | scheduler   | internal |
-   | devstack  | consoleauth | internal |
    +-----------+-------------+----------+

 * Get a summary of resource usage of all of the instances running on the host:
@@ -95,7 +94,7 @@ Show instance usage statistics
    have a standard format as below. Before microversion 2.48, each hypervisor
    had its own format. For more details on diagnostics response message see
    `server diagnostics api
-   `__
+   `__
    documentation.

 .. code-block:: console
diff --git a/doc/source/admin/config-drive.rst b/doc/source/admin/config-drive.rst
new file mode 100644
index 00000000000..05f553478b9
--- /dev/null
+++ b/doc/source/admin/config-drive.rst
@@ -0,0 +1,109 @@
+=============
+Config drives
+=============
+
+.. note::
+
+   This section provides deployment information about the config drive feature.
+   For end-user information about the config drive feature and instance metadata
+   in general, refer to the :doc:`user guide </user/metadata>`.
+
+Config drives are special drives that are attached to an instance when it boots.
+The instance can mount this drive and read files from it to get information that
+is normally available through :doc:`the metadata service
+</admin/metadata-service>`.
+
+There are many use cases for the config drive. One such use case is to pass a
+networking configuration when you do not use DHCP to assign IP addresses to
+instances. For example, you might pass the IP address configuration for the
+instance through the config drive, which the instance can mount and access
+before you configure the network settings for the instance. Another common
+reason to use config drives is load. If you run something like the OpenStack
+puppet providers in your instances, they can hit the :doc:`metadata servers
+</admin/metadata-service>` every fifteen minutes, simultaneously for every
+instance you have. They are just checking in and building facts, but it's not
+an insignificant load. With a config drive, that becomes a local (cached) disk
+read. Finally, using a config drive means you're not dependent on the metadata
+service being up, reachable, or performing well when you do things like reboot
+an instance that runs `cloud-init`_ at boot.
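+
+As an illustration of the guest-side workflow, the drive is labelled
+``config-2``, so a guest that supports it can mount the drive read-only and
+read the metadata directly (a sketch; the mount point is arbitrary and the
+device path can vary by guest):
+
+.. code-block:: console
+
+   # mkdir -p /mnt/config
+   # mount -o ro /dev/disk/by-label/config-2 /mnt/config
+   # cat /mnt/config/openstack/latest/meta_data.json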
+
+Any modern guest operating system that is capable of mounting an ISO 9660 or
+VFAT file system can use the config drive.
+
+
+Requirements and guidelines
+---------------------------
+
+To use the config drive, you must meet the following requirements for the
+compute host and image.
+
+.. rubric:: Compute host requirements
+
+The following virt drivers support the config drive: libvirt,
+Hyper-V, VMware, and (since 17.0.0 Queens) PowerVM. The Bare Metal service also
+supports the config drive.
+
+- To use config drives with libvirt or VMware, you must first
+  install the :command:`genisoimage` package on each compute host. Use the
+  :oslo.config:option:`mkisofs_cmd` config option to set the path where you
+  install the :command:`genisoimage` program. If :command:`genisoimage` is in
+  the same path as the :program:`nova-compute` service, you do not need to set
+  this flag.
+
+- To use config drives with Hyper-V, you must set the
+  :oslo.config:option:`mkisofs_cmd` config option to the full path of an
+  :command:`mkisofs.exe` installation. Additionally, you must set the
+  :oslo.config:option:`hyperv.qemu_img_cmd` config option to the full path of
+  a :command:`qemu-img` installation.
+
+- To use config drives with PowerVM or the Bare Metal service, you do not need
+  to prepare anything.
+
+.. rubric:: Image requirements
+
+An image built with a recent version of the `cloud-init`_ package can
+automatically access metadata passed through the config drive. The cloud-init
+package version 0.7.1 works with Ubuntu, Fedora based images (such as Red Hat
+Enterprise Linux) and openSUSE based images (such as SUSE Linux Enterprise
+Server). If an image does not have the cloud-init package installed, you must
+customize the image to run a script that mounts the config drive on boot, reads
+the data from the drive, and takes appropriate action such as adding the public
+key to an account. For more details about how data is organized on the config
+drive, refer to the :ref:`user guide `.
+
+
+Configuration
+-------------
+
+The :program:`nova-compute` service accepts the following config drive-related
+options:
+
+- :oslo.config:option:`api.config_drive_skip_versions`
+- :oslo.config:option:`force_config_drive`
+- :oslo.config:option:`config_drive_format`
+
+If using the HyperV compute driver, the following additional options are
+supported:
+
+- :oslo.config:option:`hyperv.config_drive_cdrom`
+
+For example, to ensure nova always provides a config drive to instances but
+versions ``2018-08-27`` (Rocky) and ``2017-02-22`` (Ocata) are skipped, add the
+following to :file:`nova.conf`:
+
+.. code-block:: ini
+
+   [DEFAULT]
+   force_config_drive = True
+
+   [api]
+   config_drive_skip_versions = 2018-08-27 2017-02-22
+
+.. note::
+
+   The ``img_config_drive`` image metadata property can be used to force enable
+   the config drive. In addition, users can explicitly request a config drive
+   when booting instances. For more information, refer to the :ref:`user guide
+   `.
+
+.. _cloud-init: https://cloudinit.readthedocs.io/en/latest/
diff --git a/doc/source/admin/configuration/api.rst b/doc/source/admin/configuration/api.rst
index 979169ee8f8..a8c2e6a0f4a 100644
--- a/doc/source/admin/configuration/api.rst
+++ b/doc/source/admin/configuration/api.rst
@@ -6,8 +6,9 @@
 The Compute API, is the component of OpenStack Compute that receives and
 responds to user requests, whether they be direct API calls, or via the CLI
 tools or dashboard.
+ Configure Compute API password handling -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +--------------------------------------- The OpenStack Compute API enables users to specify an administrative password when they create, rebuild, rescue or evacuate a server instance. diff --git a/doc/source/admin/configuration/cells.rst b/doc/source/admin/configuration/cells.rst deleted file mode 100644 index ddc2c1775aa..00000000000 --- a/doc/source/admin/configuration/cells.rst +++ /dev/null @@ -1,295 +0,0 @@ -========== -Cells (v1) -========== - -.. warning:: - - Configuring and implementing Cells v1 is not recommended for new deployments - of the Compute service (nova). Cells v2 replaces cells v1, and v2 is - required to install or upgrade the Compute service to the 15.0.0 Ocata - release. More information on cells v2 can be found in :doc:`/user/cells`. - -`Cells` functionality enables you to scale an OpenStack Compute cloud in a more -distributed fashion without having to use complicated technologies like -database and message queue clustering. It supports very large deployments. - -When this functionality is enabled, the hosts in an OpenStack Compute cloud are -partitioned into groups called cells. Cells are configured as a tree. The -top-level cell should have a host that runs a ``nova-api`` service, but no -``nova-compute`` services. Each child cell should run all of the typical -``nova-*`` services in a regular Compute cloud except for ``nova-api``. You can -think of cells as a normal Compute deployment in that each cell has its own -database server and message queue broker. - -The ``nova-cells`` service handles communication between cells and selects -cells for new instances. This service is required for every cell. Communication -between cells is pluggable, and currently the only option is communication -through RPC. - -Cells scheduling is separate from host scheduling. ``nova-cells`` first picks -a cell. Once a cell is selected and the new build request reaches its -``nova-cells`` service, it is sent over to the host scheduler in that cell and -the build proceeds as it would have without cells. - -Cell configuration options -~~~~~~~~~~~~~~~~~~~~~~~~~~ - -.. todo:: This is duplication. We should be able to use the - oslo.config.sphinxext module to generate this for us - -Cells are disabled by default. All cell-related configuration options appear in -the ``[cells]`` section in ``nova.conf``. The following cell-related options -are currently supported: - -``enable`` - Set to ``True`` to turn on cell functionality. Default is ``false``. - -``name`` - Name of the current cell. Must be unique for each cell. - -``capabilities`` - List of arbitrary ``key=value`` pairs defining capabilities of the current - cell. Values include ``hypervisor=xenserver;kvm,os=linux;windows``. - -``call_timeout`` - How long in seconds to wait for replies from calls between cells. - -``scheduler_filter_classes`` - Filter classes that the cells scheduler should use. By default, uses - ``nova.cells.filters.all_filters`` to map to all cells filters included with - Compute. - -``scheduler_weight_classes`` - Weight classes that the scheduler for cells uses. By default, uses - ``nova.cells.weights.all_weighers`` to map to all cells weight algorithms - included with Compute. - -``ram_weight_multiplier`` - Multiplier used to weight RAM. Negative numbers indicate that Compute should - stack VMs on one host instead of spreading out new VMs to more hosts in the - cell. The default value is 10.0. 
- -Configure the API (top-level) cell -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -The cell type must be changed in the API cell so that requests can be proxied -through ``nova-cells`` down to the correct cell properly. Edit the -``nova.conf`` file in the API cell, and specify ``api`` in the ``cell_type`` -key: - -.. code-block:: ini - - [DEFAULT] - compute_api_class=nova.compute.cells_api.ComputeCellsAPI - # ... - - [cells] - cell_type= api - -Configure the child cells -~~~~~~~~~~~~~~~~~~~~~~~~~ - -Edit the ``nova.conf`` file in the child cells, and specify ``compute`` in the -``cell_type`` key: - -.. code-block:: ini - - [DEFAULT] - # Disable quota checking in child cells. Let API cell do it exclusively. - quota_driver=nova.quota.NoopQuotaDriver - - [cells] - cell_type = compute - -Configure the database in each cell -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -Before bringing the services online, the database in each cell needs to be -configured with information about related cells. In particular, the API cell -needs to know about its immediate children, and the child cells must know about -their immediate agents. The information needed is the ``RabbitMQ`` server -credentials for the particular cell. - -Use the :command:`nova-manage cell create` command to add this information to -the database in each cell: - -.. code-block:: console - - # nova-manage cell create -h - usage: nova-manage cell create [-h] [--name ] - [--cell_type ] - [--username ] [--password ] - [--broker_hosts ] - [--hostname ] [--port ] - [--virtual_host ] - [--woffset ] [--wscale ] - - optional arguments: - -h, --help show this help message and exit - --name Name for the new cell - --cell_type - Whether the cell is parent/api or child/compute - --username - Username for the message broker in this cell - --password - Password for the message broker in this cell - --broker_hosts - Comma separated list of message brokers in this cell. - Each Broker is specified as hostname:port with both - mandatory. This option overrides the --hostname and - --port options (if provided). - --hostname - Address of the message broker in this cell - --port Port number of the message broker in this cell - --virtual_host - The virtual host of the message broker in this cell - --woffset - --wscale - -As an example, assume an API cell named ``api`` and a child cell named -``cell1``. - -Within the ``api`` cell, specify the following ``RabbitMQ`` server information: - -.. code-block:: ini - - rabbit_host=10.0.0.10 - rabbit_port=5672 - rabbit_username=api_user - rabbit_password=api_passwd - rabbit_virtual_host=api_vhost - -Within the ``cell1`` child cell, specify the following ``RabbitMQ`` server -information: - -.. code-block:: ini - - rabbit_host=10.0.1.10 - rabbit_port=5673 - rabbit_username=cell1_user - rabbit_password=cell1_passwd - rabbit_virtual_host=cell1_vhost - -You can run this in the API cell as root: - -.. code-block:: console - - # nova-manage cell create --name cell1 --cell_type child \ - --username cell1_user --password cell1_passwd --hostname 10.0.1.10 \ - --port 5673 --virtual_host cell1_vhost --woffset 1.0 --wscale 1.0 - -Repeat the previous steps for all child cells. - -In the child cell, run the following, as root: - -.. code-block:: console - - # nova-manage cell create --name api --cell_type parent \ - --username api_user --password api_passwd --hostname 10.0.0.10 \ - --port 5672 --virtual_host api_vhost --woffset 1.0 --wscale 1.0 - -To customize the Compute cells, use the configuration option settings -documented above. 
- -Cell scheduling configuration -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -To determine the best cell to use to launch a new instance, Compute uses a set -of filters and weights defined in the ``/etc/nova/nova.conf`` file. The -following options are available to prioritize cells for scheduling: - -``scheduler_filter_classes`` - List of filter classes. By default ``nova.cells.filters.all_filters`` - is specified, which maps to all cells filters included with Compute - (see the section called :ref:`Filters `). - -``scheduler_weight_classes`` - List of weight classes. By default ``nova.cells.weights.all_weighers`` is - specified, which maps to all cell weight algorithms included with Compute. - The following modules are available: - - ``mute_child`` - Downgrades the likelihood of child cells being chosen for scheduling - requests, which haven't sent capacity or capability updates in a while. - Options include ``mute_weight_multiplier`` (multiplier for mute children; - value should be negative). - - ``ram_by_instance_type`` - Select cells with the most RAM capacity for the instance type being - requested. Because higher weights win, Compute returns the number of - available units for the instance type requested. The - ``ram_weight_multiplier`` option defaults to 10.0 that adds to the weight - by a factor of 10. - - Use a negative number to stack VMs on one host instead of spreading - out new VMs to more hosts in the cell. - - ``weight_offset`` - Allows modifying the database to weight a particular cell. You can use this - when you want to disable a cell (for example, '0'), or to set a default - cell by making its ``weight_offset`` very high (for example, - ``999999999999999``). The highest weight will be the first cell to be - scheduled for launching an instance. - -Additionally, the following options are available for the cell scheduler: - -``scheduler_retries`` - Specifies how many times the scheduler tries to launch a new instance when no - cells are available (default=10). - -``scheduler_retry_delay`` - Specifies the delay (in seconds) between retries (default=2). - -As an admin user, you can also add a filter that directs builds to a particular -cell. The ``policy.json`` file must have a line with -``"cells_scheduler_filter:TargetCellFilter" : "is_admin:True"`` to let an admin -user specify a scheduler hint to direct a build to a particular cell. - -Optional cell configuration -~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -Cells store all inter-cell communication data, including user names and -passwords, in the database. Because the cells data is not updated very -frequently, use the ``[cells]cells_config`` option to specify a JSON file to -store cells data. With this configuration, the database is no longer consulted -when reloading the cells data. The file must have columns present in the Cell -model (excluding common database fields and the ``id`` column). You must -specify the queue connection information through a ``transport_url`` field, -instead of ``username``, ``password``, and so on. - -The ``transport_url`` has the following form:: - - rabbit://USERNAME:PASSWORD@HOSTNAME:PORT/VIRTUAL_HOST - -The scheme can only be ``rabbit``. - -The following sample shows this optional configuration: - -.. 
code-block:: json
-
-   {
-       "parent": {
-           "name": "parent",
-           "api_url": "http://api.example.com:8774",
-           "transport_url": "rabbit://rabbit.example.com",
-           "weight_offset": 0.0,
-           "weight_scale": 1.0,
-           "is_parent": true
-       },
-       "cell1": {
-           "name": "cell1",
-           "api_url": "http://api.example.com:8774",
-           "transport_url": "rabbit://rabbit1.example.com",
-           "weight_offset": 0.0,
-           "weight_scale": 1.0,
-           "is_parent": false
-       },
-       "cell2": {
-           "name": "cell2",
-           "api_url": "http://api.example.com:8774",
-           "transport_url": "rabbit://rabbit2.example.com",
-           "weight_offset": 0.0,
-           "weight_scale": 1.0,
-           "is_parent": false
-       }
-   }
diff --git a/doc/source/admin/configuration/cross-cell-resize.rst b/doc/source/admin/configuration/cross-cell-resize.rst
new file mode 100644
index 00000000000..d17ee24109a
--- /dev/null
+++ b/doc/source/admin/configuration/cross-cell-resize.rst
@@ -0,0 +1,309 @@
+=================
+Cross-cell resize
+=================
+
+This document describes how to configure nova for cross-cell resize.
+For information on :term:`same-cell resize `, refer to
+:doc:`/admin/configuration/resize`.
+
+Historically, resizing and cold migrating a server has been explicitly
+`restricted`_ to within the same cell in which the server already exists.
+The cross-cell resize feature allows configuring nova to permit resizing
+and cold migrating servers across cells.
+
+The full design details are in the `Ussuri spec`_ and there is a `video`_ from
+a summit talk with a high-level overview.
+
+.. _restricted: https://opendev.org/openstack/nova/src/tag/20.0.0/nova/conductor/tasks/migrate.py#L164
+.. _Ussuri spec: https://specs.openstack.org/openstack/nova-specs/specs/ussuri/approved/cross-cell-resize.html
+.. _video: https://www.openstack.org/videos/summits/denver-2019/whats-new-in-nova-cellsv2
+
+Use case
+--------
+
+There are many reasons to use multiple cells in a nova deployment beyond just
+scaling the database and message queue. Cells can also be used to shard a
+deployment by hardware generation and feature functionality. When sharding by
+hardware generation, it would be natural to set up a host aggregate for each
+cell and map flavors to the aggregate. Then when it comes time to decommission
+old hardware, the deployer could provide new flavors and request that users
+resize to the new flavors before some deadline, which under the covers will
+migrate their servers to the new cell with newer hardware. Administrators
+could also just cold migrate the servers during a maintenance window to the
+new cell.
+
+Requirements
+------------
+
+To enable cross-cell resize functionality the following conditions must be met.
+
+Minimum compute versions
+~~~~~~~~~~~~~~~~~~~~~~~~
+
+All compute services must be upgraded to 21.0.0 (Ussuri) or later and not be
+pinned to older RPC API versions in
+:oslo.config:option:`upgrade_levels.compute`.
+
+Policy configuration
+~~~~~~~~~~~~~~~~~~~~
+
+The policy rule ``compute:servers:resize:cross_cell`` controls who can perform
+a cross-cell resize or cold migrate operation. By default the policy disables
+the functionality for *all* users. A microversion is not required to opt into
+the behavior; passing the policy check is sufficient. As such, it is
+recommended to start by allowing only certain users to be able to perform a
+cross-cell resize or cold migration, for example by setting the rule to
+``rule:admin_api`` or some other rule for test teams but not normal users
+until you are comfortable supporting the feature.
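+
+For example, to restrict the operation to administrators, a ``policy.yaml``
+entry along the following lines can be used (a sketch; adapt the rule to your
+own policy scheme):
+
+.. code-block:: yaml
+
+   # Allow only admins to resize or cold migrate servers across cells
+   "compute:servers:resize:cross_cell": "rule:admin_api"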
+ +Compute driver +~~~~~~~~~~~~~~ + +There are no special compute driver implementations required to support the +feature, it is built on existing driver interfaces used during resize and +shelve/unshelve. However, only the libvirt compute driver has integration +testing in the ``nova-multi-cell`` CI job. + +Networking +~~~~~~~~~~ + +The networking API must expose the ``Port Bindings Extended`` API extension +which was added in the 13.0.0 (Rocky) release for Neutron. + +Notifications +------------- + +The types of events and their payloads remain unchanged. The major difference +from same-cell resize is the *publisher_id* may be different in some cases +since some events are sent from the conductor service rather than a compute +service. For example, with same-cell resize the +``instance.resize_revert.start`` notification is sent from the source compute +host in the `finish_revert_resize`_ method but with cross-cell resize that +same notification is sent from the conductor service. + +Obviously the actual message queue sending the notifications would be different +for the source and target cells assuming they use separate transports. + +.. _finish_revert_resize: https://opendev.org/openstack/nova/src/tag/20.0.0/nova/compute/manager.py#L4326 + +Instance actions +---------------- + +The overall instance actions named ``resize``, ``confirmResize`` and +``revertResize`` are the same as same-cell resize. However, the *events* which +make up those actions will be different for cross-cell resize since the event +names are generated based on the compute service methods involved in the +operation and there are different methods involved in a cross-cell resize. +This is important for triage when a cross-cell resize operation fails. + +Scheduling +---------- + +The :ref:`CrossCellWeigher ` is enabled by default. When a +scheduling request allows selecting compute nodes from another cell the weigher +will by default *prefer* hosts within the source cell over hosts from another +cell. However, this behavior is configurable using the +:oslo.config:option:`filter_scheduler.cross_cell_move_weight_multiplier` +configuration option if, for example, you want to drain old cells when resizing +or cold migrating. + +Code flow +--------- + +The end user experience is meant to not change, i.e. status transitions. A +successfully cross-cell resized server will go to ``VERIFY_RESIZE`` status +and from there the user can either confirm or revert the resized server using +the normal `confirmResize`_ and `revertResize`_ server action APIs. + +Under the covers there are some differences from a traditional same-cell +resize: + +* There is no inter-compute interaction. Everything is synchronously + `orchestrated`_ from the (super)conductor service. This uses the + :oslo.config:option:`long_rpc_timeout` configuration option. + +* The orchestration tasks in the (super)conductor service are in charge of + creating a copy of the instance and its related records in the target cell + database at the beginning of the operation, deleting them in case of rollback + or when the resize is confirmed/reverted, and updating the + ``instance_mappings`` table record in the API database. + +* Non-volume-backed servers will have their root disk uploaded to the image + service as a temporary snapshot image just like during the `shelveOffload`_ + operation. When finishing the resize on the destination host in the target + cell that snapshot image will be used to spawn the guest and then the + snapshot image will be deleted. + +.. 
_confirmResize: https://docs.openstack.org/api-ref/compute/#confirm-resized-server-confirmresize-action +.. _revertResize: https://docs.openstack.org/api-ref/compute/#revert-resized-server-revertresize-action +.. _orchestrated: https://opendev.org/openstack/nova/src/branch/master/nova/conductor/tasks/cross_cell_migrate.py +.. _shelveOffload: https://docs.openstack.org/api-ref/compute/#shelf-offload-remove-server-shelveoffload-action + +Sequence diagram +---------------- + +The following diagrams are current as of the 21.0.0 (Ussuri) release. + +.. NOTE(mriedem): These diagrams could be more detailed, for example breaking + down the individual parts of the conductor tasks and the calls made on + the source and dest compute to the virt driver, cinder and neutron, but + the diagrams could (1) get really complex and (2) become inaccurate with + changes over time. If there are particular sub-sequences that should have + diagrams I would suggest putting those into separate focused diagrams. + +Resize +~~~~~~ + +This is the sequence of calls to get the server to ``VERIFY_RESIZE`` status. + +.. seqdiag:: + + seqdiag { + API; Conductor; Scheduler; Source; Destination; + edge_length = 300; + span_height = 15; + activation = none; + default_note_color = white; + + API ->> Conductor [label = "cast", note = "resize_instance/migrate_server"]; + Conductor => Scheduler [label = "MigrationTask", note = "select_destinations"]; + Conductor -> Conductor [label = "TargetDBSetupTask"]; + Conductor => Destination [label = "PrepResizeAtDestTask", note = "prep_snapshot_based_resize_at_dest"]; + Conductor => Source [label = "PrepResizeAtSourceTask", note = "prep_snapshot_based_resize_at_source"]; + Conductor => Destination [label = "FinishResizeAtDestTask", note = "finish_snapshot_based_resize_at_dest"]; + Conductor -> Conductor [label = "FinishResizeAtDestTask", note = "update instance mapping"]; + } + +Confirm resize +~~~~~~~~~~~~~~ + +This is the sequence of calls when confirming `or deleting`_ a server in +``VERIFY_RESIZE`` status. + +.. seqdiag:: + + seqdiag { + API; Conductor; Source; + edge_length = 300; + span_height = 15; + activation = none; + default_note_color = white; + + API ->> Conductor [label = "cast (or call if deleting)", note = "confirm_snapshot_based_resize"]; + + // separator to indicate everything after this is driven by ConfirmResizeTask + === ConfirmResizeTask === + + Conductor => Source [label = "call", note = "confirm_snapshot_based_resize_at_source"]; + Conductor -> Conductor [note = "hard delete source cell instance"]; + Conductor -> Conductor [note = "update target cell instance status"]; + + } + +.. _or deleting: https://opendev.org/openstack/nova/src/tag/20.0.0/nova/compute/api.py#L2171 + +Revert resize +~~~~~~~~~~~~~ + +This is the sequence of calls when reverting a server in ``VERIFY_RESIZE`` +status. + +.. 
seqdiag:: + + seqdiag { + API; Conductor; Source; Destination; + edge_length = 300; + span_height = 15; + activation = none; + default_note_color = white; + + API ->> Conductor [label = "cast", note = "revert_snapshot_based_resize"]; + + // separator to indicate everything after this is driven by RevertResizeTask + === RevertResizeTask === + + Conductor -> Conductor [note = "update records from target to source cell"]; + Conductor -> Conductor [note = "update instance mapping"]; + Conductor => Destination [label = "call", note = "revert_snapshot_based_resize_at_dest"]; + Conductor -> Conductor [note = "hard delete target cell instance"]; + Conductor => Source [label = "call", note = "finish_revert_snapshot_based_resize_at_source"]; + + } + +Limitations +----------- + +These are known to not yet be supported in the code: + +* Instances with ports attached that have + :doc:`bandwidth-aware ` resource + provider allocations. Nova falls back to same-cell resize if the server has + such ports. +* Rescheduling to alternative hosts within the same target cell in case the + primary selected host fails the ``prep_snapshot_based_resize_at_dest`` call. + +These may not work since they have not been validated by integration testing: + +* Instances with PCI devices attached. +* Instances with a NUMA topology. + +Other limitations: + +* The config drive associated with the server, if there is one, will be + re-generated on the destination host in the target cell. Therefore if the + server was created with `personality files`_ they will be lost. However, this + is no worse than `evacuating`_ a server that had a config drive when the + source and destination compute host are not on shared storage or when + shelve offloading and unshelving a server with a config drive. If necessary, + the resized server can be rebuilt to regain the personality files. +* The ``_poll_unconfirmed_resizes`` periodic task, which can be + :oslo.config:option:`configured ` to automatically + confirm pending resizes on the target host, *might* not support cross-cell + resizes because doing so would require an :ref:`up-call ` to the + API to confirm the resize and cleanup the source cell database. + +.. _personality files: https://docs.openstack.org/api-guide/compute/server_concepts.html#server-personality +.. _evacuating: https://docs.openstack.org/api-ref/compute/#evacuate-server-evacuate-action + +Troubleshooting +--------------- + +Timeouts +~~~~~~~~ + +Configure a :ref:`service user ` in case the user token +times out, e.g. during the snapshot and download of a large server image. + +If RPC calls are timing out with a ``MessagingTimeout`` error in the logs, +check the :oslo.config:option:`long_rpc_timeout` option to see if it is high +enough though the default value (30 minutes) should be sufficient. + +Recovering from failure +~~~~~~~~~~~~~~~~~~~~~~~ + +The orchestration tasks in conductor that drive the operation are built with +rollbacks so each part of the operation can be rolled back in order if a +subsequent task fails. + +The thing to keep in mind is the ``instance_mappings`` record in the API DB +is the authority on where the instance "lives" and that is where the API will +go to show the instance in a ``GET /servers/{server_id}`` call or any action +performed on the server, including deleting it. 
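+
+For example, to check which cell the API currently maps an instance to, the
+:program:`nova-manage` tooling can be used (a sketch; ``<instance-uuid>`` is a
+placeholder):
+
+.. code-block:: console
+
+   $ nova-manage cell_v2 verify_instance --uuid <instance-uuid>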
+ +So if the resize fails and there is a copy of the instance and its related +records in the target cell, the tasks should automatically delete them but if +not you can hard-delete the records from whichever cell is *not* the one in the +``instance_mappings`` table. + +If the instance is in ``ERROR`` status, check the logs in both the source +and destination compute service to see if there is anything that needs to be +manually recovered, for example volume attachments or port bindings, and also +check the (super)conductor service logs. Assuming volume attachments and +port bindings are OK (current and pointing at the correct host), then try hard +rebooting the server to get it back to ``ACTIVE`` status. If that fails, you +may need to `rebuild`_ the server on the source host. Note that the guest's +disks on the source host are not deleted until the resize is confirmed so if +there is an issue prior to confirm or confirm itself fails, the guest disks +should still be available for rebuilding the instance if necessary. + +.. _rebuild: https://docs.openstack.org/api-ref/compute/#rebuild-server-rebuild-action diff --git a/doc/source/admin/configuration/hypervisor-basics.rst b/doc/source/admin/configuration/hypervisor-basics.rst deleted file mode 100644 index 9ac1e785e1f..00000000000 --- a/doc/source/admin/configuration/hypervisor-basics.rst +++ /dev/null @@ -1,14 +0,0 @@ -=============================== -Hypervisor Configuration Basics -=============================== - -The node where the ``nova-compute`` service is installed and operates on the -same node that runs all of the virtual machines. This is referred to as the -compute node in this guide. - -By default, the selected hypervisor is KVM. To change to another hypervisor, -change the ``virt_type`` option in the ``[libvirt]`` section of ``nova.conf`` -and restart the ``nova-compute`` service. - -Specific options for particular hypervisors can be found in -the following sections. diff --git a/doc/source/admin/configuration/hypervisor-hyper-v.rst b/doc/source/admin/configuration/hypervisor-hyper-v.rst index 0959c7cad1b..79d72cad052 100644 --- a/doc/source/admin/configuration/hypervisor-hyper-v.rst +++ b/doc/source/admin/configuration/hypervisor-hyper-v.rst @@ -19,8 +19,9 @@ compute nodes: - Windows Server 2012 R2 Server and Core (with the Hyper-V role enabled) - Hyper-V Server + Hyper-V configuration -~~~~~~~~~~~~~~~~~~~~~ +--------------------- The only OpenStack services required on a Hyper-V node are ``nova-compute`` and ``neutron-hyperv-agent``. Regarding the resources needed for this host you have @@ -34,7 +35,7 @@ configuration information should work for the Windows 2012 and 2012 R2 platforms. Local storage considerations ----------------------------- +~~~~~~~~~~~~~~~~~~~~~~~~~~~~ The Hyper-V compute node needs to have ample storage for storing the virtual machine images running on the compute nodes. You may use a single volume for @@ -43,7 +44,7 @@ all, or partition it into an OS volume and VM volume. .. _configure-ntp-windows: Configure NTP -------------- +~~~~~~~~~~~~~ Network time services must be configured to ensure proper operation of the OpenStack nodes. To set network time on your Windows host you must run the @@ -52,7 +53,7 @@ following commands: .. 
code-block:: bat C:\>net stop w32time - C:\>w32tm /config /manualpeerlist:pool.ntp.org,0x8 /syncfromflags:MANUAL + C:\>w32tm /config "/manualpeerlist:pool.ntp.org,0x8" /syncfromflags:MANUAL C:\>net start w32time Keep in mind that the node will have to be time synchronized with the other @@ -61,7 +62,7 @@ server. Note that in case of an Active Directory environment, you may do this only for the AD Domain Controller. Configure Hyper-V virtual switching ------------------------------------ +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Information regarding the Hyper-V virtual Switch can be found in the `Hyper-V Virtual Switch Overview`__. @@ -83,7 +84,7 @@ following PowerShell may be used: __ https://technet.microsoft.com/en-us/library/hh831823.aspx Enable iSCSI initiator service ------------------------------- +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ To prepare the Hyper-V node to be able to attach to volumes provided by cinder you must first make sure the Windows iSCSI initiator service is running and @@ -95,7 +96,7 @@ started automatically. PS C:\> Start-Service MSiSCSI Configure shared nothing live migration ---------------------------------------- +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Detailed information on the configuration of live migration can be found in `this guide`__ @@ -158,7 +159,7 @@ Additional Requirements: __ https://docs.microsoft.com/en-us/windows-server/virtualization/hyper-v/manage/Use-live-migration-without-Failover-Clustering-to-move-a-virtual-machine How to setup live migration on Hyper-V --------------------------------------- +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ To enable 'shared nothing live' migration, run the 3 instructions below on each Hyper-V host: @@ -175,15 +176,16 @@ Hyper-V host: provide live migration. Additional Reading ------------------- +~~~~~~~~~~~~~~~~~~ This article clarifies the various live migration options in Hyper-V: `Hyper-V Live Migration of Yesterday `_ + Install nova-compute using OpenStack Hyper-V installer -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +------------------------------------------------------ In case you want to avoid all the manual setup, you can use Cloudbase Solutions' installer. You can find it here: @@ -201,28 +203,26 @@ its features can be found here: `Cloudbase `_ + .. _windows-requirements: Requirements -~~~~~~~~~~~~ +------------ Python ------- - -Python 2.7 32bit must be installed as most of the libraries are not working -properly on the 64bit version. +~~~~~~ **Setting up Python prerequisites** -#. Download and install Python 2.7 using the MSI installer from here: +#. Download and install Python 3.8 using the MSI installer from the `Python + website`__. - `python-2.7.3.msi download - `_ + .. __: https://www.python.org/downloads/windows/ .. code-block:: none - PS C:\> $src = "https://www.python.org/ftp/python/2.7.3/python-2.7.3.msi" - PS C:\> $dest = "$env:temp\python-2.7.3.msi" + PS C:\> $src = "https://www.python.org/ftp/python/3.8.8/python-3.8.8.exe" + PS C:\> $dest = "$env:temp\python-3.8.8.exe" PS C:\> Invoke-WebRequest -Uri $src -OutFile $dest PS C:\> Unblock-File $dest PS C:\> Start-Process $dest @@ -233,34 +233,18 @@ properly on the 64bit version. .. 
code-block:: none

       PS C:\> $oldPath = [System.Environment]::GetEnvironmentVariable("Path")
-      PS C:\> $newPath = $oldPath + ";C:\python27\;C:\python27\Scripts\"
+      PS C:\> $newPath = $oldPath + ";C:\python38\;C:\python38\Scripts\"
       PS C:\> [System.Environment]::SetEnvironmentVariable("Path", $newPath, [System.EnvironmentVariableTarget]::User

 Python dependencies
--------------------
-
-The following packages need to be downloaded and manually installed:
-
-``setuptools``
-  https://pypi.python.org/packages/2.7/s/setuptools/setuptools-0.6c11.win32-py2.7.exe
-
-``pip``
-  https://pip.pypa.io/en/latest/installing/
-
-``PyMySQL``
-  http://codegood.com/download/10/
-
-``PyWin32``
-  https://sourceforge.net/projects/pywin32/files/pywin32/Build%20217/pywin32-217.win32-py2.7.exe
-
-``Greenlet``
-  http://www.lfd.uci.edu/~gohlke/pythonlibs/#greenlet
-
-``PyCryto``
-  http://www.voidspace.org.uk/downloads/pycrypto26/pycrypto-2.6.win32-py2.7.exe
+~~~~~~~~~~~~~~~~~~~

 The following packages must be installed with pip:

+* ``pywin32``
+* ``pymysql``
+* ``greenlet``
+* ``pycrypto``
 * ``ecdsa``
 * ``amqp``
 * ``wmi``
@@ -271,8 +255,9 @@ The following packages must be installed with pip:

    PS C:\> pip install amqp
    PS C:\> pip install wmi

+
 Other dependencies
-------------------
+~~~~~~~~~~~~~~~~~~

 ``qemu-img`` is required for some of the image related operations. You can get
 it from here: http://qemu.weilnetz.de/. You must make sure that the
@@ -281,7 +266,7 @@ it from here: http://qemu.weilnetz.de/. You must make sure that the
 Some Python packages need to be compiled, so you may use MinGW or Visual
 Studio. You can get MinGW from here: http://sourceforge.net/projects/mingw/.
 You must configure which compiler is to be used for this purpose by using the
-``distutils.cfg`` file in ``$Python27\Lib\distutils``, which can contain:
+``distutils.cfg`` file in ``$Python38\Lib\distutils``, which can contain:

 .. code-block:: ini
@@ -291,37 +276,30 @@ You must configure which compiler is to be used for this purpose by using the
 As a last step for setting up MinGW, make sure that the MinGW binaries'
 directories are set up in PATH.

+
 Install nova-compute
-~~~~~~~~~~~~~~~~~~~~
+--------------------

 Download the nova code
-----------------------
+~~~~~~~~~~~~~~~~~~~~~~

 #. Use Git to download the necessary source code. The installer to run Git on
    Windows can be downloaded here:

-   https://github.com/msysgit/msysgit/releases/download/Git-1.9.2-preview20140411/Git-1.9.2-preview20140411.exe
+   https://gitforwindows.org/

 #. Download the installer. Once the download is complete, run the installer
    and follow the prompts in the installation wizard. The default should be
    acceptable for the purposes of this guide.

-   .. code-block:: none
-
-      PS C:\> $src = "https://github.com/msysgit/msysgit/releases/download/Git-1.9.2-preview20140411/Git-1.9.2-preview20140411.exe"
-      PS C:\> $dest = "$env:temp\Git-1.9.2-preview20140411.exe"
-      PS C:\> Invoke-WebRequest -Uri $src -OutFile $dest
-      PS C:\> Unblock-File $dest
-      PS C:\> Start-Process $dest
-
 #. Run the following to clone the nova code.

    .. 
code-block:: none - PS C:\> git.exe clone https://git.openstack.org/openstack/nova + PS C:\> git.exe clone https://opendev.org/openstack/nova Install nova-compute service ----------------------------- +~~~~~~~~~~~~~~~~~~~~~~~~~~~~ To install ``nova-compute``, run: @@ -331,7 +309,7 @@ To install ``nova-compute``, run: PS C:\> python setup.py install Configure nova-compute ----------------------- +~~~~~~~~~~~~~~~~~~~~~~ The ``nova.conf`` file must be placed in ``C:\etc\nova`` for running OpenStack on Hyper-V. Below is a sample ``nova.conf`` for Windows: @@ -348,7 +326,7 @@ on Hyper-V. Below is a sample ``nova.conf`` for Windows: use_cow_images = true force_config_drive = false injected_network_template = C:\Program Files (x86)\OpenStack\Nova\etc\interfaces.template - policy_file = C:\Program Files (x86)\OpenStack\Nova\etc\policy.json + policy_file = C:\Program Files (x86)\OpenStack\Nova\etc\policy.yaml mkisofs_cmd = C:\Program Files (x86)\OpenStack\Nova\bin\mkisofs.exe allow_resize_to_same_host = true running_deleted_instance_action = reap @@ -366,17 +344,19 @@ on Hyper-V. Below is a sample ``nova.conf`` for Windows: logfile = nova-compute.log instance_usage_audit = true instance_usage_audit_period = hour - use_neutron = True + [glance] api_servers = http://IP_ADDRESS:9292 + [neutron] - url = http://IP_ADDRESS:9696 + endpoint_override = http://IP_ADDRESS:9696 auth_strategy = keystone project_name = service username = neutron password = Passw0rd auth_url = http://IP_ADDRESS:5000/v3 auth_type = password + [hyperv] vswitch_name = newVSwitch0 limit_cpu_features = false @@ -385,12 +365,13 @@ on Hyper-V. Below is a sample ``nova.conf`` for Windows: config_drive_cdrom = true dynamic_memory_ratio = 1 enable_instance_metrics_collection = true + [rdp] enabled = true html5_proxy_base_url = https://IP_ADDRESS:4430 Prepare images for use with Hyper-V ------------------------------------ +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Hyper-V currently supports only the VHD and VHDX file format for virtual machine instances. Detailed instructions for installing virtual machines on @@ -404,8 +385,12 @@ image to `glance` using the `openstack-client`: .. code-block:: none - PS C:\> openstack image create --name "VM_IMAGE_NAME" --property hypervisor_type=hyperv --public \ - --container-format bare --disk-format vhd + PS C:\> openstack image create \ + --name "VM_IMAGE_NAME" \ + --property hypervisor_type=hyperv \ + --public \ + --container-format bare \ + --disk-format vhd .. note:: @@ -419,12 +404,12 @@ image to `glance` using the `openstack-client`: PS C:\> New-VHD DISK_NAME.vhd -SizeBytes VHD_SIZE Inject interfaces and routes ----------------------------- +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ The ``interfaces.template`` file describes the network interfaces and routes available on your system and how to activate them. You can specify the location -of the file with the ``injected_network_template`` configuration option in -``/etc/nova/nova.conf``. +of the file with the :oslo.config:option:`injected_network_template` +configuration option in ``nova.conf``. .. code-block:: ini @@ -433,7 +418,7 @@ of the file with the ``injected_network_template`` configuration option in A default template exists in ``nova/virt/interfaces.template``. 
Run Compute with Hyper-V
-------------------------
+~~~~~~~~~~~~~~~~~~~~~~~~

 To start the ``nova-compute`` service, run this command from a console in the
 Windows server:
@@ -442,8 +427,9 @@ Windows server:

    PS C:\> C:\Python27\python.exe c:\Python27\Scripts\nova-compute --config-file c:\etc\nova\nova.conf

-Troubleshoot Hyper-V configuration
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Troubleshooting
+---------------

 * I ran the :command:`nova-manage service list` command from my controller;
   however, I'm not seeing smiley faces for Hyper-V compute nodes, what do I do?
diff --git a/doc/source/admin/configuration/hypervisor-ironic.rst b/doc/source/admin/configuration/hypervisor-ironic.rst
new file mode 100644
index 00000000000..bba01deffa5
--- /dev/null
+++ b/doc/source/admin/configuration/hypervisor-ironic.rst
@@ -0,0 +1,58 @@
+======
+Ironic
+======
+
+Introduction
+------------
+
+The ironic hypervisor driver wraps the Bare Metal (ironic) API,
+enabling Nova to provision baremetal resources using the same
+user-facing API as for server management.
+
+This is the only driver in nova where one compute service can map to many
+hosts, meaning a ``nova-compute`` service can manage multiple ``ComputeNodes``.
+An ironic driver managed compute service uses the ironic ``node uuid`` for the
+compute node ``hypervisor_hostname`` (nodename) and ``uuid`` fields. The
+relationship of ``instance:compute node:ironic node`` is 1:1:1.
+
+Scheduling of bare metal nodes is based on custom resource classes, specified
+via the ``resource_class`` property on a node and a corresponding resource
+property on a flavor (see the :ironic-doc:`flavor documentation
+`).
+The RAM and CPU settings on a flavor are ignored, and the disk is only used to
+determine the root partition size when a partition image is used (see the
+:ironic-doc:`image documentation
+`).
+
+
+Configuration
+-------------
+
+- :ironic-doc:`Configure the Compute service to use the Bare Metal service
+  `.
+
+- :ironic-doc:`Create flavors for use with the Bare Metal service
+  `.
+
+- :ironic-doc:`Conductors Groups `.
+
+
+Scaling and performance issues
+------------------------------
+
+- The ``update_available_resource`` periodic task reports all the resources
+  managed by Ironic. Depending on the number of nodes, it can take a lot of
+  time. The ``nova-compute`` service will not perform any other operations
+  while this task is running. You can use conductor groups to help scale, by
+  setting :oslo.config:option:`ironic.partition_key`.
+
+
+Known limitations / Missing features
+------------------------------------
+
+* Migrate
+* Resize
+* Snapshot
+* Pause
+* Shelve
+* Evacuate
diff --git a/doc/source/admin/configuration/hypervisor-kvm.rst b/doc/source/admin/configuration/hypervisor-kvm.rst
index 159d065a855..fdadde32f9a 100644
--- a/doc/source/admin/configuration/hypervisor-kvm.rst
+++ b/doc/source/admin/configuration/hypervisor-kvm.rst
@@ -2,9 +2,6 @@
 KVM
 ===

-.. todo:: This is really installation guide material and should probably be
-   moved.
-
 KVM is configured as the default hypervisor for Compute.

 .. note::
@@ -15,16 +12,6 @@ KVM is configured as the default hypervisor for Compute.
    on qemu-kvm, which installs ``/lib/udev/rules.d/45-qemu-kvm.rules``, which
    sets the correct permissions on the ``/dev/kvm`` device node.

-To enable KVM explicitly, add the following configuration options to the
-``/etc/nova/nova.conf`` file:
-
-.. 
code-block:: ini - - compute_driver = libvirt.LibvirtDriver - - [libvirt] - virt_type = kvm - The KVM hypervisor supports the following virtual machine image formats: * Raw @@ -35,38 +22,47 @@ The KVM hypervisor supports the following virtual machine image formats: This section describes how to enable KVM on your system. For more information, see the following distribution-specific documentation: -* `Fedora: Virtualization Getting Started Guide `_ - from the Fedora 22 documentation. -* `Ubuntu: KVM/Installation `_ from the Community Ubuntu documentation. -* `Debian: Virtualization with KVM `_ from the Debian handbook. -* `Red Hat Enterprise Linux: Installing virtualization packages on an existing - Red Hat Enterprise Linux system `_ from the ``Red Hat Enterprise Linux - Virtualization Host Configuration and Guest Installation Guide``. -* `openSUSE: Installing KVM `_ - from the openSUSE Virtualization with KVM manual. -* `SLES: Installing KVM `_ from the SUSE Linux Enterprise Server - ``Virtualization Guide``. +* `Fedora: Virtualization Getting Started Guide`__ +* `Ubuntu: KVM/Installation`__ +* `Debian: KVM Guide`__ +* `Red Hat Enterprise Linux (RHEL): Getting started with virtualization`__ +* `openSUSE: Setting Up a KVM VM Host Server`__ +* `SLES: Virtualization with KVM`__. + +.. __: https://docs.fedoraproject.org/en-US/quick-docs/getting-started-with-virtualization/ +.. __: https://help.ubuntu.com/community/KVM/Installation +.. __: https://wiki.debian.org/KVM +.. __: https://access.redhat.com/documentation/en-us/red_hat_enterprise_linux/8/html/configuring_and_managing_virtualization/getting-started-with-virtualization-in-rhel-8_configuring-and-managing-virtualization +.. __: https://doc.opensuse.org/documentation/leap/virtualization/html/book-virt/cha-qemu-host.html +.. __: https://documentation.suse.com/sles/11-SP4/html/SLES-all/book-kvm.html + + +Configuration +------------- + +To enable KVM explicitly, add the following configuration options to the +``/etc/nova/nova.conf`` file: + +.. code-block:: ini + + [DEFAULT] + compute_driver = libvirt.LibvirtDriver + + [libvirt] + virt_type = kvm + .. _enable-kvm: Enable KVM -~~~~~~~~~~ +---------- The following sections outline how to enable KVM based hardware virtualization on different architectures and platforms. To perform these steps, you must be logged in as the ``root`` user. -For x86 based systems ---------------------- +For x86-based systems +~~~~~~~~~~~~~~~~~~~~~ #. To determine whether the ``svm`` or ``vmx`` CPU extensions are present, run this command: @@ -135,8 +131,7 @@ system or find a system with this support. and enable the VT option. If KVM acceleration is not supported, configure Compute to use a different -hypervisor, such as ``QEMU`` or ``Xen``. See :ref:`compute_qemu` or -:ref:`compute_xen_api` for details. +hypervisor, such as :ref:`QEMU `. These procedures help you load the kernel modules for Intel-based and AMD-based processors if they do not load automatically during KVM installation. @@ -176,8 +171,8 @@ Add these lines to ``/etc/modules`` file so that these modules load on reboot: kvm kvm-amd -For POWER based systems ------------------------ +For POWER-based systems +~~~~~~~~~~~~~~~~~~~~~~~ KVM as a hypervisor is supported on POWER system's PowerNV platform. @@ -225,15 +220,22 @@ KVM as a hypervisor is supported on POWER system's PowerNV platform. Because a KVM installation can change user group membership, you might need to log in again for changes to take effect. 
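Whichever platform you are on, a quick sanity check after enabling KVM is to
confirm that the kernel modules are loaded and that the ``/dev/kvm`` device
node exists. This is only a sketch; the module names and exact output vary by
CPU vendor and distribution:

.. code-block:: console

    # lsmod | grep kvm
    kvm_intel             319488  0
    kvm                   823296  1 kvm_intel
    # ls -l /dev/kvm
    crw-rw---- 1 root kvm 10, 232 Aug 15 07:31 /dev/kvm

If ``/dev/kvm`` is missing, hardware virtualization support is most likely
still disabled in the system firmware, as described above.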
+For AArch64-based systems +~~~~~~~~~~~~~~~~~~~~~~~~~ + +.. todo:: Populate this section. + + Configure Compute backing storage -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +--------------------------------- Backing Storage is the storage used to provide the expanded operating system image, and any ephemeral storage. Inside the virtual machine, this is normally presented as two virtual hard disks (for example, ``/dev/vda`` and ``/dev/vdb`` respectively). However, inside OpenStack, this can be derived from one of these methods: ``lvm``, ``qcow``, ``rbd`` or ``flat``, chosen using the -``images_type`` option in ``nova.conf`` on the compute node. +:oslo.config:option:`libvirt.images_type` option in ``nova.conf`` on the +compute node. .. note:: @@ -241,7 +243,8 @@ methods: ``lvm``, ``qcow``, ``rbd`` or ``flat``, chosen using the Flat back end uses either raw or QCOW2 storage. It never uses a backing store, so when using QCOW2 it copies an image rather than creating an overlay. By default, it creates raw files but will use QCOW2 when creating a - disk from a QCOW2 if ``force_raw_images`` is not set in configuration. + disk from a QCOW2 if :oslo.config:option:`force_raw_images` is not set in + configuration. QCOW is the default backing store. It uses a copy-on-write philosophy to delay allocation of storage until it is actually needed. This means that the space @@ -255,88 +258,134 @@ reserved on the physical disk. Local `LVM volumes `__ can also be -used. Set ``images_volume_group = nova_local`` where ``nova_local`` is the name -of the LVM group you have created. +used. Set the :oslo.config:option:`libvirt.images_volume_group` configuration +option to the name of the LVM group you have created. -Specify the CPU model of KVM guests -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -The Compute service enables you to control the guest CPU model that is exposed -to KVM virtual machines. Use cases include: +Direct download of images from Ceph +----------------------------------- -* To maximize performance of virtual machines by exposing new host CPU features - to the guest +When the Glance image service is set up with the Ceph backend and Nova is using +a local ephemeral store (``[libvirt]/images_type!=rbd``), it is possible to +configure Nova to download images directly into the local compute image cache. -* To ensure a consistent default CPU across all machines, removing reliance of - variable QEMU defaults +With the following configuration, images are downloaded using the RBD export +command instead of using the Glance HTTP API. In some situations, especially +for very large images, this could be substantially faster and can improve the +boot times of instances. -In libvirt, the CPU is specified by providing a base CPU model name (which is a -shorthand for a set of feature flags), a set of additional feature flags, and -the topology (sockets/cores/threads). The libvirt KVM driver provides a number -of standard CPU model names. These models are defined in the -``/usr/share/libvirt/cpu_map.xml`` file. Check this file to determine which -models are supported by your local installation. +On the Glance API node in ``glance-api.conf``: -Two Compute configuration options in the ``[libvirt]`` group of ``nova.conf`` -define which type of CPU model is exposed to the hypervisor when using KVM: -``cpu_mode`` and ``cpu_model``. +.. code-block:: ini -The ``cpu_mode`` option can take one of the following values: ``none``, -``host-passthrough``, ``host-model``, and ``custom``. 
+ [DEFAULT] + show_image_direct_url=true -Host model (default for KVM & QEMU) ------------------------------------ +On the Nova compute node in nova.conf: -If your ``nova.conf`` file contains ``cpu_mode=host-model``, libvirt identifies -the CPU model in ``/usr/share/libvirt/cpu_map.xml`` file that most closely -matches the host, and requests additional CPU flags to complete the match. This -configuration provides the maximum functionality and performance and maintains -good reliability and compatibility if the guest is migrated to another host -with slightly different host CPUs. +.. code-block:: ini -Host pass through ------------------ + [glance] + enable_rbd_download=true + rbd_user=glance + rbd_pool=images + rbd_ceph_conf=/etc/ceph/ceph.conf + rbd_connect_timeout=5 -If your ``nova.conf`` file contains ``cpu_mode=host-passthrough``, libvirt -tells KVM to pass through the host CPU with no modifications. The difference -to host-model, instead of just matching feature flags, every last detail of the -host CPU is matched. This gives the best performance, and can be important to -some apps which check low level CPU details, but it comes at a cost with -respect to migration. The guest can only be migrated to a matching host CPU. -Custom ------- +Nested guest support +-------------------- -If your ``nova.conf`` file contains ``cpu_mode=custom``, you can explicitly -specify one of the supported named models using the cpu_model configuration -option. For example, to configure the KVM guests to expose Nehalem CPUs, your -``nova.conf`` file should contain: +You may choose to enable support for nested guests --- that is, allow +your Nova instances to themselves run hardware-accelerated virtual +machines with KVM. Doing so requires a module parameter on +your KVM kernel module, and corresponding ``nova.conf`` settings. -.. code-block:: ini +Host configuration +~~~~~~~~~~~~~~~~~~ - [libvirt] - cpu_mode = custom - cpu_model = Nehalem +To enable nested KVM guests, your compute node must load the +``kvm_intel`` or ``kvm_amd`` module with ``nested=1``. You can enable +the ``nested`` parameter permanently, by creating a file named +``/etc/modprobe.d/kvm.conf`` and populating it with the following +content: + +.. code-block:: none + + options kvm_intel nested=1 + options kvm_amd nested=1 + +A reboot may be required for the change to become effective. + +Nova configuration +~~~~~~~~~~~~~~~~~~ + +To support nested guests, you must set your +:oslo.config:option:`libvirt.cpu_mode` configuration to one of the following +options: -None (default for all libvirt-driven hypervisors other than KVM & QEMU) ------------------------------------------------------------------------ +Host passthrough (``host-passthrough``) + In this mode, nested virtualization is automatically enabled once + the KVM kernel module is loaded with nesting support. -If your ``nova.conf`` file contains ``cpu_mode=none``, libvirt does not specify -a CPU model. Instead, the hypervisor chooses the default model. + .. code-block:: ini -Guest agent support -------------------- + [libvirt] + cpu_mode = host-passthrough -Use guest agents to enable optional access between compute nodes and guests -through a socket, using the QMP protocol. + However, do consider the other implications that + :doc:`host passthrough ` mode has on compute + functionality. 
+ +Host model (``host-model``) + In this mode, nested virtualization is automatically enabled once + the KVM kernel module is loaded with nesting support, **if** the + matching CPU model exposes the ``vmx`` feature flag to guests by + default (you can verify this with ``virsh capabilities`` on your + compute node). If your CPU model does not pass in the ``vmx`` flag, + you can force it with :oslo.config:option:`libvirt.cpu_model_extra_flags`: + + .. code-block:: ini + + [libvirt] + cpu_mode = host-model + cpu_model_extra_flags = vmx + + Again, consider the other implications that apply to the + :doc:`host model ` mode. + +Custom (``custom``) + In custom mode, the same considerations apply as in host-model mode, + but you may *additionally* want to ensure that libvirt passes not only + the ``vmx``, but also the ``pcid`` flag to its guests: + + .. code-block:: ini + + [libvirt] + cpu_mode = custom + cpu_models = IvyBridge + cpu_model_extra_flags = vmx,pcid + +More information on CPU models can be found in :doc:`/admin/cpu-models`. + +Limitations +~~~~~~~~~~~~ + +When enabling nested guests, you should be aware of (and inform your +users about) certain limitations that are currently inherent to nested +KVM virtualization. Most importantly, guests using nested +virtualization will, *while nested guests are running*, + +* fail to complete live migration; +* fail to resume from suspend. + +See `the KVM documentation +`_ for more +information on these limitations. -To enable this feature, you must set ``hw_qemu_guest_agent=yes`` as a metadata -parameter on the image you wish to use to create the guest-agent-capable -instances from. You can explicitly disable the feature by setting -``hw_qemu_guest_agent=no`` in the image metadata. KVM performance tweaks -~~~~~~~~~~~~~~~~~~~~~~ +---------------------- The `VHostNet `_ kernel module improves network performance. To load the kernel module, run the following command as @@ -346,8 +395,9 @@ root: # modprobe vhost_net -Troubleshoot KVM -~~~~~~~~~~~~~~~~ + +Troubleshooting +--------------- Trying to launch a new virtual machine instance fails with the ``ERROR`` state, and the following error appears in the ``/var/log/nova/nova-compute.log`` file: diff --git a/doc/source/admin/configuration/hypervisor-lxc.rst b/doc/source/admin/configuration/hypervisor-lxc.rst index eb8d51f83ef..bc0988ccf6e 100644 --- a/doc/source/admin/configuration/hypervisor-lxc.rst +++ b/doc/source/admin/configuration/hypervisor-lxc.rst @@ -24,11 +24,17 @@ LXC than other hypervisors. the hypervisor. See the `hypervisor support matrix `_ for details. -To enable LXC, ensure the following options are set in ``/etc/nova/nova.conf`` -on all hosts running the ``nova-compute`` service. + +Configuration +------------- + +To enable LXC, configure :oslo.config:option:`DEFAULT.compute_driver` = +``libvirt.LibvirtDriver`` and :oslo.config:option:`libvirt.virt_type` = +``lxc``. For example: .. code-block:: ini + [DEFAULT] compute_driver = libvirt.LibvirtDriver [libvirt] diff --git a/doc/source/admin/configuration/hypervisor-powervm.rst b/doc/source/admin/configuration/hypervisor-powervm.rst index 9b16c3a21d0..a2947ff6082 100644 --- a/doc/source/admin/configuration/hypervisor-powervm.rst +++ b/doc/source/admin/configuration/hypervisor-powervm.rst @@ -1,8 +1,10 @@ +======= PowerVM ======= Introduction ------------ + OpenStack Compute supports the PowerVM hypervisor through `NovaLink`_. 
In the NovaLink architecture, a thin NovaLink virtual machine running on the Power system manages virtualization for that system. The ``nova-compute`` service @@ -12,22 +14,27 @@ Management Console) is needed. .. _NovaLink: https://www.ibm.com/support/knowledgecenter/en/POWER8/p8eig/p8eig_kickoff.htm + Configuration ------------- + In order to function properly, the ``nova-compute`` service must be executed by a member of the ``pvm_admin`` group. Use the ``usermod`` command to add the -user. For example, to add the ``stacker`` user to the ``pvm_admin`` group, execute:: +user. For example, to add the ``stacker`` user to the ``pvm_admin`` group, execute: + +.. code-block:: console - sudo usermod -a -G pvm_admin stacker + # usermod -a -G pvm_admin stacker The user must re-login for the change to take effect. -To enable the PowerVM compute driver, set the following configuration option -in the ``/etc/nova/nova.conf`` file: +To enable the PowerVM compute driver, configure +:oslo.config:option:`DEFAULT.compute_driver` = ``powervm.PowerVMDriver``. For +example: .. code-block:: ini - [Default] + [DEFAULT] compute_driver = powervm.PowerVMDriver The PowerVM driver supports two types of storage for ephemeral disks: @@ -59,9 +66,10 @@ processor, whereas 0.05 means 1/20th of a physical processor. E.g.: Volume Support -------------- + Volume support is provided for the PowerVM virt driver via Cinder. Currently, -the only supported volume protocol is `vSCSI`_ Fibre Channel. Attach, detach, +the only supported volume protocol is `vSCSI`__ Fibre Channel. Attach, detach, and extend are the operations supported by the PowerVM vSCSI FC volume adapter. -Boot from volume is not yet supported. +:term:`Boot From Volume` is not yet supported. -.. _vSCSI: https://www.ibm.com/support/knowledgecenter/en/POWER8/p8hat/p8hat_virtualscsi.htm +.. __: https://www.ibm.com/support/knowledgecenter/en/POWER8/p8hat/p8hat_virtualscsi.htm diff --git a/doc/source/admin/configuration/hypervisor-qemu.rst b/doc/source/admin/configuration/hypervisor-qemu.rst index 6849b89c280..6cc72b04ae6 100644 --- a/doc/source/admin/configuration/hypervisor-qemu.rst +++ b/doc/source/admin/configuration/hypervisor-qemu.rst @@ -19,17 +19,24 @@ The typical uses cases for QEMU are development or testing purposes, where the hypervisor does not support native virtualization for guests. -To enable QEMU, add these settings to ``nova.conf``: + +Configuration +------------- + +To enable QEMU, configure :oslo.config:option:`DEFAULT.compute_driver` = +``libvirt.LibvirtDriver`` and :oslo.config:option:`libvirt.virt_type` = +``qemu``. For example: .. code-block:: ini + [DEFAULT] compute_driver = libvirt.LibvirtDriver [libvirt] virt_type = qemu -For some operations you may also have to install the -:command:`guestmount` utility: +For some operations you may also have to install the :command:`guestmount` +utility: On Ubuntu: diff --git a/doc/source/admin/configuration/hypervisor-virtuozzo.rst b/doc/source/admin/configuration/hypervisor-virtuozzo.rst index 13c63daba62..354818949e0 100644 --- a/doc/source/admin/configuration/hypervisor-virtuozzo.rst +++ b/doc/source/admin/configuration/hypervisor-virtuozzo.rst @@ -12,11 +12,17 @@ image. Some OpenStack Compute features may be missing when running with Virtuozzo as the hypervisor. See :doc:`/user/support-matrix` for details. -To enable Virtuozzo Containers, set the following options in -``/etc/nova/nova.conf`` on all hosts running the ``nova-compute`` service. 
+
+Configuration
+-------------
+
+To enable Virtuozzo Containers, configure
+:oslo.config:option:`DEFAULT.compute_driver` = ``libvirt.LibvirtDriver`` and
+:oslo.config:option:`libvirt.virt_type` = ``parallels``. For example:

 .. code-block:: ini

+    [DEFAULT]
     compute_driver = libvirt.LibvirtDriver
     force_raw_images = False

@@ -31,6 +37,7 @@ To enable Virtuozzo Virtual Machines, set the following options in

 .. code-block:: ini

+    [DEFAULT]
     compute_driver = libvirt.LibvirtDriver

     [libvirt]

diff --git a/doc/source/admin/configuration/hypervisor-vmware.rst b/doc/source/admin/configuration/hypervisor-vmware.rst
index c7ffd11fe2f..9de1d0c2aef 100644
--- a/doc/source/admin/configuration/hypervisor-vmware.rst
+++ b/doc/source/admin/configuration/hypervisor-vmware.rst
@@ -3,7 +3,7 @@ VMware vSphere
 ==============

 Introduction
-~~~~~~~~~~~~
+------------

 OpenStack Compute supports the VMware vSphere product family and enables access
 to advanced features such as vMotion, High Availability, and Dynamic Resource
@@ -23,15 +23,16 @@ vSphere features.

 The following sections describe how to configure the VMware vCenter driver.

+
 High-level architecture
-~~~~~~~~~~~~~~~~~~~~~~~
+-----------------------

 The following diagram shows a high-level view of the VMware driver
 architecture:

 .. rubric:: VMware driver architecture

-.. figure:: /figures/vmware-nova-driver-architecture.jpg
+.. figure:: /_static/images/vmware-nova-driver-architecture.jpg
    :width: 100%

 As the figure shows, the OpenStack Compute Scheduler sees three hypervisors
@@ -56,12 +57,12 @@ visible in the OpenStack dashboard and you can manage it as you would any other
 OpenStack VM. You can perform advanced vSphere operations in vCenter while you
 configure OpenStack resources such as VMs through the OpenStack dashboard.

-The figure does not show how networking fits into the architecture. Both
-``nova-network`` and the OpenStack Networking Service are supported. For
+The figure does not show how networking fits into the architecture. For
 details, see :ref:`vmware-networking`.

+
 Configuration overview
-~~~~~~~~~~~~~~~~~~~~~~
+----------------------

 To get started with the VMware vCenter driver, complete the following
 high-level steps:

@@ -73,13 +74,12 @@ high-level steps:

 #. Load desired VMDK images into the Image service. See :ref:`vmware-images`.

-#. Configure networking with either ``nova-network`` or
-   the Networking service. See :ref:`vmware-networking`.
+#. Configure the Networking service (neutron). See :ref:`vmware-networking`.

 .. _vmware-prereqs:

 Prerequisites and limitations
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+-----------------------------

 Use the following list to prepare a vSphere environment that runs with the
 VMware vCenter driver:

@@ -110,8 +110,7 @@ Networking

 Security groups
   If you use the VMware driver with OpenStack Networking and the NSX plug-in,
-  security groups are supported. If you use ``nova-network``, security groups
-  are not supported.
+  security groups are supported.

 .. note::

@@ -145,8 +144,9 @@ assigned to a separate availability zone. This is required as the OpenStack
 Block Storage VMDK driver does not currently work across multiple vCenter
 installations.

+
 VMware vCenter service account
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+------------------------------

 OpenStack integration requires a vCenter service account with the following
 minimum permissions. Apply the permissions to the ``Datacenter`` root object,
@@ -417,10 +417,11 @@ and select the :guilabel:`Propagate to Child Objects` option.
      - Import
      -
+
 ..
_vmware-vcdriver: VMware vCenter driver -~~~~~~~~~~~~~~~~~~~~~ +--------------------- Use the VMware vCenter driver (VMwareVCDriver) to connect OpenStack Compute with vCenter. This recommended configuration enables access through vCenter to @@ -428,7 +429,7 @@ advanced vSphere features like vMotion, High Availability, and Dynamic Resource Scheduling (DRS). VMwareVCDriver configuration options ------------------------------------- +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Add the following VMware-specific configuration options to the ``nova.conf`` file: @@ -481,10 +482,11 @@ against host failures. Many ``nova.conf`` options are relevant to libvirt but do not apply to this driver. + .. _vmware-images: Images with VMware vSphere -~~~~~~~~~~~~~~~~~~~~~~~~~~ +-------------------------- The vCenter driver supports images in the VMDK format. Disks in this format can be obtained from VMware Fusion or from an ESX environment. It is also possible @@ -495,7 +497,7 @@ sections provide additional details on the supported disks and the commands used for conversion and upload. Supported image types ---------------------- +~~~~~~~~~~~~~~~~~~~~~ Upload images to the OpenStack Image service in VMDK format. The following VMDK disk types are supported: @@ -748,7 +750,7 @@ of the supported guest OS: - Windows XP Professional Convert and load images ------------------------ +~~~~~~~~~~~~~~~~~~~~~~~ Using the ``qemu-img`` utility, disk images in several formats (such as, qcow2) can be converted to the VMDK format. @@ -809,12 +811,12 @@ is lsiLogic, which is SCSI, so you can omit the ``vmware_adaptertype`` property if you are certain that the image adapter type is lsiLogic. Tag VMware images ------------------ +~~~~~~~~~~~~~~~~~ In a mixed hypervisor environment, OpenStack Compute uses the ``hypervisor_type`` tag to match images to the correct hypervisor type. For VMware images, set the hypervisor type to ``vmware``. Other valid hypervisor -types include: ``hyperv``, ``ironic``, ``lxc``, ``qemu``, ``uml``, and ``xen``. +types include: ``hyperv``, ``ironic``, ``lxc``, and ``qemu``. Note that ``qemu`` is used for both QEMU and KVM hypervisor types. .. code-block:: console @@ -829,7 +831,7 @@ Note that ``qemu`` is used for both QEMU and KVM hypervisor types. ubuntu-thick-scsi < ubuntuLTS-flat.vmdk Optimize images ---------------- +~~~~~~~~~~~~~~~ Monolithic Sparse disks are considerably faster to download but have the overhead of an additional conversion step. When imported into ESX, sparse disks @@ -888,7 +890,7 @@ In the previous cases, the converted vmdk is actually a pair of files: The file to be uploaded to the Image service is ``converted-flat.vmdk``. Image handling --------------- +~~~~~~~~~~~~~~ The ESX hypervisor requires a copy of the VMDK file in order to boot up a virtual machine. As a result, the vCenter OpenStack Compute driver must @@ -902,7 +904,7 @@ Image service. Even with a cached VMDK, there is still a copy operation from the cache location to the hypervisor file directory in the shared data store. To avoid this copy, boot the image in linked_clone mode. To learn how to enable this -mode, see :ref:`vmware-config`. +mode, see :oslo.config:option:`vmware.use_linked_clone`. .. note:: @@ -926,61 +928,30 @@ cached images are stored. have a shared file system. You can automatically purge unused images after a specified period of time. 
To
-configure this action, set these options in the ``DEFAULT`` section in the
-``nova.conf`` file:
+configure this action, set these options in the :oslo.config:group:`image_cache`
+section in the ``nova.conf`` file:

-``remove_unused_base_images``
-  Set this option to ``True`` to specify that unused images should be removed
-  after the duration specified in the
-  ``remove_unused_original_minimum_age_seconds`` option. The default is
-  ``True``.
+* :oslo.config:option:`image_cache.remove_unused_base_images`
+* :oslo.config:option:`image_cache.remove_unused_original_minimum_age_seconds`

-``remove_unused_original_minimum_age_seconds``
-  Specifies the duration in seconds after which an unused image is purged from
-  the cache. The default is ``86400`` (24 hours).


 .. _vmware-networking:

 Networking with VMware vSphere
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-The VMware driver supports networking with the ``nova-network`` service or the
-Networking Service. Depending on your installation, complete these
-configuration steps before you provision VMs:
-
-#. **The nova-network service with the FlatManager or FlatDHCPManager**.
-   Create a port group with the same name as the ``flat_network_bridge`` value
-   in the ``nova.conf`` file. The default value is ``br100``. If you specify
-   another value, the new value must be a valid Linux bridge identifier that
-   adheres to Linux bridge naming conventions.
-
-   All VM NICs are attached to this port group.
-
-   Ensure that the flat interface of the node that runs the ``nova-network``
-   service has a path to this network.
+------------------------------

-   .. note::
+The VMware driver supports networking with the Networking Service (neutron).
+Depending on your installation, complete these configuration steps before you
+provision VMs:

-      When configuring the port binding for this port group in vCenter, specify
-      ``ephemeral`` for the port binding type. For more information, see
-      `Choosing a port binding type in ESX/ESXi `_ in the VMware Knowledge Base.
-
-#. **The nova-network service with the VlanManager**.
-   Set the ``vlan_interface`` configuration option to match the ESX host
-   interface that handles VLAN-tagged VM traffic.
-
-   OpenStack Compute automatically creates the corresponding port groups.
-
-#. If you are using the OpenStack Networking Service:
-   Before provisioning VMs, create a port group with the same name as the
+#. Before provisioning VMs, create a port group with the same name as the
    ``vmware.integration_bridge`` value in ``nova.conf`` (default is
    ``br-int``). All VM NICs are attached to this port group for management by
    the OpenStack Networking plug-in.

+
 Volumes with VMware vSphere
-~~~~~~~~~~~~~~~~~~~~~~~~~~~
+---------------------------

 The VMware driver supports attaching volumes from the Block Storage service.
 The VMware VMDK driver for OpenStack Block Storage is recommended and should be
@@ -990,153 +961,20 @@ this has not yet been imported and published).

 Also an iSCSI volume driver provides limited support and can be used only for
 attachments.

-.. _vmware-config:
-
-Configuration reference
-~~~~~~~~~~~~~~~~~~~~~~~
-
-To customize the VMware driver, use the configuration option settings below.
-
-.. TODO(sdague): for the import we just copied this in from the auto generated
-   file. We probably need a strategy for doing equivalent autogeneration, but
-   we don't as of yet.
-
-   Warning: Do not edit this file. It is automatically generated from the
-   software project's code and your changes will be overwritten.
- - The tool to generate this file lives in openstack-doc-tools repository. - - Please make any changes needed in the code, then run the - autogenerate-config-doc tool from the openstack-doc-tools repository, or - ask for help on the documentation mailing list, IRC channel or meeting. - -.. _nova-vmware: - -.. list-table:: Description of VMware configuration options - :header-rows: 1 - :class: config-ref-table - - * - Configuration option = Default value - - Description - * - **[vmware]** - - - * - ``api_retry_count`` = ``10`` - - (Integer) Number of times VMware vCenter server API must be retried on connection failures, e.g. socket error, etc. - * - ``ca_file`` = ``None`` - - (String) Specifies the CA bundle file to be used in verifying the vCenter server certificate. - * - ``cache_prefix`` = ``None`` - - (String) This option adds a prefix to the folder where cached images are stored - - This is not the full path - just a folder prefix. This should only be used when a datastore cache is shared between compute nodes. - .. note:: - - This should only be used when the compute nodes are running on same host or they have a shared file system. - - Possible values: - - * Any string representing the cache prefix to the folder - * - ``cluster_name`` = ``None`` - - (String) Name of a VMware Cluster ComputeResource. - * - ``console_delay_seconds`` = ``None`` - - (Integer) Set this value if affected by an increased network latency causing repeated characters when typing in a remote console. - * - ``datastore_regex`` = ``None`` - - (String) Regular expression pattern to match the name of datastore. - - The datastore_regex setting specifies the datastores to use with Compute. For example, datastore_regex="nas.*" selects all the data stores that have a name starting with "nas". - - .. note:: - - If no regex is given, it just picks the datastore with the most freespace. - - Possible values: - - * Any matching regular expression to a datastore must be given - * - ``host_ip`` = ``None`` - - (String) Hostname or IP address for connection to VMware vCenter host. - * - ``host_password`` = ``None`` - - (String) Password for connection to VMware vCenter host. - * - ``host_port`` = ``443`` - - (Port number) Port for connection to VMware vCenter host. - * - ``host_username`` = ``None`` - - (String) Username for connection to VMware vCenter host. - * - ``insecure`` = ``False`` - - (Boolean) If true, the vCenter server certificate is not verified. If false, then the default CA truststore is used for verification. - - Related options: - - * ca_file: This option is ignored if "ca_file" is set. - * - ``integration_bridge`` = ``None`` - - (String) This option should be configured only when using the NSX-MH Neutron plugin. This is the name of the integration bridge on the ESXi server or host. This should not be set for any other Neutron plugin. Hence the default value is not set. - - Possible values: - - * Any valid string representing the name of the integration bridge - * - ``maximum_objects`` = ``100`` - - (Integer) This option specifies the limit on the maximum number of objects to return in a single result. - - A positive value will cause the operation to suspend the retrieval when the count of objects reaches the specified limit. The server may still limit the count to something less than the configured value. Any remaining objects may be retrieved with additional requests. - * - ``pbm_default_policy`` = ``None`` - - (String) This option specifies the default policy to be used. 
- - If pbm_enabled is set and there is no defined storage policy for the specific request, then this policy will be used. - - Possible values: - - * Any valid storage policy such as VSAN default storage policy - - Related options: - - * pbm_enabled - * - ``pbm_enabled`` = ``False`` - - (Boolean) This option enables or disables storage policy based placement of instances. - - Related options: - - * pbm_default_policy - * - ``pbm_wsdl_location`` = ``None`` - - (String) This option specifies the PBM service WSDL file location URL. - - Setting this will disable storage policy based placement of instances. - - Possible values: - - * Any valid file path e.g file:///opt/SDK/spbm/wsdl/pbmService.wsdl - * - ``serial_port_proxy_uri`` = ``None`` - - (String) Identifies a proxy service that provides network access to the serial_port_service_uri. - - Possible values: - - * Any valid URI - - Related options: This option is ignored if serial_port_service_uri is not specified. - - * serial_port_service_uri - * - ``serial_port_service_uri`` = ``None`` - - (String) Identifies the remote system where the serial port traffic will be sent. - - This option adds a virtual serial port which sends console output to a configurable service URI. At the service URI address there will be virtual serial port concentrator that will collect console logs. If this is not set, no serial ports will be added to the created VMs. - - Possible values: - - * Any valid URI - * - ``task_poll_interval`` = ``0.5`` - - (Floating point) Time interval in seconds to poll remote tasks invoked on VMware VC server. - * - ``use_linked_clone`` = ``True`` - - (Boolean) This option enables/disables the use of linked clone. - - The ESX hypervisor requires a copy of the VMDK file in order to boot up a virtual machine. The compute driver must download the VMDK via HTTP from the OpenStack Image service to a datastore that is visible to the hypervisor and cache it. Subsequent virtual machines that need the VMDK use the cached version and don't have to copy the file again from the OpenStack Image service. - - If set to false, even with a cached VMDK, there is still a copy operation from the cache location to the hypervisor file directory in the shared datastore. If set to true, the above copy operation is avoided as it creates copy of the virtual machine that shares virtual disks with its parent VM. - * - ``wsdl_location`` = ``None`` - - (String) This option specifies VIM Service WSDL Location - - If vSphere API versions 5.1 and later is being used, this section can be ignored. If version is less than 5.1, WSDL files must be hosted locally and their location must be specified in the above section. +Troubleshooting +--------------- - Optional over-ride to default location for bug work-arounds. +Operators can troubleshoot VMware specific failures by correlating OpenStack +logs to vCenter logs. Every RPC call which is made by an OpenStack driver has +an ``opID`` which can be traced in the vCenter logs. For example consider the +following excerpt from a ``nova-compute`` log: - Possible values: +.. 
code-block:: console - * http:///vimService.wsdl + Aug 15 07:31:09 localhost nova-compute[16683]: DEBUG oslo_vmware.service [-] Invoking Folder.CreateVM_Task with opID=oslo.vmware-debb6064-690e-45ac-b0ae-1b94a9638d1f {{(pid=16683) request_handler /opt/stack/oslo.vmware/oslo_vmware/service.py:355}} - * file:///opt/stack/vmware/SDK/wsdl/vim25/vimService.wsdl +In this case the ``opID`` is +``oslo.vmware-debb6064-690e-45ac-b0ae-1b94a9638d1f`` and we can grep the +vCenter log (usually ``/var/log/vmware/vpxd/vpxd.log``) for it to +find if anything went wrong with the ``CreateVM`` operation. diff --git a/doc/source/admin/configuration/hypervisor-xen-api.rst b/doc/source/admin/configuration/hypervisor-xen-api.rst deleted file mode 100644 index 082a37bcbcb..00000000000 --- a/doc/source/admin/configuration/hypervisor-xen-api.rst +++ /dev/null @@ -1,468 +0,0 @@ -.. _compute_xen_api: - -============================================= -XenServer (and other XAPI based Xen variants) -============================================= - -.. todo:: - - os-xenapi version is 0.3.1 currently. - This document should be modified according to the new version. - This todo has been reported as `bug 1718606`_. - -.. _bug 1718606: https://bugs.launchpad.net/nova/+bug/1718606 - - -This section describes XAPI managed hypervisors, and how to use them with -OpenStack. - -Terminology -~~~~~~~~~~~ - -Xen ---- - -A hypervisor that provides the fundamental isolation between virtual machines. -Xen is open source (GPLv2) and is managed by `XenProject.org -`_, a cross-industry organization and a Linux -Foundation Collaborative project. - -Xen is a component of many different products and projects. The hypervisor -itself is very similar across all these projects, but the way that it is -managed can be different, which can cause confusion if you're not clear which -toolstack you are using. Make sure you know what `toolstack -`_ you want before you get -started. If you want to use Xen with libvirt in OpenStack Compute refer to -:doc:`hypervisor-xen-libvirt`. - -XAPI ----- - -XAPI is one of the toolstacks that could control a Xen based hypervisor. -XAPI's role is similar to libvirt's in the KVM world. The API provided by XAPI -is called XenAPI. To learn more about the provided interface, look at `XenAPI -Object Model Overview `_ for definitions of XAPI -specific terms such as SR, VDI, VIF and PIF. - -OpenStack has a compute driver which talks to XAPI, therefore all XAPI managed -servers could be used with OpenStack. - -XenAPI ------- - -XenAPI is the API provided by XAPI. This name is also used by the python -library that is a client for XAPI. A set of packages to use XenAPI on existing -distributions can be built using the `xenserver/buildroot -`_ project. - -XenServer ---------- - -An Open Source virtualization platform that delivers all features needed for -any server and datacenter implementation including the Xen hypervisor and XAPI -for the management. For more information and product downloads, visit -`xenserver.org `_. - -XCP ---- - -XCP is not supported anymore. XCP project recommends all XCP users to upgrade -to the latest version of XenServer by visiting `xenserver.org -`_. - -Privileged and unprivileged domains ------------------------------------ - -A Xen host runs a number of virtual machines, VMs, or domains (the terms are -synonymous on Xen). One of these is in charge of running the rest of the -system, and is known as domain 0, or dom0. 
It is the first domain to boot after -Xen, and owns the storage and networking hardware, the device drivers, and the -primary control software. Any other VM is unprivileged, and is known as a domU -or guest. All customer VMs are unprivileged, but you should note that on -XenServer (and other XenAPI using hypervisors), the OpenStack Compute service -(``nova-compute``) also runs in a domU. This gives a level of security -isolation between the privileged system software and the OpenStack software -(much of which is customer-facing). This architecture is described in more -detail later. - -Paravirtualized versus hardware virtualized domains ---------------------------------------------------- - -A Xen virtual machine can be paravirtualized (PV) or hardware virtualized -(HVM). This refers to the interaction between Xen, domain 0, and the guest VM's -kernel. PV guests are aware of the fact that they are virtualized and will -co-operate with Xen and domain 0; this gives them better performance -characteristics. HVM guests are not aware of their environment, and the -hardware has to pretend that they are running on an unvirtualized machine. HVM -guests do not need to modify the guest operating system, which is essential -when running Windows. - -In OpenStack, customer VMs may run in either PV or HVM mode. However, the -OpenStack domU (that's the one running ``nova-compute``) must be running in PV -mode. - -xapi pool ---------- - -A resource pool comprises multiple XenServer host installations, bound together -into a single managed entity which can host virtual machines. When combined with -shared storage, VMs could dynamically move between XenServer hosts, with minimal -downtime since no block copying is needed. - -XenAPI deployment architecture -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -A basic OpenStack deployment on a XAPI-managed server, assuming that the -network provider is neutron network, looks like this: - -.. figure:: /figures/xenserver_architecture.png - :width: 100% - -Key things to note: - -* The hypervisor: Xen - -* Domain 0: runs XAPI and some small pieces from OpenStack, - the XAPI plug-ins. - -* OpenStack VM: The ``Compute`` service runs in a paravirtualized virtual - machine, on the host under management. Each host runs a local instance of - ``Compute``. It is also running neutron plugin-agent - (``neutron-openvswitch-agent``) to perform local vSwitch configuration. - -* OpenStack Compute uses the XenAPI Python library to talk to XAPI, and it uses - the Management Network to reach from the OpenStack VM to Domain 0. - -Some notes on the networking: - -* The above diagram assumes DHCP networking. - -* There are three main OpenStack networks: - - * Management network: RabbitMQ, MySQL, inter-host communication, and - compute-XAPI communication. Please note that the VM images are downloaded - by the XenAPI plug-ins, so make sure that the OpenStack Image service is - accessible through this network. It usually means binding those services to - the management interface. - - * Tenant network: controlled by neutron, this is used for tenant traffic. - - * Public network: floating IPs, public API endpoints. - -* The networks shown here must be connected to the corresponding physical - networks within the data center. In the simplest case, three individual - physical network cards could be used. It is also possible to use VLANs to - separate these networks. Please note, that the selected configuration must be - in line with the networking model selected for the cloud. 
(In case of VLAN - networking, the physical channels have to be able to forward the tagged - traffic.) - -* With the Networking service, you should enable Linux bridge in ``Dom0`` which - is used for Compute service. ``nova-compute`` will create Linux bridges for - security group and ``neutron-openvswitch-agent`` in Compute node will apply - security group rules on these Linux bridges. To implement this, you need to - remove ``/etc/modprobe.d/blacklist-bridge*`` in ``Dom0``. - -Further reading -~~~~~~~~~~~~~~~ - -Here are some of the resources available to learn more about Xen: - -* `Citrix XenServer official documentation - `_ -* `What is Xen? by XenProject.org - `_ -* `Xen Hypervisor project - `_ -* `Xapi project `_ -* `Further XenServer and OpenStack information - `_ - -Install XenServer -~~~~~~~~~~~~~~~~~ - -Before you can run OpenStack with XenServer, you must install the hypervisor on -`an appropriate server `_. - -.. note:: - - Xen is a type 1 hypervisor: When your server starts, Xen is the first - software that runs. Consequently, you must install XenServer before you - install the operating system where you want to run OpenStack code. You then - install ``nova-compute`` into a dedicated virtual machine on the host. - -Use the following link to download XenServer's installation media: - -* http://xenserver.org/open-source-virtualization-download.html - -When you install many servers, you might find it easier to perform `PXE boot -installations `_. You can also package any -post-installation changes that you want to make to your XenServer by following -the instructions of `creating your own XenServer supplemental pack -`_. - -.. important:: - - When using ``[xenserver]image_handler=direct_vhd`` (the default), make sure - you use the EXT type of storage repository (SR). Features that require access - to VHD files (such as copy on write, snapshot and migration) do not work when - you use the LVM SR. Storage repository (SR) is a XAPI-specific term relating to - the physical storage where virtual disks are stored. - - On the XenServer installation screen, choose the :guilabel:`XenDesktop - Optimized` option. If you use an answer file, make sure you use - ``srtype="ext"`` in the ``installation`` tag of the answer file. - -Post-installation steps -~~~~~~~~~~~~~~~~~~~~~~~ - -The following steps need to be completed after the hypervisor's installation: - -#. For resize and migrate functionality, enable password-less SSH - authentication and set up the ``/images`` directory on dom0. - -#. Install the XAPI plug-ins. - -#. To support AMI type images, you must set up ``/boot/guest`` - symlink/directory in dom0. - -#. Create a paravirtualized virtual machine that can run ``nova-compute``. - -#. Install and configure ``nova-compute`` in the above virtual machine. - -#. To support live migration requiring no block device migration, you should - add the current host to a xapi pool using shared storage. You need to know - the pool master ip address, user name and password: - -.. code-block:: console - - xe pool-join master-address=MASTER_IP master-username=root master-password=MASTER_PASSWORD - -Install XAPI plug-ins ---------------------- - -When you use a XAPI managed hypervisor, you can install a Python script (or any -executable) on the host side, and execute that through XenAPI. These scripts -are called plug-ins. The OpenStack related XAPI plug-ins live in OpenStack -os-xenapi code repository. 
These plug-ins have to be copied to dom0's -filesystem, to the appropriate directory, where XAPI can find them. It is -important to ensure that the version of the plug-ins are in line with the -OpenStack Compute installation you are using. - -The plugins should typically be copied from the Nova installation running in -the Compute's DomU (``pip show os-xenapi`` to find its location), but if you -want to download the latest version the following procedure can be used. - -**Manually installing the plug-ins** - -#. Create temporary files/directories: - - .. code-block:: console - - $ OS_XENAPI_TARBALL=$(mktemp) - $ OS_XENAPI_SOURCES=$(mktemp -d) - -#. Get the source from the openstack.org archives. The example assumes the - latest release is used, and the XenServer host is accessible as xenserver. - Match those parameters to your setup. - - .. code-block:: console - - $ OS_XENAPI_URL=https://tarballs.openstack.org/os-xenapi/os-xenapi-0.1.1.tar.gz - $ wget -qO "$OS_XENAPI_TARBALL" "$OS_XENAPI_URL" - $ tar xvf "$OS_XENAPI_TARBALL" -d "$OS_XENAPI_SOURCES" - -#. Copy the plug-ins to the hypervisor: - - .. code-block:: console - - $ PLUGINPATH=$(find $OS_XENAPI_SOURCES -path '*/xapi.d/plugins' -type d -print) - $ tar -czf - -C "$PLUGINPATH" ./ | - > ssh root@xenserver tar -xozf - -C /etc/xapi.d/plugins - -#. Remove temporary files/directories: - - .. code-block:: console - - $ rm "$OS_XENAPI_TARBALL" - $ rm -rf "$OS_XENAPI_SOURCES" - -Prepare for AMI type images ---------------------------- - -To support AMI type images in your OpenStack installation, you must create the -``/boot/guest`` directory on dom0. One of the OpenStack XAPI plugins will -extract the kernel and ramdisk from AKI and ARI images and put them to that -directory. - -OpenStack maintains the contents of this directory and its size should not -increase during normal operation. However, in case of power failures or -accidental shutdowns, some files might be left over. To prevent these files -from filling up dom0's filesystem, set up this directory as a symlink that -points to a subdirectory of the local SR. - -Run these commands in dom0 to achieve this setup: - -.. code-block:: console - - # LOCAL_SR=$(xe sr-list name-label="Local storage" --minimal) - # LOCALPATH="/var/run/sr-mount/$LOCAL_SR/os-guest-kernels" - # mkdir -p "$LOCALPATH" - # ln -s "$LOCALPATH" /boot/guest - -Modify dom0 for resize/migration support ----------------------------------------- - -To resize servers with XenServer you must: - -* Establish a root trust between all hypervisor nodes of your deployment: - - To do so, generate an ssh key-pair with the :command:`ssh-keygen` command. - Ensure that each of your dom0's ``authorized_keys`` file (located in - ``/root/.ssh/authorized_keys``) contains the public key fingerprint (located - in ``/root/.ssh/id_rsa.pub``). - -* Provide a ``/images`` mount point to the dom0 for your hypervisor: - - dom0 space is at a premium so creating a directory in dom0 is potentially - dangerous and likely to fail especially when you resize large servers. The - least you can do is to symlink ``/images`` to your local storage SR. The - following instructions work for an English-based installation of XenServer - and in the case of ext3-based SR (with which the resize functionality is - known to work correctly). - - .. 
code-block:: console - - # LOCAL_SR=$(xe sr-list name-label="Local storage" --minimal) - # IMG_DIR="/var/run/sr-mount/$LOCAL_SR/images" - # mkdir -p "$IMG_DIR" - # ln -s "$IMG_DIR" /images - -XenAPI configuration reference -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -The following section discusses some commonly changed options when using the -XenAPI driver. The table below provides a complete reference of all -configuration options available for configuring XAPI with OpenStack. - -The recommended way to use XAPI with OpenStack is through the XenAPI driver. -To enable the XenAPI driver, add the following configuration options to -``/etc/nova/nova.conf`` and restart ``OpenStack Compute``: - -.. code-block:: ini - - compute_driver = xenapi.XenAPIDriver - [xenserver] - connection_url = http://your_xenapi_management_ip_address - connection_username = root - connection_password = your_password - ovs_integration_bridge = br-int - -These connection details are used by OpenStack Compute service to contact your -hypervisor and are the same details you use to connect XenCenter, the XenServer -management console, to your XenServer node. - -.. note:: - - The ``connection_url`` is generally the management network IP - address of the XenServer. - -Networking configuration ------------------------- - -The Networking service in the Compute node is running -``neutron-openvswitch-agent``. This manages ``dom0``\'s OVS. You should refer -to the :neutron-doc:`openvswitch_agent.ini sample -` for details, however there are -several specific items to look out for. - -.. code-block:: ini - - [agent] - minimize_polling = False - root_helper_daemon = xenapi_root_helper - - [ovs] - of_listen_address = management_ip_address - ovsdb_connection = tcp:your_xenapi_management_ip_address:6640 - bridge_mappings = :, ... - integration_bridge = br-int - - [xenapi] - connection_url = http://your_xenapi_management_ip_address - connection_username = root - connection_password = your_pass_word - -.. note:: - - The ``ovsdb_connection`` is the connection string for the native OVSDB - backend, you need to enable port 6640 in dom0. - -Agent ------ - -The agent is a piece of software that runs on the instances, and communicates -with OpenStack. In case of the XenAPI driver, the agent communicates with -OpenStack through XenStore (see `the Xen Project Wiki -`_ for more information on XenStore). - -If you don't have the guest agent on your VMs, it takes a long time for -OpenStack Compute to detect that the VM has successfully started. Generally a -large timeout is required for Windows instances, but you may want to adjust: -``agent_version_timeout`` within the ``[xenserver]`` section. - -VNC proxy address ------------------ - -Assuming you are talking to XAPI through a management network, and XenServer is -on the address: 10.10.1.34 specify the same address for the vnc proxy address: -``server_proxyclient_address=10.10.1.34`` - -Storage -------- - -You can specify which Storage Repository to use with nova by editing the -following flag. To use the local-storage setup by the default installer: - -.. code-block:: ini - - sr_matching_filter = "other-config:i18n-key=local-storage" - -Another alternative is to use the "default" storage (for example if you have -attached NFS or any other shared storage): - -.. code-block:: ini - - sr_matching_filter = "default-sr:true" - -Use different image handler ---------------------------- - -We support three different implementations for glance image handler. 
You -can choose a specific image handler based on the demand: - -* ``direct_vhd``: This image handler will call XAPI plugins to directly - process the VHD files in XenServer SR(Storage Repository). So this handler - only works when the host's SR type is file system based e.g. ext, nfs. - -* ``vdi_local_dev``: This image handler uploads ``tgz`` compressed raw - disk images to the glance image service. - -* ``vdi_remote_stream``: With this image handler, the image data streams - between XenServer and the glance image service. As it uses the remote - APIs supported by XAPI, this plugin works for all SR types supported by - XenServer. - -``direct_vhd`` is the default image handler. If want to use a different image -handler, you can change the config setting of ``image_handler`` within the -``[xenserver]`` section. For example, the following config setting is to use -``vdi_remote_stream`` as the image handler: - -.. code-block:: ini - - [xenserver] - image_handler=vdi_remote_stream diff --git a/doc/source/admin/configuration/hypervisor-xen-libvirt.rst b/doc/source/admin/configuration/hypervisor-xen-libvirt.rst deleted file mode 100644 index 2c28cf03d40..00000000000 --- a/doc/source/admin/configuration/hypervisor-xen-libvirt.rst +++ /dev/null @@ -1,249 +0,0 @@ -=============== -Xen via libvirt -=============== - -OpenStack Compute supports the Xen Project Hypervisor (or Xen). Xen can be -integrated with OpenStack Compute via the `libvirt `_ -`toolstack `_ or via the `XAPI -`_ `toolstack -`_. This section describes how -to set up OpenStack Compute with Xen and libvirt. For information on how to -set up Xen with XAPI refer to :doc:`hypervisor-xen-api`. - -Installing Xen with libvirt -~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -At this stage we recommend using the baseline that we use for the `Xen Project -OpenStack CI Loop -`_, which -contains the most recent stability fixes to both Xen and libvirt. - -`Xen 4.5.1 -`_ -(or newer) and `libvirt 1.2.15 `_ (or newer) -contain the minimum required OpenStack improvements for Xen. Although libvirt -1.2.15 works with Xen, libvirt 1.3.2 or newer is recommended. The necessary -Xen changes have also been backported to the Xen 4.4.3 stable branch. Please -check with the Linux and FreeBSD distros you are intending to use as `Dom 0 -`_, whether the relevant -version of Xen and libvirt are available as installable packages. - -The latest releases of Xen and libvirt packages that fulfil the above minimum -requirements for the various openSUSE distributions can always be found and -installed from the `Open Build Service -`_ Virtualization -project. To install these latest packages, add the Virtualization repository -to your software management stack and get the newest packages from there. More -information about the latest Xen and libvirt packages are available `here -`__ and `here -`__. - -Alternatively, it is possible to use the Ubuntu LTS 14.04 Xen Package -**4.4.1-0ubuntu0.14.04.4** (Xen 4.4.1) and apply the patches outlined `here -`__. -You can also use the Ubuntu LTS 14.04 libvirt package **1.2.2 -libvirt_1.2.2-0ubuntu13.1.7** as baseline and update it to libvirt version -1.2.15, or 1.2.14 with the patches outlined `here -`__ -applied. Note that this will require rebuilding these packages partly from -source. - -For further information and latest developments, you may want to consult the -Xen Project's `mailing lists for OpenStack related issues and questions -`_. 
- -Configuring Xen with libvirt -~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -To enable Xen via libvirt, ensure the following options are set in -``/etc/nova/nova.conf`` on all hosts running the ``nova-compute`` service. - -.. code-block:: ini - - compute_driver = libvirt.LibvirtDriver - - [libvirt] - virt_type = xen - -Additional configuration options -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -Use the following as a guideline for configuring Xen for use in OpenStack: - -#. **Dom0 memory**: Set it between 1GB and 4GB by adding the following - parameter to the Xen Boot Options in the `grub.conf `_ file. - - .. code-block:: ini - - dom0_mem=1024M - - .. note:: - - The above memory limits are suggestions and should be based on the - available compute host resources. For large hosts that will run many - hundreds of instances, the suggested values may need to be higher. - - .. note:: - - The location of the grub.conf file depends on the host Linux distribution - that you are using. Please refer to the distro documentation for more - details (see `Dom 0 `_ for more resources). - -#. **Dom0 vcpus**: Set the virtual CPUs to 4 and employ CPU pinning by adding - the following parameters to the Xen Boot Options in the `grub.conf - `_ file. - - .. code-block:: ini - - dom0_max_vcpus=4 dom0_vcpus_pin - - .. note:: - - Note that the above virtual CPU limits are suggestions and should be - based on the available compute host resources. For large hosts, that will - run many hundred of instances, the suggested values may need to be - higher. - -#. **PV vs HVM guests**: A Xen virtual machine can be paravirtualized (PV) or - hardware virtualized (HVM). The virtualization mode determines the - interaction between Xen, Dom 0, and the guest VM's kernel. PV guests are - aware of the fact that they are virtualized and will co-operate with Xen and - Dom 0. The choice of virtualization mode determines performance - characteristics. For an overview of Xen virtualization modes, see `Xen Guest - Types `_. - - In OpenStack, customer VMs may run in either PV or HVM mode. The mode is a - property of the operating system image used by the VM, and is changed by - adjusting the image metadata stored in the Image service. The image - metadata can be changed using the :command:`openstack` commands. - - To choose one of the HVM modes (HVM, HVM with PV Drivers or PVHVM), use - :command:`openstack` to set the ``vm_mode`` property to ``hvm``. - - To choose one of the HVM modes (HVM, HVM with PV Drivers or PVHVM), use one - of the following two commands: - - .. code-block:: console - - $ openstack image set --property vm_mode=hvm IMAGE - - To chose PV mode, which is supported by NetBSD, FreeBSD and Linux, use one - of the following two commands - - .. code-block:: console - - $ openstack image set --property vm_mode=xen IMAGE - - .. note:: - - The default for virtualization mode in nova is PV mode. - -#. **Image formats**: Xen supports raw, qcow2 and vhd image formats. For more - information on image formats, refer to the `OpenStack Virtual Image Guide - `__ and the - `Storage Options Guide on the Xen Project Wiki - `_. - -#. **Image metadata**: In addition to the ``vm_mode`` property discussed above, - the ``hypervisor_type`` property is another important component of the image - metadata, especially if your cloud contains mixed hypervisor compute nodes. - Setting the ``hypervisor_type`` property allows the nova scheduler to select - a compute node running the specified hypervisor when launching instances of - the image. 
Image metadata such as ``vm_mode``, ``hypervisor_type``, - architecture, and others can be set when importing the image to the Image - service. The metadata can also be changed using the :command:`openstack` - commands: - - .. code-block:: console - - $ openstack image set --property hypervisor_type=xen vm_mode=hvm IMAGE - - For more more information on image metadata, refer to the `OpenStack Virtual - Image Guide `__. - -#. **Libguestfs file injection**: OpenStack compute nodes can use `libguestfs - `_ to inject files into an instance's image prior to - launching the instance. libguestfs uses libvirt's QEMU driver to start a - qemu process, which is then used to inject files into the image. When using - libguestfs for file injection, the compute node must have the libvirt qemu - driver installed, in addition to the Xen driver. In RPM based distributions, - the qemu driver is provided by the ``libvirt-daemon-qemu`` package. In - Debian and Ubuntu, the qemu driver is provided by the ``libvirt-bin`` - package. - -Troubleshoot Xen with libvirt -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -**Important log files**: When an instance fails to start, or when you come -across other issues, you should first consult the following log files: - -* ``/var/log/nova/nova-compute.log`` - -* ``/var/log/libvirt/libxl/libxl-driver.log``, - -* ``/var/log/xen/qemu-dm-${instancename}.log``, - -* ``/var/log/xen/xen-hotplug.log``, - -* ``/var/log/xen/console/guest-${instancename}`` (to enable see `Enabling Guest - Console Logs - `_) - -* Host Console Logs (read `Enabling and Retrieving Host Console Logs - `_). - -If you need further help you can ask questions on the mailing lists `xen-users@ -`_, -`wg-openstack@ `_ or `raise a bug `_ against Xen. - -Known issues -~~~~~~~~~~~~ - -* **Networking**: Xen via libvirt is currently only supported with - nova-network. Fixes for a number of bugs are currently being worked on to - make sure that Xen via libvirt will also work with OpenStack Networking - (neutron). - - .. todo:: Is this still true? - -* **Live migration**: Live migration is supported in the libvirt libxl driver - since version 1.2.5. However, there were a number of issues when used with - OpenStack, in particular with libvirt migration protocol compatibility. It is - worth mentioning that libvirt 1.3.0 addresses most of these issues. We do - however recommend using libvirt 1.3.2, which is fully supported and tested as - part of the Xen Project CI loop. It addresses live migration monitoring - related issues and adds support for peer-to-peer migration mode, which nova - relies on. - -* **Live migration monitoring**: On compute nodes running Kilo or later, live - migration monitoring relies on libvirt APIs that are only implemented from - libvirt version 1.3.1 onwards. When attempting to live migrate, the migration - monitoring thread would crash and leave the instance state as "MIGRATING". If - you experience such an issue and you are running on a version released before - libvirt 1.3.1, make sure you backport libvirt commits ad71665 and b7b4391 - from upstream. - -Additional information and resources -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -The following section contains links to other useful resources. 
-
-* `wiki.xenproject.org/wiki/OpenStack `_
-  OpenStack Documentation on the Xen Project wiki
-
-* `wiki.xenproject.org/wiki/OpenStack_CI_Loop_for_Xen-Libvirt
-  `_
-  - Information about the Xen Project OpenStack CI Loop
-
-* `wiki.xenproject.org/wiki/OpenStack_via_DevStack
-  `_ - How to set up
-  OpenStack via DevStack
-
-* `Mailing lists for OpenStack related issues and questions
-  `_ - This
-  list is dedicated to coordinating bug fixes and issues across Xen, libvirt
-  and OpenStack and the CI loop.
diff --git a/doc/source/admin/configuration/hypervisor-zvm.rst b/doc/source/admin/configuration/hypervisor-zvm.rst
new file mode 100644
index 00000000000..1915206b99b
--- /dev/null
+++ b/doc/source/admin/configuration/hypervisor-zvm.rst
@@ -0,0 +1,149 @@
+===
+zVM
+===
+
+z/VM System Requirements
+------------------------
+
+* The appropriate APARs must be installed; the current list can be found at
+  z/VM OpenStack Cloud Information (http://www.vm.ibm.com/sysman/osmntlvl.html).
+
+.. note::
+
+   IBM z Systems hardware requirements are based on both the applications and
+   the load on the system.
+
+
+Active Engine Guide
+-------------------
+
+An active engine is used as an initial configuration and management tool during
+deployed machine startup. Currently the z/VM driver uses ``zvmguestconfigure``
+and ``cloud-init`` as a two-stage active engine.
+
+Installation and Configuration of zvmguestconfigure
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Cloudlib4zvm supports initiating changes to a Linux on z Systems virtual
+machine while Linux is shut down or the virtual machine is logged off.
+The changes to Linux are implemented using an activation engine (AE)
+that is run when Linux is booted the next time. The first active engine,
+``zvmguestconfigure``, must be installed in the Linux on z Systems virtual
+server so it can process change request files transmitted by the
+cloudlib4zvm service to the reader of the virtual machine as a class X file.
+
+.. note::
+
+   An additional activation engine, cloud-init, should be installed to handle
+   OpenStack related tailoring of the system.
+   The cloud-init AE relies on tailoring performed by ``zvmguestconfigure``.
+
+Installation and Configuration of cloud-init
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+OpenStack uses cloud-init as its activation engine. Some Linux distributions
+include cloud-init preinstalled or make it available for installation.
+If your distribution does not include cloud-init, you can download
+the code from https://launchpad.net/cloud-init/+download.
+After installation, if you issue the following
+shell command and no errors occur, cloud-init is installed correctly::
+
+  cloud-init init --local
+
+Installation and configuration of cloud-init differs among Linux
+distributions, and cloud-init source code may change. This section provides
+general information, but you may have to tailor cloud-init
+to meet the needs of your Linux distribution. You can find a
+community-maintained list of dependencies at http://ibm.biz/cloudinitLoZ.
+
+As of the Rocky release, the z/VM OpenStack support has been tested with
+cloud-init 0.7.4 and 0.7.5 for RHEL6.x and SLES11.x, 0.7.6 for RHEL7.x and
+SLES12.x, and 0.7.8 for Ubuntu 16.04.
+
+During cloud-init installation, some dependency packages may be required.
+You can use zypper and python setuptools to easily resolve these dependencies.
+See https://pypi.python.org/pypi/setuptools for more information.
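+
+As an illustration only, resolving the dependencies and running the check
+described above on a SLES system might look like the following; the package
+and directory names are examples and vary with the distribution and the
+cloud-init version in use:
+
+.. code-block:: console
+
+   # zypper install python-setuptools
+   # cd cloud-init-0.7.6 && python setup.py install
+   # cloud-init init --local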
+
+
+Image guide
+-----------
+
+This guide describes the requirements and steps to create and
+configure images for use with z/VM.
+
+Image Requirements
+~~~~~~~~~~~~~~~~~~
+
+* The following Linux distributions are supported for deploy:
+
+  * RHEL 6.2, 6.3, 6.4, 6.5, 6.6, and 6.7
+  * RHEL 7.0, 7.1 and 7.2
+  * SLES 11.2, 11.3, and 11.4
+  * SLES 12 and SLES 12.1
+  * Ubuntu 16.04
+
+* A supported root disk type for snapshot/spawn. The following are supported:
+
+  * FBA
+  * ECKD
+
+* An image deployed on a compute node must match the disk type supported by
+  that compute node, as configured by the ``zvm_diskpool_type`` property in
+  the `zvmsdk.conf`_ configuration file in `zvm cloud connector`_.
+  A compute node supports deployment on either an ECKD or FBA image,
+  but not both at the same time. If you wish to switch image types,
+  you need to change the ``zvm_diskpool_type`` and
+  ``zvm_diskpool`` properties in the `zvmsdk.conf`_ file accordingly.
+  Then restart the nova-compute service to make the changes take effect.
+
+* If you deploy an instance with an ephemeral disk, both the root disk and the
+  ephemeral disk will be created with the disk type that was specified by
+  the ``zvm_diskpool_type`` property in the `zvmsdk.conf`_ file. That property
+  can specify either ECKD or FBA.
+
+* The network interfaces must be IPv4 interfaces.
+
+* Image names should be restricted to the UTF-8 subset, which corresponds to
+  the ASCII character set. In addition, special characters such as ``/``, ``\``,
+  ``$``, ``%``, ``@`` should not be used. For the FBA disk type "vm",
+  capture and deploy is supported only for an FBA disk with a single partition.
+  Capture and deploy is not supported for the FBA disk type "vm" on a CMS
+  formatted FBA disk.
+
+* The virtual server/Linux instance used as the source of the new image should
+  meet the following criteria:
+
+  1. The root filesystem must not be on a logical volume.
+
+  2. The minidisk on which the root filesystem resides should be a minidisk of
+     the same type as desired for a subsequent deploy (for example, an ECKD disk
+     image should be captured for a subsequent deploy to an ECKD disk).
+
+  3. The minidisks should not be a full-pack minidisk, since cylinder 0 on
+     full-pack minidisks is reserved, and should be defined with virtual
+     address 0100.
+
+  4. The root disk should have a single partition.
+
+  5. The image being captured should not have any network interface cards (NICs)
+     defined below virtual address 1100.
+
+In addition to the specified criteria, the following recommendations allow for
+efficient use of the image:
+
+* The minidisk on which the root filesystem resides should be defined as a
+  multiple of full gigabytes in size (for example, 1GB or 2GB).
+  OpenStack specifies disk sizes in full gigabyte values, whereas z/VM
+  handles disk sizes in other ways (cylinders for ECKD disks, blocks for FBA
+  disks, and so on). See the appropriate online information if you need to
+  convert cylinders or blocks to gigabytes; for example:
+  http://www.mvsforums.com/helpboards/viewtopic.php?t=8316.
+
+* During subsequent deploys of the image, the OpenStack code will ensure that
+  a disk image is not copied to a disk smaller than the source disk,
+  as this would result in loss of data. The disk specified in
+  the flavor should therefore be equal to or slightly larger than the source
+  virtual machine's root disk.
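+
+As a minimal sketch only, the disk pool settings discussed above might look
+like the following in `zvmsdk.conf`_; the section name and exact option
+spelling are assumptions here and should be confirmed against the
+`zvm cloud connector`_ configuration reference:
+
+.. code-block:: ini
+
+   [zvm]
+   zvm_diskpool_type = ECKD
+   zvm_diskpool = POOL1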
+
+.. _zvmsdk.conf: https://cloudlib4zvm.readthedocs.io/en/latest/configuration.html#configuration-options
+.. _zvm cloud connector: https://cloudlib4zvm.readthedocs.io/en/latest/
diff --git a/doc/source/admin/configuration/hypervisors.rst b/doc/source/admin/configuration/hypervisors.rst
index 88ed368aa90..ed913b083f3 100644
--- a/doc/source/admin/configuration/hypervisors.rst
+++ b/doc/source/admin/configuration/hypervisors.rst
@@ -5,16 +5,15 @@ Hypervisors
 .. toctree::
    :maxdepth: 1
 
-   hypervisor-basics.rst
-   hypervisor-kvm.rst
-   hypervisor-qemu.rst
-   hypervisor-xen-api.rst
-   hypervisor-xen-libvirt.rst
-   hypervisor-lxc.rst
-   hypervisor-vmware.rst
-   hypervisor-hyper-v.rst
-   hypervisor-virtuozzo.rst
-   hypervisor-powervm.rst
+   hypervisor-kvm
+   hypervisor-qemu
+   hypervisor-lxc
+   hypervisor-vmware
+   hypervisor-hyper-v
+   hypervisor-virtuozzo
+   hypervisor-powervm
+   hypervisor-zvm
+   hypervisor-ironic
 
 OpenStack Compute supports many hypervisors, which might make it difficult for
 you to choose one. Most installations use only one hypervisor. However, you
@@ -38,31 +37,52 @@ The following hypervisors are supported:
 * `VMware vSphere`_ 5.1.0 and newer - Runs VMware-based Linux and Windows
   images through a connection with a vCenter server.
 
-* `Xen (using libvirt)`_ - Xen Project Hypervisor using libvirt as
-  management interface into ``nova-compute`` to run Linux, Windows, FreeBSD and
-  NetBSD virtual machines.
-
-* `XenServer`_ - XenServer, Xen Cloud Platform (XCP) and other XAPI based Xen
-  variants runs Linux or Windows virtual machines. You must install the
-  ``nova-compute`` service in a para-virtualized VM.
-
 * `Hyper-V`_ - Server virtualization with Microsoft Hyper-V, use to run
   Windows, Linux, and FreeBSD virtual machines. Runs ``nova-compute`` natively
   on the Windows virtualization platform.
 
 * `Virtuozzo`_ 7.0.0 and newer - OS Containers and Kernel-based Virtual
-  Machines supported via libvirt virt_type=parallels. The supported formats
-  include ploop and qcow2 images.
+  Machines supported. The supported formats include ploop and qcow2 images.
 
-* `PowerVM`_ Server virtualization with IBM PowerVM for AIX, IBM i, and Linux
+* `PowerVM`_ - Server virtualization with IBM PowerVM for AIX, IBM i, and Linux
   workloads on the Power Systems platform.
 
-.. _KVM: http://www.linux-kvm.org/page/Main_Page
-.. _LXC: https://linuxcontainers.org/
-.. _QEMU: http://wiki.qemu.org/Manual
-.. _VMware vSphere: https://www.vmware.com/support/vsphere-hypervisor
-.. _Xen (using libvirt): http://www.xenproject.org
-.. _XenServer: http://xenserver.org
-.. _Hyper-V: https://azure.microsoft.com/en-us/
-.. _Virtuozzo: https://www.virtuozzo.com/products/vip.html#product-virtuozzo/
+* `zVM`_ - Server virtualization on z Systems and IBM LinuxONE, which can run
+  Linux, z/OS and more.
+
+* `Ironic`_ - OpenStack project which provisions bare metal (as opposed to
+  virtual) machines.
+
+Nova supports hypervisors via virt drivers. Nova has the following in-tree
+virt drivers:
+
+* :oslo.config:option:`compute_driver` = ``libvirt.LibvirtDriver``
+
+  This driver runs on Linux and supports multiple hypervisor backends, which
+  can be configured via the :oslo.config:option:`libvirt.virt_type` config
+  option.
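+
+  As an illustrative sketch only (the values shown are examples, not
+  defaults), a KVM-backed deployment would combine these options in
+  ``nova.conf`` as follows:
+
+  .. code-block:: ini
+
+     [DEFAULT]
+     compute_driver = libvirt.LibvirtDriver
+
+     [libvirt]
+     virt_type = kvm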
+ +* :oslo.config:option:`compute_driver` = ``ironic.IronicDriver`` + +* :oslo.config:option:`compute_driver` = ``vmwareapi.VMwareVCDriver`` + +* :oslo.config:option:`compute_driver` = ``hyperv.HyperVDriver`` + +* :oslo.config:option:`compute_driver` = ``powervm.PowerVMDriver`` + +* :oslo.config:option:`compute_driver` = ``zvm.ZVMDriver`` + +* :oslo.config:option:`compute_driver` = ``fake.FakeDriver`` + + This driver does not spawn any virtual machines and therefore should only be + used during testing. + +.. _KVM: https://www.linux-kvm.org/page/Main_Page +.. _LXC: https://linuxcontainers.org +.. _QEMU: https://wiki.qemu.org/Manual +.. _VMware vSphere: https://www.vmware.com/support/vsphere-hypervisor.html +.. _Hyper-V: https://docs.microsoft.com/en-us/windows-server/virtualization/hyper-v/hyper-v-technology-overview +.. _Virtuozzo: https://www.virtuozzo.com/products/vz7.html .. _PowerVM: https://www.ibm.com/us-en/marketplace/ibm-powervm +.. _zVM: https://www.ibm.com/it-infrastructure/z/zvm +.. _Ironic: https://docs.openstack.org/ironic/latest/ diff --git a/doc/source/admin/configuration/index.rst b/doc/source/admin/configuration/index.rst index 51a3b810eff..233597b1fe4 100644 --- a/doc/source/admin/configuration/index.rst +++ b/doc/source/admin/configuration/index.rst @@ -1,6 +1,6 @@ -=============== - Configuration -=============== +============= +Configuration +============= To configure your Compute installation, you must define configuration options in these files: @@ -19,12 +19,11 @@ A list of config options based on different topics can be found below: .. toctree:: :maxdepth: 1 - /admin/configuration/api.rst - /admin/configuration/resize.rst - /admin/configuration/fibre-channel.rst - /admin/configuration/iscsi-offload.rst - /admin/configuration/hypervisors.rst - /admin/configuration/schedulers.rst - /admin/configuration/cells.rst - /admin/configuration/logs.rst - /admin/configuration/samples/index.rst + /admin/configuration/api + /admin/configuration/resize + /admin/configuration/cross-cell-resize + /admin/configuration/fibre-channel + /admin/configuration/iscsi-offload + /admin/configuration/hypervisors + /admin/configuration/logs + /admin/configuration/samples/index diff --git a/doc/source/admin/configuration/iscsi-offload.rst b/doc/source/admin/configuration/iscsi-offload.rst index ac477082ebd..921869db3cf 100644 --- a/doc/source/admin/configuration/iscsi-offload.rst +++ b/doc/source/admin/configuration/iscsi-offload.rst @@ -9,8 +9,8 @@ desired. Once an open-iscsi interface is configured, the iface name parameter for use. All iSCSI sessions will be bound to this iSCSI interface. Currently supported transports (``iface.transport_name``) are ``be2iscsi``, -``bnx2i``, ``cxgb3i``, ``cxgb4i``, ``qla4xxx``, ``ocs``. Configuration changes -are required on the compute node only. +``bnx2i``, ``cxgb3i``, ``cxgb4i``, ``qla4xxx``, ``ocs``, ``tcp``. Configuration +changes are required on the compute node only. iSER is supported using the separate iSER LibvirtISERVolumeDriver and will be rejected if used via the ``iscsi_iface`` parameter. @@ -69,5 +69,5 @@ iSCSI iface configuration to work. Some transports may require ``iface.ipaddress`` and ``iface.net_ifacename`` as well to bind correctly. - Detailed configuration instructions can be found at - http://www.open-iscsi.org/docs/README. 
+   Detailed configuration instructions can be found at:
+   https://github.com/open-iscsi/open-iscsi/blob/master/README
diff --git a/doc/source/admin/configuration/logs.rst b/doc/source/admin/configuration/logs.rst
index 74d3919fcc4..7ecdf1b358f 100644
--- a/doc/source/admin/configuration/logs.rst
+++ b/doc/source/admin/configuration/logs.rst
@@ -22,21 +22,9 @@ The corresponding log file of each Compute service is stored in the
    * - ``nova-conductor.log``
      - ``openstack-nova-conductor``
      - ``nova-conductor``
-   * - ``nova-consoleauth.log``
-     - ``openstack-nova-consoleauth``
-     - ``nova-consoleauth``
-   * - ``nova-network.log`` [#a]_
-     - ``openstack-nova-network``
-     - ``nova-network``
    * - ``nova-manage.log``
      - ``nova-manage``
      - ``nova-manage``
    * - ``nova-scheduler.log``
      - ``openstack-nova-scheduler``
      - ``nova-scheduler``
-
-.. rubric:: Footnotes
-
-.. [#a] The ``nova`` network service (``openstack-nova-network``/
-   ``nova-network``) only runs in deployments that are not configured
-   to use the Networking service (``neutron``).
diff --git a/doc/source/admin/configuration/resize.rst b/doc/source/admin/configuration/resize.rst
index 7dd2041815f..abf0828f33b 100644
--- a/doc/source/admin/configuration/resize.rst
+++ b/doc/source/admin/configuration/resize.rst
@@ -1,12 +1,19 @@
-================
-Configure resize
-================
+======
+Resize
+======
 
 Resize (or Server resize) is the ability to change the flavor of a server,
 thus allowing it to upscale or downscale according to user needs. For this
 feature to work properly, you might need to configure some underlying virt
 layers.
 
-.. todo:: This document needs to be updated for other virt drivers, shared
+This document describes how to configure hosts for standard resize.
+For information on :term:`cross-cell resize <Cross-Cell Resize>`, refer to
+:doc:`/admin/configuration/cross-cell-resize`.
+
+Virt drivers
+------------
+
+.. todo:: This section needs to be updated for other virt drivers, shared
    storage considerations, etc.
 
 KVM
@@ -20,9 +27,13 @@ compute host to another is needed to copy the VM file across.
 Cloud end users can find out how to resize a server by reading
 :doc:`/user/resize`.
 
-XenServer
-~~~~~~~~~
-
-To get resize to work with XenServer (and XCP), you need to establish a root
-trust between all hypervisor nodes and provide an ``/image`` mount point to
-your hypervisors dom0.
+Automatic confirm
+-----------------
+
+There is a periodic task configured by the configuration option
+:oslo.config:option:`resize_confirm_window` (in seconds).
+If this value is not 0, the ``nova-compute`` service will check whether
+servers have been in a resized state longer than the value of
+:oslo.config:option:`resize_confirm_window` and, if so, will automatically
+confirm the resize of those servers.
diff --git a/doc/source/admin/configuration/samples/index.rst b/doc/source/admin/configuration/samples/index.rst
index 6db5a16a488..30035c1fb0f 100644
--- a/doc/source/admin/configuration/samples/index.rst
+++ b/doc/source/admin/configuration/samples/index.rst
@@ -7,6 +7,15 @@ Files in this section can be found in ``/etc/nova``.
 .. toctree::
    :maxdepth: 2
 
-   api-paste.ini.rst
-   policy.yaml.rst
-   rootwrap.conf.rst
+   api-paste.ini
+   rootwrap.conf
+
+.. # NOTE(gmann): Keep policy sample file for HTML only.
+   # Sample files are too large and cause a TeX memory issue.
+   # ref bug# https://bugs.launchpad.net/nova/+bug/1883200
+.. only:: html
+
+   .. 
toctree:: + :maxdepth: 2 + + policy.yaml diff --git a/doc/source/admin/configuration/schedulers.rst b/doc/source/admin/configuration/schedulers.rst deleted file mode 100644 index 4ef7efcbd9b..00000000000 --- a/doc/source/admin/configuration/schedulers.rst +++ /dev/null @@ -1,1340 +0,0 @@ -================== -Compute schedulers -================== - -Compute uses the ``nova-scheduler`` service to determine how to dispatch -compute requests. For example, the ``nova-scheduler`` service determines on -which host a VM should launch. In the context of filters, the term ``host`` -means a physical node that has a ``nova-compute`` service running on it. You -can configure the scheduler through a variety of options. - -Compute is configured with the following default scheduler options in the -``/etc/nova/nova.conf`` file: - -.. code-block:: ini - - [scheduler] - driver = filter_scheduler - - [filter_scheduler] - available_filters = nova.scheduler.filters.all_filters - enabled_filters = RetryFilter, AvailabilityZoneFilter, ComputeFilter, ComputeCapabilitiesFilter, ImagePropertiesFilter, ServerGroupAntiAffinityFilter, ServerGroupAffinityFilter - -By default, the scheduler ``driver`` is configured as a filter scheduler, as -described in the next section. In the default configuration, this scheduler -considers hosts that meet all the following criteria: - -* Have not been attempted for scheduling purposes (``RetryFilter``). - -* Are in the requested availability zone (``AvailabilityZoneFilter``). - -* Can service the request (``ComputeFilter``). - -* Satisfy the extra specs associated with the instance type - (``ComputeCapabilitiesFilter``). - -* Satisfy any architecture, hypervisor type, or virtual machine mode properties - specified on the instance's image properties (``ImagePropertiesFilter``). - -* Are on a different host than other instances of a group (if requested) - (``ServerGroupAntiAffinityFilter``). - -* Are in a set of group hosts (if requested) (``ServerGroupAffinityFilter``). - -The scheduler chooses a new host when an instance is migrated. - -When evacuating instances from a host, the scheduler service honors the target -host defined by the administrator on the :command:`nova evacuate` command. If -a target is not defined by the administrator, the scheduler determines the -target host. For information about instance evacuation, see -:ref:`Evacuate instances `. - -.. _compute-scheduler-filters: - -Filter scheduler -~~~~~~~~~~~~~~~~ - -The filter scheduler (``nova.scheduler.filter_scheduler.FilterScheduler``) is -the default scheduler for scheduling virtual machine instances. It supports -filtering and weighting to make informed decisions on where a new instance -should be created. - -When the filter scheduler receives a request for a resource, it first applies -filters to determine which hosts are eligible for consideration when -dispatching a resource. Filters are binary: either a host is accepted by the -filter, or it is rejected. Hosts that are accepted by the filter are then -processed by a different algorithm to decide which hosts to use for that -request, described in the :ref:`weights` section. - -**Filtering** - -.. figure:: /figures/filteringWorkflow1.png - -The ``available_filters`` configuration option in ``nova.conf`` -provides the Compute service with the list of the filters that are available -for use by the scheduler. The default setting specifies all of the filters that -are included with the Compute service: - -.. 
code-block:: ini - - [filter_scheduler] - available_filters = nova.scheduler.filters.all_filters - -This configuration option can be specified multiple times. For example, if you -implemented your own custom filter in Python called ``myfilter.MyFilter`` and -you wanted to use both the built-in filters and your custom filter, your -``nova.conf`` file would contain: - -.. code-block:: ini - - [filter_scheduler] - available_filters = nova.scheduler.filters.all_filters - available_filters = myfilter.MyFilter - -The ``enabled_filters`` configuration option in ``nova.conf`` defines -the list of filters that are applied by the ``nova-scheduler`` service. The -default filters are: - -.. code-block:: ini - - [filter_scheduler] - enabled_filters = RetryFilter, AvailabilityZoneFilter, ComputeCapabilitiesFilter, ImagePropertiesFilter, ServerGroupAntiAffinityFilter, ServerGroupAffinityFilter - -Compute filters -~~~~~~~~~~~~~~~ - -The following sections describe the available compute filters. - -AggregateCoreFilter -------------------- - -Filters host by CPU core numbers with a per-aggregate ``cpu_allocation_ratio`` -value. If the per-aggregate value is not found, the value falls back to the -global setting. If the host is in more than one aggregate and more than one -value is found, the minimum value will be used. For information about how to -use this filter, see :ref:`host-aggregates`. See also :ref:`CoreFilter`. - -AggregateDiskFilter -------------------- - -Filters host by disk allocation with a per-aggregate ``disk_allocation_ratio`` -value. If the per-aggregate value is not found, the value falls back to the -global setting. If the host is in more than one aggregate and more than one -value is found, the minimum value will be used. For information about how to -use this filter, see :ref:`host-aggregates`. See also :ref:`DiskFilter`. - -AggregateImagePropertiesIsolation ---------------------------------- - -Matches properties defined in an image's metadata against those of aggregates -to determine host matches: - -* If a host belongs to an aggregate and the aggregate defines one or more - metadata that matches an image's properties, that host is a candidate to boot - the image's instance. - -* If a host does not belong to any aggregate, it can boot instances from all - images. - -For example, the following aggregate ``myWinAgg`` has the Windows operating -system as metadata (named 'windows'): - -.. code-block:: console - - $ openstack aggregate show myWinAgg - +-------------------+----------------------------+ - | Field | Value | - +-------------------+----------------------------+ - | availability_zone | zone1 | - | created_at | 2017-01-01T15:36:44.000000 | - | deleted | False | - | deleted_at | None | - | hosts | [u'sf-devel'] | - | id | 1 | - | name | myWinAgg | - | properties | os_distro='windows' | - | updated_at | None | - +-------------------+----------------------------+ - -In this example, because the following Win-2012 image has the ``windows`` -property, it boots on the ``sf-devel`` host (all other filters being equal): - -.. code-block:: console - - $ openstack image show Win-2012 - +------------------+------------------------------------------------------+ - | Field | Value | - +------------------+------------------------------------------------------+ - | checksum | ee1eca47dc88f4879d8a229cc70a07c6 | - | container_format | bare | - | created_at | 2016-12-13T09:30:30Z | - | disk_format | qcow2 | - | ... | - | name | Win-2012 | - | ... | - | properties | os_distro='windows' | - | ... 
| - -You can configure the ``AggregateImagePropertiesIsolation`` filter by using the -following options in the ``nova.conf`` file: - -.. code-block:: ini - - # Considers only keys matching the given namespace (string). - # Multiple values can be given, as a comma-separated list. - aggregate_image_properties_isolation_namespace = - - # Separator used between the namespace and keys (string). - aggregate_image_properties_isolation_separator = . - -.. _AggregateInstanceExtraSpecsFilter: - -AggregateInstanceExtraSpecsFilter ---------------------------------- - -Matches properties defined in extra specs for an instance type against -admin-defined properties on a host aggregate. Works with specifications that -are scoped with ``aggregate_instance_extra_specs``. Multiple values can be -given, as a comma-separated list. For backward compatibility, also works with -non-scoped specifications; this action is highly discouraged because it -conflicts with :ref:`ComputeCapabilitiesFilter` filter when you enable both -filters. For information about how to use this filter, see the -:ref:`host-aggregates` section. - -AggregateIoOpsFilter --------------------- - -Filters host by disk allocation with a per-aggregate ``max_io_ops_per_host`` -value. If the per-aggregate value is not found, the value falls back to the -global setting. If the host is in more than one aggregate and more than one -value is found, the minimum value will be used. For information about how to -use this filter, see :ref:`host-aggregates`. See also :ref:`IoOpsFilter`. - -AggregateMultiTenancyIsolation ------------------------------- - -Ensures that the tenant (or list of tenants) creates all instances only on -specific :ref:`host-aggregates`. If a host is in an aggregate that has the -``filter_tenant_id`` metadata key, the host creates instances from only that -tenant or list of tenants. A host can be in different aggregates. If a host -does not belong to an aggregate with the metadata key, the host can create -instances from all tenants. This setting does not isolate the aggregate from -other tenants. Any other tenant can continue to build instances on the -specified aggregate. - -AggregateNumInstancesFilter ---------------------------- - -Filters host by number of instances with a per-aggregate -``max_instances_per_host`` value. If the per-aggregate value is not found, the -value falls back to the global setting. If the host is in more than one -aggregate and thus more than one value is found, the minimum value will be -used. For information about how to use this filter, see -:ref:`host-aggregates`. See also :ref:`NumInstancesFilter`. - -AggregateRamFilter ------------------- - -Filters host by RAM allocation of instances with a per-aggregate -``ram_allocation_ratio`` value. If the per-aggregate value is not found, the -value falls back to the global setting. If the host is in more than one -aggregate and thus more than one value is found, the minimum value will be -used. For information about how to use this filter, see -:ref:`host-aggregates`. See also :ref:`ramfilter`. - -AggregateTypeAffinityFilter ---------------------------- - -This filter passes hosts if no ``instance_type`` key is set or the -``instance_type`` aggregate metadata value contains the name of the -``instance_type`` requested. The value of the ``instance_type`` metadata entry -is a string that may contain either a single ``instance_type`` name or a -comma-separated list of ``instance_type`` names, such as ``m1.nano`` or -``m1.nano,m1.small``. 
For information about how to use this filter, see -:ref:`host-aggregates`. - -AllHostsFilter --------------- - -This is a no-op filter. It does not eliminate any of the available hosts. - -AvailabilityZoneFilter ----------------------- - -Filters hosts by availability zone. You must enable this filter for the -scheduler to respect availability zones in requests. - -.. _ComputeCapabilitiesFilter: - -ComputeCapabilitiesFilter -------------------------- - -Matches properties defined in extra specs for an instance type against compute -capabilities. If an extra specs key contains a colon (``:``), anything before -the colon is treated as a namespace and anything after the colon is treated as -the key to be matched. If a namespace is present and is not ``capabilities``, -the filter ignores the namespace. For backward compatibility, also treats the -extra specs key as the key to be matched if no namespace is present; this -action is highly discouraged because it conflicts with -:ref:`AggregateInstanceExtraSpecsFilter` filter when you enable both filters. - -Some virt drivers support reporting CPU traits to the Placement service. With that -feature available, you should consider using traits in flavors instead of -ComputeCapabilitiesFilter, because traits provide consistent naming for CPU -features in some virt drivers and querying traits is efficient. For more detail, please see -`Support Matrix `_, -:ref:`Required traits `, -:ref:`Forbidden traits ` and -`Report CPU features to the Placement service `_. - -.. _ComputeFilter: - -ComputeFilter -------------- - -Passes all hosts that are operational and enabled. - -In general, you should always enable this filter. - -.. _CoreFilter: - -CoreFilter ----------- - -Only schedules instances on hosts if sufficient CPU cores are available. If -this filter is not set, the scheduler might over-provision a host based on -cores. For example, the virtual cores running on an instance may exceed the -physical cores. - -You can configure this filter to enable a fixed amount of vCPU overcommitment -by using the ``cpu_allocation_ratio`` configuration option in ``nova.conf``. -The default setting is: - -.. code-block:: ini - - cpu_allocation_ratio = 16.0 - -With this setting, if 8 vCPUs are on a node, the scheduler allows instances up -to 128 vCPU to be run on that node. - -To disallow vCPU overcommitment set: - -.. code-block:: ini - - cpu_allocation_ratio = 1.0 - -.. note:: - - The Compute API always returns the actual number of CPU cores available on a - compute node regardless of the value of the ``cpu_allocation_ratio`` - configuration key. As a result changes to the ``cpu_allocation_ratio`` are - not reflected via the command line clients or the dashboard. Changes to - this configuration key are only taken into account internally in the - scheduler. - -DifferentHostFilter -------------------- - -Schedules the instance on a different host from a set of instances. To take -advantage of this filter, the requester must pass a scheduler hint, using -``different_host`` as the key and a list of instance UUIDs as the value. This -filter is the opposite of the ``SameHostFilter``. Using the -:command:`openstack server create` command, use the ``--hint`` flag. For -example: - -.. code-block:: console - - $ openstack server create --image cedef40a-ed67-4d10-800e-17455edce175 \ - --flavor 1 --hint different_host=a0cf03a5-d921-4877-bb5c-86d26cf818e1 \ - --hint different_host=8c19174f-4220-44f0-824a-cd1eeef10287 server-1 - -With the API, use the ``os:scheduler_hints`` key. 
For example: - -.. code-block:: json - - { - "server": { - "name": "server-1", - "imageRef": "cedef40a-ed67-4d10-800e-17455edce175", - "flavorRef": "1" - }, - "os:scheduler_hints": { - "different_host": [ - "a0cf03a5-d921-4877-bb5c-86d26cf818e1", - "8c19174f-4220-44f0-824a-cd1eeef10287" - ] - } - } - -.. _DiskFilter: - -DiskFilter ----------- - -Only schedules instances on hosts if there is sufficient disk space available -for root and ephemeral storage. - -You can configure this filter to enable a fixed amount of disk overcommitment -by using the ``disk_allocation_ratio`` configuration option in the -``nova.conf`` configuration file. The default setting disables the possibility -of the overcommitment and allows launching a VM only if there is a sufficient -amount of disk space available on a host: - -.. code-block:: ini - - disk_allocation_ratio = 1.0 - -DiskFilter always considers the value of the ``disk_available_least`` property -and not the one of the ``free_disk_gb`` property of a hypervisor's statistics: - -.. code-block:: console - - $ openstack hypervisor stats show - +----------------------+-------+ - | Field | Value | - +----------------------+-------+ - | count | 1 | - | current_workload | 0 | - | disk_available_least | 14 | - | free_disk_gb | 27 | - | free_ram_mb | 15374 | - | local_gb | 27 | - | local_gb_used | 0 | - | memory_mb | 15886 | - | memory_mb_used | 512 | - | running_vms | 0 | - | vcpus | 8 | - | vcpus_used | 0 | - +----------------------+-------+ - -As it can be viewed from the command output above, the amount of the available -disk space can be less than the amount of the free disk space. It happens -because the ``disk_available_least`` property accounts for the virtual size -rather than the actual size of images. If you use an image format that is -sparse or copy on write so that each virtual instance does not require a 1:1 -allocation of a virtual disk to a physical storage, it may be useful to allow -the overcommitment of disk space. - -To enable scheduling instances while overcommitting disk resources on the node, -adjust the value of the ``disk_allocation_ratio`` configuration option to -greater than ``1.0``: - -.. code-block:: none - - disk_allocation_ratio > 1.0 - -.. note:: - - If the value is set to ``>1``, we recommend keeping track of the free disk - space, as the value approaching ``0`` may result in the incorrect - functioning of instances using it at the moment. - -.. _ImagePropertiesFilter: - -ImagePropertiesFilter ---------------------- - -Filters hosts based on properties defined on the instance's image. It passes -hosts that can support the specified image properties contained in the -instance. Properties include the architecture, hypervisor type, hypervisor -version (for Xen hypervisor type only), and virtual machine mode. - -For example, an instance might require a host that runs an ARM-based processor, -and QEMU as the hypervisor. You can decorate an image with these properties by -using: - -.. code-block:: console - - $ openstack image set --architecture arm --property hypervisor_type=qemu \ - img-uuid - -The image properties that the filter checks for are: - -``architecture`` - describes the machine architecture required by the image. Examples are - ``i686``, ``x86_64``, ``arm``, and ``ppc64``. - -``hypervisor_type`` - describes the hypervisor required by the image. Examples are ``xen``, - ``qemu``, and ``xenapi``. - - .. note:: - - ``qemu`` is used for both QEMU and KVM hypervisor types. 
- -``hypervisor_version_requires`` - describes the hypervisor version required by the image. The property is - supported for Xen hypervisor type only. It can be used to enable support for - multiple hypervisor versions, and to prevent instances with newer Xen tools - from being provisioned on an older version of a hypervisor. If available, the - property value is compared to the hypervisor version of the compute host. - - To filter the hosts by the hypervisor version, add the - ``hypervisor_version_requires`` property on the image as metadata and pass an - operator and a required hypervisor version as its value: - - .. code-block:: console - - $ openstack image set --property hypervisor_type=xen --property \ - hypervisor_version_requires=">=4.3" img-uuid - -``vm_mode`` - describes the hypervisor application binary interface (ABI) required by the - image. Examples are ``xen`` for Xen 3.0 paravirtual ABI, ``hvm`` for native - ABI, ``uml`` for User Mode Linux paravirtual ABI, ``exe`` for container virt - executable ABI. - -IsolatedHostsFilter -------------------- - -Allows the admin to define a special (isolated) set of images and a special -(isolated) set of hosts, such that the isolated images can only run on the -isolated hosts, and the isolated hosts can only run isolated images. The flag -``restrict_isolated_hosts_to_isolated_images`` can be used to force isolated -hosts to only run isolated images. - -The logic within the filter depends on the -``restrict_isolated_hosts_to_isolated_images`` config option, which defaults -to True. When True, a volume-backed instance will not be put on an isolated -host. When False, a volume-backed instance can go on any host, isolated or -not. - -The admin must specify the isolated set of images and hosts in the -``nova.conf`` file using the ``isolated_hosts`` and ``isolated_images`` -configuration options. For example: - -.. code-block:: ini - - [filter_scheduler] - isolated_hosts = server1, server2 - isolated_images = 342b492c-128f-4a42-8d3a-c5088cf27d13, ebd267a6-ca86-4d6c-9a0e-bd132d6b7d09 - -.. _IoOpsFilter: - -IoOpsFilter ------------ - -The IoOpsFilter filters hosts by concurrent I/O operations on it. Hosts with -too many concurrent I/O operations will be filtered out. The -``max_io_ops_per_host`` option specifies the maximum number of I/O intensive -instances allowed to run on a host. A host will be ignored by the scheduler if -more than ``max_io_ops_per_host`` instances in build, resize, snapshot, -migrate, rescue or unshelve task states are running on it. - -JsonFilter ----------- - -The JsonFilter allows a user to construct a custom filter by passing a -scheduler hint in JSON format. The following operators are supported: - -* = -* < -* > -* in -* <= -* >= -* not -* or -* and - -The filter supports the following variables: - -* ``$free_ram_mb`` -* ``$free_disk_mb`` -* ``$total_usable_ram_mb`` -* ``$vcpus_total`` -* ``$vcpus_used`` - -Using the :command:`openstack server create` command, use the ``--hint`` flag: - -.. code-block:: console - - $ openstack server create --image 827d564a-e636-4fc4-a376-d36f7ebe1747 \ - --flavor 1 --hint query='[">=","$free_ram_mb",1024]' server1 - -With the API, use the ``os:scheduler_hints`` key: - -.. code-block:: json - - { - "server": { - "name": "server-1", - "imageRef": "cedef40a-ed67-4d10-800e-17455edce175", - "flavorRef": "1" - }, - "os:scheduler_hints": { - "query": "[>=,$free_ram_mb,1024]" - } - } - -MetricsFilter -------------- - -Filters hosts based on meters ``weight_setting``. 
Only hosts with the -available meters are passed so that the metrics weigher will not fail due to -these hosts. - -NUMATopologyFilter ------------------- - -Filters hosts based on the NUMA topology that was specified for the instance -through the use of flavor ``extra_specs`` in combination with the image -properties, as described in detail in the `related nova-spec document -`_. Filter -will try to match the exact NUMA cells of the instance to those of the host. It -will consider the standard over-subscription limits for each host NUMA cell, -and provide limits to the compute host accordingly. - -.. note:: - - If instance has no topology defined, it will be considered for any host. If - instance has a topology defined, it will be considered only for NUMA capable - hosts. - -.. _NumInstancesFilter: - -NumInstancesFilter ------------------- - -Hosts that have more instances running than specified by the -``max_instances_per_host`` option are filtered out when this filter is in -place. - -PciPassthroughFilter --------------------- - -The filter schedules instances on a host if the host has devices that meet the -device requests in the ``extra_specs`` attribute for the flavor. - -.. _RamFilter: - -RamFilter ---------- - -Only schedules instances on hosts that have sufficient RAM available. If this -filter is not set, the scheduler may over provision a host based on RAM (for -example, the RAM allocated by virtual machine instances may exceed the physical -RAM). - -You can configure this filter to enable a fixed amount of RAM overcommitment by -using the ``ram_allocation_ratio`` configuration option in ``nova.conf``. The -default setting is: - -.. code-block:: ini - - ram_allocation_ratio = 1.5 - -This setting enables 1.5 GB instances to run on any compute node with 1 GB of -free RAM. - -RetryFilter ------------ - -Filters out hosts that have already been attempted for scheduling purposes. If -the scheduler selects a host to respond to a service request, and the host -fails to respond to the request, this filter prevents the scheduler from -retrying that host for the service request. - -This filter is only useful if the ``scheduler_max_attempts`` configuration -option is set to a value greater than zero. - -SameHostFilter --------------- - -Schedules the instance on the same host as another instance in a set of -instances. To take advantage of this filter, the requester must pass a -scheduler hint, using ``same_host`` as the key and a list of instance UUIDs as -the value. This filter is the opposite of the ``DifferentHostFilter``. Using -the :command:`openstack server create` command, use the ``--hint`` flag: - -.. code-block:: console - - $ openstack server create --image cedef40a-ed67-4d10-800e-17455edce175 \ - --flavor 1 --hint same_host=a0cf03a5-d921-4877-bb5c-86d26cf818e1 \ - --hint same_host=8c19174f-4220-44f0-824a-cd1eeef10287 server-1 - -With the API, use the ``os:scheduler_hints`` key: - -.. code-block:: json - - { - "server": { - "name": "server-1", - "imageRef": "cedef40a-ed67-4d10-800e-17455edce175", - "flavorRef": "1" - }, - "os:scheduler_hints": { - "same_host": [ - "a0cf03a5-d921-4877-bb5c-86d26cf818e1", - "8c19174f-4220-44f0-824a-cd1eeef10287" - ] - } - } - -.. _ServerGroupAffinityFilter: - -ServerGroupAffinityFilter -------------------------- - -The ServerGroupAffinityFilter ensures that an instance is scheduled on to a -host from a set of group hosts. 
To take advantage of this filter, the requester -must create a server group with an ``affinity`` policy, and pass a scheduler -hint, using ``group`` as the key and the server group UUID as the value. Using -the :command:`openstack server create` command, use the ``--hint`` flag. For -example: - -.. code-block:: console - - $ openstack server group create --policy affinity group-1 - $ openstack server create --image IMAGE_ID --flavor 1 \ - --hint group=SERVER_GROUP_UUID server-1 - -.. _ServerGroupAntiAffinityFilter: - -ServerGroupAntiAffinityFilter ------------------------------ - -The ServerGroupAntiAffinityFilter ensures that each instance in a group is on a -different host. To take advantage of this filter, the requester must create a -server group with an ``anti-affinity`` policy, and pass a scheduler hint, using -``group`` as the key and the server group UUID as the value. Using the -:command:`openstack server create` command, use the ``--hint`` flag. For -example: - -.. code-block:: console - - $ openstack server group create --policy anti-affinity group-1 - $ openstack server create --image IMAGE_ID --flavor 1 \ - --hint group=SERVER_GROUP_UUID server-1 - -SimpleCIDRAffinityFilter ------------------------- - -Schedules the instance based on host IP subnet range. To take advantage of -this filter, the requester must specify a range of valid IP address in CIDR -format, by passing two scheduler hints: - -``build_near_host_ip`` - The first IP address in the subnet (for example, ``192.168.1.1``) - -``cidr`` - The CIDR that corresponds to the subnet (for example, ``/24``) - -Using the :command:`openstack server create` command, use the ``--hint`` flag. -For example, to specify the IP subnet ``192.168.1.1/24``: - -.. code-block:: console - - $ openstack server create --image cedef40a-ed67-4d10-800e-17455edce175 \ - --flavor 1 --hint build_near_host_ip=192.168.1.1 --hint cidr=/24 server-1 - -With the API, use the ``os:scheduler_hints`` key: - -.. code-block:: json - - { - "server": { - "name": "server-1", - "imageRef": "cedef40a-ed67-4d10-800e-17455edce175", - "flavorRef": "1" - }, - "os:scheduler_hints": { - "build_near_host_ip": "192.168.1.1", - "cidr": "24" - } - } - -Cell filters -~~~~~~~~~~~~ - -The following sections describe the available cell filters. - -.. note:: - - These filters are only available for cellsv1 which is deprecated. - -DifferentCellFilter -------------------- - -Schedules the instance on a different cell from a set of instances. To take -advantage of this filter, the requester must pass a scheduler hint, using -``different_cell`` as the key and a list of instance UUIDs as the value. - -ImagePropertiesFilter ---------------------- - -Filters cells based on properties defined on the instance's image. This -filter works specifying the hypervisor required in the image metadata and the -supported hypervisor version in cell capabilities. - -TargetCellFilter ----------------- - -Filters target cells. This filter works by specifying a scheduler hint of -``target_cell``. The value should be the full cell path. - -.. _weights: - -Weights -~~~~~~~ - -When resourcing instances, the filter scheduler filters and weights each host -in the list of acceptable hosts. Each time the scheduler selects a host, it -virtually consumes resources on it, and subsequent selections are adjusted -accordingly. This process is useful when the customer asks for the same large -amount of instances, because weight is computed for each requested instance. 
- -All weights are normalized before being summed up; the host with the largest -weight is given the highest priority. - -**Weighting hosts** - -.. figure:: /figures/nova-weighting-hosts.png - -If cells are used, cells are weighted by the scheduler in the same manner as -hosts. - -Hosts and cells are weighted based on the following options in the -``/etc/nova/nova.conf`` file: - -.. list-table:: Host weighting options - :header-rows: 1 - :widths: 10, 25, 60 - - * - Section - - Option - - Description - * - [DEFAULT] - - ``ram_weight_multiplier`` - - By default, the scheduler spreads instances across all hosts evenly. - Set the ``ram_weight_multiplier`` option to a negative number if you - prefer stacking instead of spreading. Use a floating-point value. - * - [DEFAULT] - - ``scheduler_host_subset_size`` - - New instances are scheduled on a host that is chosen randomly from a - subset of the N best hosts. This property defines the subset size from - which a host is chosen. A value of 1 chooses the first host returned by - the weighting functions. This value must be at least 1. A value less - than 1 is ignored, and 1 is used instead. Use an integer value. - * - [DEFAULT] - - ``scheduler_weight_classes`` - - Defaults to ``nova.scheduler.weights.all_weighers``. Hosts are then - weighted and sorted with the largest weight winning. - * - [DEFAULT] - - ``io_ops_weight_multiplier`` - - Multiplier used for weighing host I/O operations. A negative value means - a preference to choose light workload compute hosts. - * - [DEFAULT] - - ``soft_affinity_weight_multiplier`` - - Multiplier used for weighing hosts for group soft-affinity. Only a - positive value is meaningful. Negative means that the behavior will - change to the opposite, which is soft-anti-affinity. - * - [DEFAULT] - - ``soft_anti_affinity_weight_multiplier`` - - Multiplier used for weighing hosts for group soft-anti-affinity. Only a - positive value is meaningful. Negative means that the behavior will - change to the opposite, which is soft-affinity. - * - [filter_scheduler] - - ``build_failure_weight_multiplier`` - - Multiplier used for weighing hosts which have recent build failures. A - positive value increases the significance of build failures reported by - the host recently, making them less likely to be chosen. - * - [metrics] - - ``weight_multiplier`` - - Multiplier for weighting meters. Use a floating-point value. - * - [metrics] - - ``weight_setting`` - - Determines how meters are weighted. Use a comma-separated list of - metricName=ratio. For example: ``name1=1.0, name2=-1.0`` results in: - ``name1.value * 1.0 + name2.value * -1.0`` - * - [metrics] - - ``required`` - - Specifies how to treat unavailable meters: - - * True - Raises an exception. To avoid the raised exception, you should - use the scheduler filter ``MetricFilter`` to filter out hosts with - unavailable meters. - * False - Treated as a negative factor in the weighting process (uses - the ``weight_of_unavailable`` option). - * - [metrics] - - ``weight_of_unavailable`` - - If ``required`` is set to False, and any one of the meters set by - ``weight_setting`` is unavailable, the ``weight_of_unavailable`` value - is returned to the scheduler. - -For example: - -.. 
code-block:: ini - - [DEFAULT] - scheduler_host_subset_size = 1 - scheduler_weight_classes = nova.scheduler.weights.all_weighers - ram_weight_multiplier = 1.0 - io_ops_weight_multiplier = 2.0 - soft_affinity_weight_multiplier = 1.0 - soft_anti_affinity_weight_multiplier = 1.0 - [metrics] - weight_multiplier = 1.0 - weight_setting = name1=1.0, name2=-1.0 - required = false - weight_of_unavailable = -10000.0 - -.. list-table:: Cell weighting options - :header-rows: 1 - :widths: 10, 25, 60 - - * - Section - - Option - - Description - * - [cells] - - ``mute_weight_multiplier`` - - Multiplier to weight mute children (hosts which have not sent - capacity or capacity updates for some time). - Use a negative, floating-point value. - * - [cells] - - ``offset_weight_multiplier`` - - Multiplier to weight cells, so you can specify a preferred cell. - Use a floating point value. - * - [cells] - - ``ram_weight_multiplier`` - - By default, the scheduler spreads instances across all cells evenly. - Set the ``ram_weight_multiplier`` option to a negative number if you - prefer stacking instead of spreading. Use a floating-point value. - * - [cells] - - ``scheduler_weight_classes`` - - Defaults to ``nova.cells.weights.all_weighers``, which maps to all - cell weighers included with Compute. Cells are then weighted and - sorted with the largest weight winning. - -For example: - -.. code-block:: ini - - [cells] - scheduler_weight_classes = nova.cells.weights.all_weighers - mute_weight_multiplier = -10.0 - ram_weight_multiplier = 1.0 - offset_weight_multiplier = 1.0 - -Chance scheduler -~~~~~~~~~~~~~~~~ - -As an administrator, you work with the filter scheduler. However, the Compute -service also uses the Chance Scheduler, -``nova.scheduler.chance.ChanceScheduler``, which randomly selects from lists of -filtered hosts. - -Utilization aware scheduling -~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -It is possible to schedule VMs using advanced scheduling decisions. These -decisions are made based on enhanced usage statistics encompassing data like -memory cache utilization, memory bandwidth utilization, or network bandwidth -utilization. This is disabled by default. The administrator can configure how -the metrics are weighted in the configuration file by using the -``weight_setting`` configuration option in the ``nova.conf`` configuration -file. For example to configure metric1 with ratio1 and metric2 with ratio2: - -.. code-block:: ini - - weight_setting = "metric1=ratio1, metric2=ratio2" - -.. _host-aggregates: - -Host aggregates and availability zones -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -Host aggregates are a mechanism for partitioning hosts in an OpenStack cloud, -or a region of an OpenStack cloud, based on arbitrary characteristics. -Examples where an administrator may want to do this include where a group of -hosts have additional hardware or performance characteristics. - -Host aggregates are not explicitly exposed to users. Instead administrators -map flavors to host aggregates. Administrators do this by setting metadata on -a host aggregate, and matching flavor extra specifications. The scheduler then -endeavors to match user requests for instance of the given flavor to a host -aggregate with the same key-value pair in its metadata. Compute nodes can be -in more than one host aggregate. - -Administrators are able to optionally expose a host aggregate as an -availability zone. 
Availability zones are different from host aggregates in
-that they are explicitly exposed to the user, and hosts can only be in a single
-availability zone. Administrators can configure a default availability zone
-where instances will be scheduled when the user fails to specify one.
-
-Command-line interface
-----------------------
-
-The :command:`nova` command-line client supports the following
-aggregate-related commands.
-
-nova aggregate-list
-  Print a list of all aggregates.
-
-nova aggregate-create <name> [<availability-zone>]
-  Create a new aggregate named ``<name>``, and optionally in availability zone
-  ``[<availability-zone>]`` if specified. The command returns the ID of the
-  newly created aggregate. Hosts can be made available to multiple host
-  aggregates. Be careful when adding a host to an additional host aggregate
-  when the host is also in an availability zone. Pay attention when using the
-  :command:`nova aggregate-set-metadata` and :command:`nova aggregate-update`
-  commands to avoid user confusion when they boot instances in different
-  availability zones. An error occurs if you cannot add a particular host to
-  an aggregate zone for which it is not intended.
-
-nova aggregate-delete <id>
-  Delete an aggregate with its ``<id>`` or ``<name>``.
-
-nova aggregate-show <id>
-  Show details of the aggregate with its ``<id>`` or ``<name>``.
-
-nova aggregate-add-host <id> <host>
-  Add host with name ``<host>`` to aggregate with its ``<id>`` or ``<name>``.
-
-nova aggregate-remove-host <id> <host>
-  Remove the host with name ``<host>`` from the aggregate with its ``<id>``
-  or ``<name>``.
-
-nova aggregate-set-metadata <id> <key=value> [<key=value> ...]
-  Add or update metadata (key-value pairs) associated with the aggregate with
-  its ``<id>`` or ``<name>``.
-
-nova aggregate-update [--name <name>] [--availability-zone <availability-zone>] <id>
-  Update the name and/or availability zone for the aggregate.
-
-nova host-list
-  List all hosts by service. It has been deprecated since microversion 2.43.
-  Use :command:`nova hypervisor-list` instead.
-
-nova hypervisor-list [--matching <hostname>] [--marker <marker>] [--limit <limit>]
-  List hypervisors.
-
-nova host-update [--status <enable|disable>] [--maintenance <enable|disable>] <hostname>
-  Put/resume host into/from maintenance. It has been deprecated since
-  microversion 2.43. To enable or disable a service,
-  use :command:`nova service-enable` or :command:`nova service-disable` instead.
-
-nova service-enable <id>
-  Enable the service.
-
-nova service-disable [--reason <reason>] <id>
-  Disable the service.
-
-.. note::
-
-   Only administrators can access these commands. If you try to use these
-   commands and the user name and tenant that you use to access the Compute
-   service do not have the ``admin`` role or the appropriate privileges, these
-   errors occur:
-
-   .. code-block:: console
-
-      ERROR: Policy doesn't allow compute_extension:aggregates to be performed. (HTTP 403) (Request-ID: req-299fbff6-6729-4cef-93b2-e7e1f96b4864)
-
-   .. code-block:: console
-
-      ERROR: Policy doesn't allow compute_extension:hosts to be performed. (HTTP 403) (Request-ID: req-ef2400f6-6776-4ea3-b6f1-7704085c27d1)
-
-Configure scheduler to support host aggregates
-----------------------------------------------
-
-One common use case for host aggregates is when you want to support scheduling
-instances to a subset of compute hosts because they have a specific capability.
-For example, you may want to allow users to request compute hosts that have SSD
-drives if they need access to faster disk I/O, or access to compute hosts that
-have GPU cards to take advantage of GPU-accelerated code.
Configure scheduler to support host aggregates ----------------------------------------------- - -One common use case for host aggregates is when you want to support scheduling -instances to a subset of compute hosts because they have a specific capability. -For example, you may want to allow users to request compute hosts that have SSD -drives if they need access to faster disk I/O, or access to compute hosts that -have GPU cards to take advantage of GPU-accelerated code. - -To configure the scheduler to support host aggregates, the -``scheduler_default_filters`` configuration option must contain the -``AggregateInstanceExtraSpecsFilter`` in addition to the other filters used by -the scheduler. Add the following line to ``/etc/nova/nova.conf`` on the host -that runs the ``nova-scheduler`` service to enable host aggregates filtering, -as well as the other filters that are typically enabled: - -.. code-block:: ini - - scheduler_default_filters=AggregateInstanceExtraSpecsFilter,RetryFilter,AvailabilityZoneFilter,ComputeCapabilitiesFilter,ImagePropertiesFilter,ServerGroupAntiAffinityFilter,ServerGroupAffinityFilter - -Example: Specify compute hosts with SSDs ----------------------------------------- - -This example configures the Compute service to enable users to request nodes -that have solid-state drives (SSDs). You create a ``fast-io`` host aggregate in -the ``nova`` availability zone and you add the ``ssd=true`` key-value pair to -the aggregate. Then, you add the ``node1`` and ``node2`` compute nodes to it. - -.. code-block:: console - - $ openstack aggregate create --zone nova fast-io - +-------------------+----------------------------+ - | Field | Value | - +-------------------+----------------------------+ - | availability_zone | nova | - | created_at | 2016-12-22T07:31:13.013466 | - | deleted | False | - | deleted_at | None | - | id | 1 | - | name | fast-io | - | updated_at | None | - +-------------------+----------------------------+ - - $ openstack aggregate set --property ssd=true 1 - +-------------------+----------------------------+ - | Field | Value | - +-------------------+----------------------------+ - | availability_zone | nova | - | created_at | 2016-12-22T07:31:13.000000 | - | deleted | False | - | deleted_at | None | - | hosts | [] | - | id | 1 | - | name | fast-io | - | properties | ssd='true' | - | updated_at | None | - +-------------------+----------------------------+ - - $ openstack aggregate add host 1 node1 - +-------------------+--------------------------------------------------+ - | Field | Value | - +-------------------+--------------------------------------------------+ - | availability_zone | nova | - | created_at | 2016-12-22T07:31:13.000000 | - | deleted | False | - | deleted_at | None | - | hosts | [u'node1'] | - | id | 1 | - | metadata | {u'ssd': u'true', u'availability_zone': u'nova'} | - | name | fast-io | - | updated_at | None | - +-------------------+--------------------------------------------------+ - - $ openstack aggregate add host 1 node2 - +-------------------+--------------------------------------------------+ - | Field | Value | - +-------------------+--------------------------------------------------+ - | availability_zone | nova | - | created_at | 2016-12-22T07:31:13.000000 | - | deleted | False | - | deleted_at | None | - | hosts | [u'node1', u'node2'] | - | id | 1 | - | metadata | {u'ssd': u'true', u'availability_zone': u'nova'} | - | name | fast-io | - | updated_at | None | - +-------------------+--------------------------------------------------+ - -Use the :command:`openstack flavor create` command to create the ``ssd.large`` -flavor with an ID of 6, 8 GB of RAM, 80 GB root disk, and 4 vCPUs. - -.. 
code-block:: console - - $ openstack flavor create --id 6 --ram 8192 --disk 80 --vcpus 4 ssd.large - +----------------------------+-----------+ - | Field | Value | - +----------------------------+-----------+ - | OS-FLV-DISABLED:disabled | False | - | OS-FLV-EXT-DATA:ephemeral | 0 | - | disk | 80 | - | id | 6 | - | name | ssd.large | - | os-flavor-access:is_public | True | - | ram | 8192 | - | rxtx_factor | 1.0 | - | swap | | - | vcpus | 4 | - +----------------------------+-----------+ - -Once the flavor is created, specify one or more key-value pairs that match the -key-value pairs on the host aggregates with scope -``aggregate_instance_extra_specs``. In this case, that is the -``aggregate_instance_extra_specs:ssd=true`` key-value pair. Setting a -key-value pair on a flavor is done using the :command:`openstack flavor set` -command. - -.. code-block:: console - - $ openstack flavor set --property aggregate_instance_extra_specs:ssd=true ssd.large - -Once it is set, you should see the ``extra_specs`` property of the -``ssd.large`` flavor populated with a key of ``ssd`` and a corresponding value -of ``true``. - -.. code-block:: console - - $ openstack flavor show ssd.large - +----------------------------+-------------------------------------------+ - | Field | Value | - +----------------------------+-------------------------------------------+ - | OS-FLV-DISABLED:disabled | False | - | OS-FLV-EXT-DATA:ephemeral | 0 | - | disk | 80 | - | id | 6 | - | name | ssd.large | - | os-flavor-access:is_public | True | - | properties | aggregate_instance_extra_specs:ssd='true' | - | ram | 8192 | - | rxtx_factor | 1.0 | - | swap | | - | vcpus | 4 | - +----------------------------+-------------------------------------------+ - -Now, when a user requests an instance with the ``ssd.large`` flavor, -the scheduler only considers hosts with the ``ssd=true`` key-value pair. -In this example, these are ``node1`` and ``node2``. - -Aggregates in Placement ------------------------ - -Aggregates also exist in placement and are not the same thing as host -aggregates in nova. These aggregates are defined (purely) as groupings -of related resource providers. Since compute nodes in nova are -represented in placement as resource providers, they can be added to a -placement aggregate as well. For example, get the UUID of the compute -node using :command:`openstack hypervisor list` and add it to an -aggregate in placement using :command:`openstack resource provider -aggregate set`. - -.. code-block:: console - - $ openstack --os-compute-api-version=2.53 hypervisor list - +--------------------------------------+---------------------+-----------------+-----------------+-------+ - | ID | Hypervisor Hostname | Hypervisor Type | Host IP | State | - +--------------------------------------+---------------------+-----------------+-----------------+-------+ - | 815a5634-86fb-4e1e-8824-8a631fee3e06 | node1 | QEMU | 192.168.1.123 | up | - +--------------------------------------+---------------------+-----------------+-----------------+-------+ - - $ openstack --os-placement-api-version=1.2 resource provider aggregate set --aggregate df4c74f3-d2c4-4991-b461-f1a678e1d161 815a5634-86fb-4e1e-8824-8a631fee3e06 - -Some scheduling filter operations can be performed by placement for -increased speed and efficiency. - -.. note:: - - The nova-api service attempts (as of nova 18.0.0) to automatically mirror - the association of a compute host with an aggregate when an administrator - adds or removes a host to/from a nova host aggregate. 
This should alleviate - the need to manually create those association records in the placement API - using the ``openstack resource provider aggregate set`` CLI invocation. - -Tenant Isolation with Placement -------------------------------- - -In order to use placement to isolate tenants, there must be placement -aggregates that match the membership and UUID of nova host aggregates -that you want to use for isolation. The same key pattern in aggregate -metadata used by the `AggregateMultiTenancyIsolation` filter controls -this function, and is enabled by setting -`[scheduler]/limit_tenants_to_placement_aggregate=True`. - -.. code-block:: console - - $ openstack --os-compute-api-version=2.53 aggregate create myagg - +-------------------+--------------------------------------+ - | Field | Value | - +-------------------+--------------------------------------+ - | availability_zone | None | - | created_at | 2018-03-29T16:22:23.175884 | - | deleted | False | - | deleted_at | None | - | id | 4 | - | name | myagg | - | updated_at | None | - | uuid | 019e2189-31b3-49e1-aff2-b220ebd91c24 | - +-------------------+--------------------------------------+ - - $ openstack --os-compute-api-version=2.53 aggregate add host myagg node1 - +-------------------+--------------------------------------+ - | Field | Value | - +-------------------+--------------------------------------+ - | availability_zone | None | - | created_at | 2018-03-29T16:22:23.175884 | - | deleted | False | - | deleted_at | None | - | hosts | [u'node1'] | - | id | 4 | - | name | myagg | - | updated_at | None | - | uuid | 019e2189-31b3-49e1-aff2-b220ebd91c24 | - +-------------------+--------------------------------------+ - - $ openstack project list -f value | grep 'demo' - 9691591f913949818a514f95286a6b90 demo - - $ openstack aggregate set --property filter_tenant_id=9691591f913949818a514f95286a6b90 myagg - - $ openstack --os-placement-api-version=1.2 resource provider aggregate set --aggregate 019e2189-31b3-49e1-aff2-b220ebd91c24 815a5634-86fb-4e1e-8824-8a631fee3e06 - -Availability Zones with Placement ---------------------------------- - -In order to use placement to honor availability zone requests, there must be -placement aggregates that match the membership and UUID of nova host aggregates -that you assign as availability zones. The same key in aggregate metadata used -by the `AvailabilityZoneFilter` filter controls this function, and is enabled by -setting `[scheduler]/query_placement_for_availability_zone=True`. - -.. 
code-block:: console - - $ openstack --os-compute-api-version=2.53 aggregate create myaz - +-------------------+--------------------------------------+ - | Field | Value | - +-------------------+--------------------------------------+ - | availability_zone | None | - | created_at | 2018-03-29T16:22:23.175884 | - | deleted | False | - | deleted_at | None | - | id | 4 | - | name | myaz | - | updated_at | None | - | uuid | 019e2189-31b3-49e1-aff2-b220ebd91c24 | - +-------------------+--------------------------------------+ - - $ openstack --os-compute-api-version=2.53 aggregate add host myaz node1 - +-------------------+--------------------------------------+ - | Field | Value | - +-------------------+--------------------------------------+ - | availability_zone | None | - | created_at | 2018-03-29T16:22:23.175884 | - | deleted | False | - | deleted_at | None | - | hosts | [u'node1'] | - | id | 4 | - | name | myaz | - | updated_at | None | - | uuid | 019e2189-31b3-49e1-aff2-b220ebd91c24 | - +-------------------+--------------------------------------+ - - $ openstack aggregate set --property availability_zone=az002 myaz - - $ openstack --os-placement-api-version=1.2 resource provider aggregate set --aggregate 019e2189-31b3-49e1-aff2-b220ebd91c24 815a5634-86fb-4e1e-8824-8a631fee3e06 - -With the above configuration, the `AvailabilityZoneFilter` filter can be disabled -in `[filter_scheduler]/enabled_filters` while retaining proper behavior (and doing -so with the higher performance of placement's implementation). - -XenServer hypervisor pools to support live migration ----------------------------------------------------- - -When using the XenAPI-based hypervisor, the Compute service uses host -aggregates to manage XenServer Resource pools, which are used in supporting -live migration. - -Cells considerations -~~~~~~~~~~~~~~~~~~~~ - -By default, cells are enabled for scheduling new instances, but they can be -disabled, in which case no new instances will be scheduled to the cell. This -may be useful during cell maintenance, failures, or other interventions. Note -that creating pre-disabled cells and enabling or disabling existing cells must -be followed by a restart or a SIGHUP of the nova-scheduler service for the -changes to take effect. - -Command-line interface ----------------------- - -The :command:`nova-manage` command-line client supports the cell-disable -related commands. To enable or disable a cell, use -:command:`nova-manage cell_v2 update_cell` and to create pre-disabled cells, -use :command:`nova-manage cell_v2 create_cell`. See the -:ref:`man-page-cells-v2` man page for details on command usage.
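As a hedged sketch, disabling a cell and later re-enabling it might look like the following, where ``<cell_uuid>`` stands in for the real UUID reported by :command:`nova-manage cell_v2 list_cells`:

.. code-block:: console

   $ nova-manage cell_v2 update_cell --cell_uuid <cell_uuid> --disable
   $ nova-manage cell_v2 update_cell --cell_uuid <cell_uuid> --enable

Each change should be followed by the scheduler restart or SIGHUP described above.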
diff --git a/doc/source/admin/configuring-migrations.rst b/doc/source/admin/configuring-migrations.rst index 1f1a0c0ee98..63edae6e216 100644 --- a/doc/source/admin/configuring-migrations.rst +++ b/doc/source/admin/configuring-migrations.rst @@ -10,16 +10,15 @@ source host, but migration can also be useful to redistribute the load when many VM instances are running on a specific physical machine. This document covers live migrations using the -:ref:`configuring-migrations-kvm-libvirt` and -:ref:`configuring-migrations-xenserver` hypervisors. +:ref:`configuring-migrations-kvm-libvirt` and VMware hypervisors. .. :ref:`_configuring-migrations-kvm-libvirt` -.. :ref:`_configuring-migrations-xenserver` .. note:: Not all Compute service hypervisor drivers support live-migration, or - support all live-migration features. + support all live-migration features. Similarly, not all compute service + features are supported. + Consult :doc:`/user/support-matrix` to determine which hypervisors support live-migration. @@ -67,21 +66,17 @@ The migration types are: different host in the same cell, but not across cells. The following sections describe how to configure your hosts for live migrations -using the KVM and XenServer hypervisors. +using the libvirt virt driver and KVM hypervisor. .. _configuring-migrations-kvm-libvirt: -KVM-libvirt -~~~~~~~~~~~ - -.. :ref:`_configuring-migrations-kvm-general` -.. :ref:`_configuring-migrations-kvm-block-and-volume-migration` -.. :ref:`_configuring-migrations-kvm-shared-storage` +Libvirt +------- .. _configuring-migrations-kvm-general: General configuration ---------------------- +~~~~~~~~~~~~~~~~~~~~~ To enable any type of live migration, configure the compute hosts according to the instructions below: @@ -135,20 +130,36 @@ the instructions below: Be mindful of the security risks introduced by opening ports. +.. _`configuring-migrations-securing-live-migration-streams`: + +Securing live migration streams +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +If your compute nodes have at least libvirt 4.4.0 and QEMU 2.11.0, it is +strongly recommended to secure all your live migration streams by taking +advantage of the "QEMU-native TLS" feature. This requires a +pre-existing PKI (Public Key Infrastructure) setup. For further details +on how to set this all up, refer to the +:doc:`secure-live-migration-with-qemu-native-tls` document. + + .. _configuring-migrations-kvm-block-and-volume-migration: Block migration, volume-based live migration --------------------------------------------- +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -No additional configuration is required for block migration and volume-backed -live migration. +If your environment satisfies the requirements for "QEMU-native TLS", +then block migration requires some setup; refer to the above section, +`Securing live migration streams`_, for details. Otherwise, no +additional configuration is required for block migration and +volume-backed live migration. Be aware that block migration adds load to the network and storage subsystems. .. _configuring-migrations-kvm-shared-storage: Shared storage --------------- +~~~~~~~~~~~~~~ Compute hosts have many options for sharing storage, for example NFS, shared disk array LUNs, Ceph or GlusterFS. @@ -208,7 +219,7 @@ hosts. .. _configuring-migrations-kvm-advanced: Advanced configuration for KVM and QEMU ---------------------------------------- +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Live migration copies the instance's memory from the source to the destination compute host. After a memory page has been copied, the instance may write to it @@ -221,27 +232,17 @@ memory-intensive instances succeed. #. **Live migration completion timeout** - The Compute service aborts a migration when it has been running for too - long. The timeout is calculated based on the instance size, which is the - instance's memory size in GiB. In the case of block migration, the size of - ephemeral storage in GiB is added. + The Compute service will either abort or force complete a migration + when it has been running too long. This behavior is configurable + using the :oslo.config:option:`libvirt.live_migration_timeout_action` + config option. The timeout is calculated based on the instance size, which + is the instance's memory size in GiB. In the case of block migration, the + size of ephemeral storage in GiB is added. 
The timeout in seconds is the instance size multiplied by the configurable - parameter ``live_migration_completion_timeout``, whose default is 800. For - example, shared-storage live migration of an instance with 8GiB memory will - time out after 6400 seconds. - -#. **Live migration progress timeout** - - The Compute service also aborts a live migration when it detects that memory - copy is not making progress for a certain time. You can set this time, in - seconds, through the configurable parameter - ``live_migration_progress_timeout``. - - In Ocata, the default value of ``live_migration_progress_timeout`` is 0, - which disables progress timeouts. You should not change this value, since - the algorithm that detects memory copy progress has been determined to be - unreliable. It may be re-enabled in future releases. + parameter :oslo.config:option:`libvirt.live_migration_completion_timeout`, + whose default is 800. For example, shared-storage live migration of an + instance with 8GiB memory will time out after 6400 seconds. #. **Instance downtime** @@ -316,81 +317,16 @@ memory-intensive instances succeed. The full list of live migration configuration parameters is documented in the :doc:`Nova Configuration Options ` -.. _configuring-migrations-xenserver: - -XenServer -~~~~~~~~~ - -.. :ref:Shared Storage -.. :ref:Block migration - -.. _configuring-migrations-xenserver-shared-storage: - -Shared storage --------------- - -**Prerequisites** - -- **Compatible XenServer hypervisors**. - - For more information, see the `Requirements for Creating Resource Pools - `_ - section of the XenServer Administrator's Guide. - -- **Shared storage**. - - An NFS export, visible to all XenServer hosts. - - .. note:: - - For the supported NFS versions, see the `NFS and SMB - `_ - section of the XenServer Administrator's Guide. - -To use shared storage live migration with XenServer hypervisors, the hosts must -be joined to a XenServer pool. - -.. rubric:: Using shared storage live migrations with XenServer Hypervisors - -#. Add an NFS VHD storage to your master XenServer, and set it as the default - storage repository. For more information, see NFS VHD in the XenServer - Administrator's Guide. - -#. Configure all compute nodes to use the default storage repository (``sr``) - for pool operations. Add this line to your ``nova.conf`` configuration files - on all compute nodes: - - .. code-block:: ini - - sr_matching_filter=default-sr:true - -#. To add a host to a pool, you need to know the pool master ip address, - user name and password. Run below command on the XenServer host: - - .. code-block:: console - - $ xe pool-join master-address=MASTER_IP master-username=root master-password=MASTER_PASSWORD - - .. note:: - - The added compute node and the host will shut down to join the host to - the XenServer pool. The operation will fail if any server other than the - compute node is running or suspended on the host. - -.. _configuring-migrations-xenserver-block-migration: - -Block migration ---------------- -- **Compatible XenServer hypervisors**. +VMware +------ - The hypervisors must support the Storage XenMotion feature. See your - XenServer manual to make sure your edition has this feature. +.. :ref:`_configuring-migrations-vmware` - .. note:: +.. _configuring-migrations-vmware: - - To use block migration, you must use the ``--block-migrate`` parameter - with the live migration command. 
+vSphere configuration +~~~~~~~~~~~~~~~~~~~~~ - - Block migration works only with EXT local storage storage repositories, - and the server must not have any volumes attached. +Enable vMotion on all ESX hosts which are managed by Nova by following the +instructions in `this `_ KB article. diff --git a/doc/source/admin/cpu-models.rst b/doc/source/admin/cpu-models.rst new file mode 100644 index 00000000000..06ffdb61b66 --- /dev/null +++ b/doc/source/admin/cpu-models.rst @@ -0,0 +1,320 @@ +========== +CPU models +========== + +Nova allows you to control the guest CPU model that is exposed to instances. +Use cases include: + +* To maximize performance of instances by exposing new host CPU features to the + guest + +* To ensure a consistent default behavior across all machines, removing + reliance on system defaults. + +.. important:: + + The functionality described below is currently only supported by the + libvirt driver. + + +CPU modes +--------- + +In libvirt, the CPU is specified by providing a base CPU model name (which is a +shorthand for a set of feature flags), a set of additional feature flags, and +the topology (sockets/cores/threads). The libvirt KVM driver provides a number +of standard CPU model names. These models are defined in +``/usr/share/libvirt/cpu_map/*.xml``. You can inspect these files to determine +which models are supported by your local installation. + +Two Compute configuration options in the :oslo.config:group:`libvirt` group +of ``nova.conf`` define which type of CPU model is exposed to the hypervisor +when using KVM: :oslo.config:option:`libvirt.cpu_mode` and +:oslo.config:option:`libvirt.cpu_models`. + +The :oslo.config:option:`libvirt.cpu_mode` option can take one of the following +values: ``none``, ``host-passthrough``, ``host-model``, and ``custom``. + +See `Effective Virtual CPU configuration in Nova`__ for a recorded presentation +about this topic. + +.. __: https://www.openstack.org/videos/summits/berlin-2018/effective-virtual-cpu-configuration-in-nova + +Host model +~~~~~~~~~~ + +If :oslo.config:option:`cpu_mode=host-model <libvirt.cpu_mode>`, libvirt +identifies the CPU model in ``/usr/share/libvirt/cpu_map/*.xml`` that most +closely matches the host and requests additional CPU flags to complete the +match. This CPU model has a number of advantages: + +* It provides almost all of the host CPU features to the guest, thus providing + close to the maximum functionality and performance possible. + +* It auto-adds critical guest CPU flags for mitigation from certain security + flaws, *provided* the CPU microcode, kernel, QEMU, and libvirt are all + updated. + +* It computes live migration compatibility, with the caveat that live migration + in both directions is not always possible. + +In general, using ``host-model`` is a safe choice if your compute node CPUs are +largely identical. However, if your compute nodes span multiple processor +generations, you may be better advised to select a ``custom`` CPU model. + +The ``host-model`` CPU model is the default for the KVM & QEMU hypervisors +(:oslo.config:option:`libvirt.virt_type`\ =``kvm``/``qemu``). + +.. note:: + + As noted above, live migration is not always possible in both directions + when using ``host-model``. During live migration, the source CPU model + definition is transferred to the destination host as-is. This results in the + migrated guest on the destination seeing exactly the same CPU model as on + the source even if the destination compute host is capable of providing more + CPU features. However, shutting down and restarting the guest on the + destination may present different hardware to the guest, as per the new + capabilities of the destination compute.
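As a minimal, illustrative sketch (not part of the original guide), a compute node that opts in to ``host-model`` explicitly, assuming the ``kvm`` virt type, would carry something like this in ``nova.conf``:

.. code-block:: ini

   [libvirt]
   virt_type = kvm
   cpu_mode = host-model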
+Host passthrough +~~~~~~~~~~~~~~~~ + +If :oslo.config:option:`cpu_mode=host-passthrough <libvirt.cpu_mode>`, libvirt +tells KVM to pass through the host CPU with no modifications. In comparison to +``host-model`` which simply matches feature flags, ``host-passthrough`` ensures +every last detail of the host CPU is matched. This gives the best performance, +and can be important to some apps which check low-level CPU details, but it +comes at a cost with respect to migration. + +In ``host-passthrough`` mode, the guest can only be live-migrated to a target +host that matches the source host extremely closely. This includes the physical +CPU model and running microcode, and may even include the running kernel. Use +this mode only if your compute nodes have a very large degree of homogeneity +(i.e. substantially all of your compute nodes use the exact same CPU generation +and model), and you make sure to only live-migrate between hosts with exactly +matching kernel versions. Failure to do so will result in an inability to +support any form of live migration. + +.. note:: + + The reason it is necessary for the CPU microcode versions to match is that + hardware performance counters are exposed to the instance and they are + likely to vary between different CPU models. There may also be other + reasons, such as fixes for hardware security flaws that are delivered + through CPU microcode. + +Custom +~~~~~~ + +If :oslo.config:option:`cpu_mode=custom <libvirt.cpu_mode>`, you can explicitly +specify an ordered list of supported named models using the +:oslo.config:option:`libvirt.cpu_models` configuration option. It is expected +that the list is ordered so that the more common and less advanced CPU models +are listed earlier. + +By selecting the ``custom`` mode along with a +:oslo.config:option:`libvirt.cpu_models` value that matches the oldest of your +compute node CPUs, you can ensure that live migration between compute nodes +will always be possible. However, you should ensure that the +:oslo.config:option:`libvirt.cpu_models` you select pass the correct CPU +feature flags to the guest. + +If you need to further tweak your CPU feature flags in the ``custom`` mode, see +`CPU feature flags`_. + +.. note:: + + If :oslo.config:option:`libvirt.cpu_models` is configured, + the CPU models in the list need to be compatible with the host CPU. Also, if + :oslo.config:option:`libvirt.cpu_model_extra_flags` is configured, all flags + need to be compatible with the host CPU. If incompatible CPU models or flags + are specified, the nova service will raise an error and fail to start. + +None +~~~~ + +If :oslo.config:option:`cpu_mode=none <libvirt.cpu_mode>`, libvirt does not +specify a CPU model. Instead, the hypervisor chooses the default model. + +The ``none`` CPU model is the default for all non-KVM/QEMU hypervisors +(:oslo.config:option:`libvirt.virt_type`\ !=``kvm``/``qemu``). + + +CPU feature flags +----------------- + +.. versionadded:: 18.0.0 (Rocky) + +Regardless of your configured :oslo.config:option:`libvirt.cpu_mode`, it is +also possible to selectively enable additional feature flags. This can be +accomplished using the :oslo.config:option:`libvirt.cpu_model_extra_flags` +config option. For example, suppose you have configured a custom CPU model of +``IvyBridge``, which normally does not enable the ``pcid`` feature flag, but +you do want to pass ``pcid`` into your guest instances. In this case, you could +configure the following in ``nova.conf`` to enable this flag. + +.. code-block:: ini + + [libvirt] + cpu_mode = custom + cpu_models = IvyBridge + cpu_model_extra_flags = pcid
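To confirm the result on a running guest, one option, assuming shell access to the compute node and an illustrative libvirt domain name, is to inspect the generated domain XML for the ``<cpu>`` element:

.. code-block:: console

   $ virsh dumpxml instance-00000001 | grep -A 2 '<cpu'

The ``<model>`` child element and any ``<feature>`` entries should reflect the configured model and extra flags.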
+An end user can also specify required CPU features through traits. When +specified, the libvirt driver will select the first CPU model in the +:oslo.config:option:`libvirt.cpu_models` list that can provide the requested +feature traits. If no CPU feature traits are specified then the instance will +be configured with the first CPU model in the list. + +Consider the following ``nova.conf``: + +.. code-block:: ini + + [libvirt] + cpu_mode = custom + cpu_models = Penryn,IvyBridge,Haswell,Broadwell,Skylake-Client + +These different CPU models support different feature flags and are correctly +configured in order of oldest (and therefore most widely supported) to newest. +If the user explicitly requires the ``avx`` and ``avx2`` CPU features, the +latter of which is only found on Haswell-generation processors or newer, then +they could request them using the +:nova:extra-spec:`trait{group}:HW_CPU_X86_AVX` and +:nova:extra-spec:`trait{group}:HW_CPU_X86_AVX2` flavor extra specs. For +example: + +.. code-block:: console + + $ openstack flavor set $FLAVOR \ + --property trait:HW_CPU_X86_AVX=required \ + --property trait:HW_CPU_X86_AVX2=required + +As ``Haswell`` is the first CPU model supporting both of these CPU features, +the instance would be configured with this model. + +.. _mitigation-for-Intel-MDS-security-flaws: + +Mitigation for MDS ("Microarchitectural Data Sampling") Security Flaws +---------------------------------------------------------------------- + +In May 2019, four new microprocessor flaws, known as `MDS`__ and also referred +to as `RIDL and Fallout`__ or `ZombieLoad`__, were discovered. +These flaws affect unpatched Nova compute nodes and instances running on Intel +x86_64 CPUs. + +.. __: https://access.redhat.com/security/vulnerabilities/mds +.. __: https://mdsattacks.com/ +.. __: https://zombieloadattack.com + +Resolution +~~~~~~~~~~ + +To mitigate these MDS security flaws, a new CPU flag, +``md-clear``, needs to be exposed to the Nova instances. This can be done as +follows. + +#. Update the following components to the versions from your Linux + distribution that have fixes for the MDS flaws, on all compute nodes + with Intel x86_64 CPUs: + + - ``microcode_ctl`` + - ``kernel`` + - ``qemu-system-x86`` + - ``libvirt`` + +#. When using the libvirt driver, ensure that the CPU flag ``md-clear`` + is exposed to the Nova instances. This can be done in one of three ways, + depending on your configured CPU mode: + + #. :oslo.config:option:`libvirt.cpu_mode`\ =host-model + + When using the ``host-model`` CPU mode, the ``md-clear`` CPU flag + will be passed through to the Nova guests automatically. + + This mode is the default when + :oslo.config:option:`libvirt.virt_type`\ =kvm|qemu is set in + ``/etc/nova/nova-cpu.conf`` on compute nodes. + + #. :oslo.config:option:`libvirt.cpu_mode`\ =host-passthrough + + When using the ``host-passthrough`` CPU mode, the ``md-clear`` CPU + flag will be passed through to the Nova guests automatically. + + #. 
:oslo.config:option:`libvirt.cpu_mode`\ =custom + + When using the ``custom`` CPU mode, you must *explicitly* expose the + CPU flag ``md-clear`` to the Nova instances, in addition to the + flags required for previous vulnerabilities, using the + :oslo.config:option:`libvirt.cpu_model_extra_flags` config option. For + example: + + .. code-block:: ini + + [libvirt] + cpu_mode = custom + cpu_models = IvyBridge + cpu_model_extra_flags = spec-ctrl,ssbd,md-clear + +#. Reboot the compute node for the fixes to take effect. + + To minimize workload downtime, you may wish to live migrate all guests to + another compute node first. + +Once the above steps have been taken on every vulnerable compute node in the +deployment, each running guest in the cluster must be fully powered down, and +cold-booted (i.e. an explicit stop followed by a start), in order to activate +the new CPU models. This can be done by the guest administrators at a time of +their choosing. + +Validation +~~~~~~~~~~ + +After applying relevant updates, administrators can check the kernel's +``sysfs`` interface to see what mitigation is in place, by running the +following command on the host: + +.. code-block:: bash + + # cat /sys/devices/system/cpu/vulnerabilities/mds + Mitigation: Clear CPU buffers; SMT vulnerable + +To unpack the message "Mitigation: Clear CPU buffers; SMT vulnerable": + +- ``Mitigation: Clear CPU buffers`` means you have the "CPU buffer clearing" + mitigation enabled, which is a mechanism that flushes various exploitable + CPU buffers by means of a CPU instruction called "VERW". + +- ``SMT vulnerable`` means, depending on your workload, you may still be + vulnerable to SMT-related problems. You need to evaluate whether your + workloads need SMT (also called "Hyper-Threading") to be disabled or not. + Refer to the guidance from your Linux distribution and processor vendor. + +To see the other possible values for +``/sys/devices/system/cpu/vulnerabilities/mds``, refer to the `MDS system +information`__ section in Linux kernel's documentation for MDS. + +On the host, validate that KVM is capable of exposing the ``md-clear`` flag to +guests: + +.. code-block:: bash + + # virsh domcapabilities kvm | grep md-clear + <feature policy='require' name='md-clear'/> + +More information can be found on the 'Diagnosis' tab of `this security notice +document`__. + +.. __: https://www.kernel.org/doc/html/latest/admin-guide/hw-vuln/mds.html#mds-system-information +.. __: https://access.redhat.com/security/vulnerabilities/mds + +Performance Impact +~~~~~~~~~~~~~~~~~~ + +Refer to the section titled "Performance Impact and Disabling MDS" in +`this security notice document`__, under the *Resolve* tab. + +.. note:: + + Although the article referred to is from Red Hat, the findings and + recommendations about performance impact apply to other distributions as + well. + +.. __: https://access.redhat.com/security/vulnerabilities/mds diff --git a/doc/source/admin/cpu-topologies.rst b/doc/source/admin/cpu-topologies.rst index 5c9174e1c33..179f7bd3775 100644 --- a/doc/source/admin/cpu-topologies.rst +++ b/doc/source/admin/cpu-topologies.rst @@ -7,8 +7,10 @@ control over how instances run on hypervisor CPUs and the topology of virtual CPUs available to instances. These features help minimize latency and maximize performance. +.. include:: /common/numa-live-migration-warning.txt + SMP, NUMA, and SMT -~~~~~~~~~~~~~~~~~~ +------------------ Symmetric multiprocessing (SMP) SMP is a design found in many modern multi-core systems. 
In an SMP system, @@ -44,8 +46,20 @@ In OpenStack, SMP CPUs are known as *cores*, NUMA cells or nodes are known as eight core system with Hyper-Threading would have four sockets, eight cores per socket and two threads per core, for a total of 64 CPUs. +PCPU and VCPU +------------- + +PCPU + Resource class representing an amount of dedicated CPUs for a single guest. + +VCPU + Resource class representing a unit of CPU resources for a single guest + approximating the processing power of a single physical processor. + +.. _numa-topologies: + Customizing instance NUMA placement policies -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +-------------------------------------------- .. important:: @@ -77,14 +91,9 @@ vCPUs of different NUMA cells on the instance to the corresponding NUMA cells on the host. It will also expose the NUMA topology of the instance to the guest OS. -If you want compute to pin a particular vCPU as part of this process, -set the ``vcpu_pin_set`` parameter in the ``nova.conf`` configuration -file. For more information about the ``vcpu_pin_set`` parameter, see the -:doc:`/configuration/config`. - In all cases where NUMA awareness is used, the ``NUMATopologyFilter`` filter must be enabled. Details on this filter are provided in -:doc:`/admin/configuration/schedulers`. +:doc:`/admin/scheduling`. .. caution:: @@ -106,12 +115,14 @@ filter must be enabled. Details on this filter are provided in When used, NUMA awareness allows the operating system of the instance to intelligently schedule the workloads that it runs and minimize cross-node -memory bandwidth. To restrict an instance's vCPUs to a single host NUMA node, +memory bandwidth. To configure guest NUMA nodes, you can use the +:nova:extra-spec:`hw:numa_nodes` flavor extra spec. +For example, to restrict an instance's vCPUs to a single host NUMA node, run: .. code-block:: console - $ openstack flavor set m1.large --property hw:numa_nodes=1 + $ openstack flavor set $FLAVOR --property hw:numa_nodes=1 Some workloads have very demanding requirements for memory access latency or bandwidth that exceed the memory bandwidth available from a single NUMA node. @@ -122,40 +133,73 @@ nodes, run: .. code-block:: console - $ openstack flavor set m1.large --property hw:numa_nodes=2 + $ openstack flavor set $FLAVOR --property hw:numa_nodes=2 -The allocation of instances vCPUs and memory from different host NUMA nodes can +The allocation of instance vCPUs and memory from different host NUMA nodes can be configured. This allows for asymmetric allocation of vCPUs and memory, which -can be important for some workloads. To spread the 6 vCPUs and 6 GB of memory +can be important for some workloads. You can configure the allocation of +instance vCPUs and memory across each **guest** NUMA node using the +:nova:extra-spec:`hw:numa_cpus.{num}` and :nova:extra-spec:`hw:numa_mem.{num}` +extra specs respectively. +For example, to spread the 6 vCPUs and 6 GB of memory of an instance across two NUMA nodes and create an asymmetric 1:2 vCPU and memory mapping between the two nodes, run: .. 
code-block:: console - $ openstack flavor set m1.large --property hw:numa_nodes=2 - $ openstack flavor set m1.large \ # configure guest node 0 + $ openstack flavor set $FLAVOR --property hw:numa_nodes=2 + # configure guest node 0 + $ openstack flavor set $FLAVOR \ --property hw:numa_cpus.0=0,1 \ --property hw:numa_mem.0=2048 - $ openstack flavor set m1.large \ # configure guest node 1 + # configure guest node 1 + $ openstack flavor set $FLAVOR \ --property hw:numa_cpus.1=2,3,4,5 \ --property hw:numa_mem.1=4096 +.. note:: + + The ``{num}`` parameter is an index of *guest* NUMA nodes and may not + correspond to *host* NUMA nodes. For example, on a platform with two NUMA + nodes, the scheduler may opt to place guest NUMA node 0, as referenced in + ``hw:numa_mem.0`` on host NUMA node 1 and vice versa. Similarly, the + CPUs bitmask specified in the value for ``hw:numa_cpus.{num}`` refer to + *guest* vCPUs and may not correspond to *host* CPUs. As such, this feature + cannot be used to constrain instances to specific host CPUs or NUMA nodes. + +.. warning:: + + If the combined values of ``hw:numa_cpus.{num}`` or ``hw:numa_mem.{num}`` + are greater than the available number of CPUs or memory respectively, an + exception will be raised. + .. note:: Hyper-V does not support asymmetric NUMA topologies, and the Hyper-V driver will not spawn instances with such topologies. For more information about the syntax for ``hw:numa_nodes``, ``hw:numa_cpus.N`` -and ``hw:num_mem.N``, refer to the :ref:`NUMA -topology ` guide. +and ``hw:num_mem.N``, refer to :doc:`/configuration/extra-specs`. + + +.. _cpu-pinning-policies: Customizing instance CPU pinning policies -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +----------------------------------------- .. important:: The functionality described below is currently only supported by the - libvirt/KVM driver. Hyper-V does not support CPU pinning. + libvirt/KVM driver and requires :ref:`some host configuration + ` for this to work. Hyper-V does not support CPU + pinning. + +.. note:: + + There is no correlation required between the NUMA topology exposed in the + instance and how the instance is actually pinned on the host. This is by + design. See this `invalid bug + `_ for more information. By default, instance vCPU processes are not assigned to any particular host CPU, instead, they float across host CPUs like any other process. This allows @@ -168,74 +212,216 @@ possible with the latency introduced by the default CPU policy. For such workloads, it is beneficial to control which host CPUs are bound to an instance's vCPUs. This process is known as pinning. No instance with pinned CPUs can use the CPUs of another pinned instance, thus preventing resource -contention between instances. To configure a flavor to use pinned vCPUs, a -use a dedicated CPU policy. To force this, run: +contention between instances. + +CPU pinning policies can be used to determine whether an instance should be +pinned or not. They can be configured using the +:nova:extra-spec:`hw:cpu_policy` extra spec and equivalent image metadata +property. There are three policies: ``dedicated``, ``mixed`` and +``shared`` (the default). The ``dedicated`` CPU policy is used to specify +that all CPUs of an instance should use pinned CPUs. To configure a flavor to +use the ``dedicated`` CPU policy, run: .. code-block:: console - $ openstack flavor set m1.large --property hw:cpu_policy=dedicated + $ openstack flavor set $FLAVOR --property hw:cpu_policy=dedicated -.. 
caution:: +This works by ensuring ``PCPU`` allocations are used instead of ``VCPU`` +allocations. As such, it is also possible to request this resource type +explicitly. To configure this, run: + +.. code-block:: console + + $ openstack flavor set $FLAVOR --property resources:PCPU=N + +(where ``N`` is the number of vCPUs defined in the flavor). + +.. note:: + + It is not currently possible to request ``PCPU`` and ``VCPU`` resources in + the same instance. + +The ``shared`` CPU policy is used to specify that an instance **should not** +use pinned CPUs. To configure a flavor to use the ``shared`` CPU policy, run: + +.. code-block:: console + + $ openstack flavor set $FLAVOR --property hw:cpu_policy=shared + +The ``mixed`` CPU policy is used to specify that an instance use pinned CPUs +along with unpinned CPUs. The instance pinned CPU could be specified in the +:nova:extra-spec:`hw:cpu_dedicated_mask` or, if :doc:`real-time ` is +enabled, in the :nova:extra-spec:`hw:cpu_realtime_mask` extra spec. For +example, to configure a flavor to use the ``mixed`` CPU policy with 4 vCPUs in +total and the first 2 vCPUs as pinned CPUs, run: + +.. code-block:: console + + $ openstack flavor set $FLAVOR \ + --vcpus=4 \ + --property hw:cpu_policy=mixed \ + --property hw:cpu_dedicated_mask=0-1 + +To configure a flavor to use the ``mixed`` CPU policy with 4 vCPUs in total and +the first 2 vCPUs as pinned **real-time** CPUs, run: + +.. code-block:: console + + $ openstack flavor set $FLAVOR \ + --vcpus=4 \ + --property hw:cpu_policy=mixed \ + --property hw:cpu_realtime=yes \ + --property hw:cpu_realtime_mask=0-1 + +.. note:: + + For more information about the syntax for ``hw:cpu_policy``, + ``hw:cpu_dedicated_mask``, ``hw:realtime_cpu`` and ``hw:cpu_realtime_mask``, + refer to :doc:`/configuration/extra-specs` + +.. note:: + + For more information about real-time functionality, refer to the + :doc:`documentation `. + +It is also possible to configure the CPU policy via image metadata. This can +be useful when packaging applications that require real-time or near real-time +behavior by ensuring instances created with a given image are always pinned +regardless of flavor. To configure an image to use the ``dedicated`` CPU +policy, run: + +.. code-block:: console - Host aggregates should be used to separate pinned instances from unpinned - instances as the latter will not respect the resourcing requirements of - the former. + $ openstack image set $IMAGE --property hw_cpu_policy=dedicated -When running workloads on SMT hosts, it is important to be aware of the impact -that thread siblings can have. Thread siblings share a number of components -and contention on these components can impact performance. To configure how -to use threads, a CPU thread policy should be specified. For workloads where -sharing benefits performance, use thread siblings. To force this, run: +Likewise, to configure an image to use the ``shared`` CPU policy, run: .. code-block:: console - $ openstack flavor set m1.large \ + $ openstack image set $IMAGE --property hw_cpu_policy=shared + +.. note:: + + For more information about image metadata, refer to the `Image metadata`_ + guide. + +.. important:: + + Flavor-based policies take precedence over image-based policies. For + example, if a flavor specifies a CPU policy of ``dedicated`` then that + policy will be used. If the flavor specifies a CPU policy of + ``shared`` and the image specifies no policy or a policy of ``shared`` then + the ``shared`` policy will be used. 
However, the flavor specifies a CPU + policy of ``shared`` and the image specifies a policy of ``dedicated``, or + vice versa, an exception will be raised. This is by design. Image metadata + is often configurable by non-admin users, while flavors are only + configurable by admins. By setting a ``shared`` policy through flavor + extra-specs, administrators can prevent users configuring CPU policies in + images and impacting resource utilization. + +Customizing instance CPU thread pinning policies +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +.. important:: + + The functionality described below requires the use of pinned instances and + is therefore currently only supported by the libvirt/KVM driver and requires + :ref:`some host configuration ` for this to work. + Hyper-V does not support CPU pinning. + +When running pinned instances on SMT hosts, it may also be necessary to +consider the impact that thread siblings can have on the instance workload. The +presence of an SMT implementation like Intel Hyper-Threading can boost +performance `by up to 30%`__ for some workloads. However, thread siblings +share a number of components and contention on these components can diminish +performance for other workloads. For this reason, it is also possible to +explicitly request hosts with or without SMT. + +__ https://software.intel.com/en-us/articles/how-to-determine-the-effectiveness-of-hyper-threading-technology-with-an-application + +To configure whether an instance should be placed on a host with SMT or not, a +CPU thread policy may be specified. For workloads where sharing benefits +performance, you can request hosts **with** SMT. To configure this, run: + +.. code-block:: console + + $ openstack flavor set $FLAVOR \ --property hw:cpu_policy=dedicated \ --property hw:cpu_thread_policy=require +This will ensure the instance gets scheduled to a host with SMT by requesting +hosts that report the ``HW_CPU_HYPERTHREADING`` trait. It is also possible to +request this trait explicitly. To configure this, run: + +.. code-block:: console + + $ openstack flavor set $FLAVOR \ + --property resources:PCPU=N \ + --property trait:HW_CPU_HYPERTHREADING=required + For other workloads where performance is impacted by contention for resources, -use non-thread siblings or non-SMT hosts. To force this, run: +you can request hosts **without** SMT. To configure this, run: .. code-block:: console - $ openstack flavor set m1.large \ + $ openstack flavor set $FLAVOR \ --property hw:cpu_policy=dedicated \ --property hw:cpu_thread_policy=isolate -Finally, for workloads where performance is minimally impacted, use thread -siblings if available. This is the default, but it can be set explicitly: +This will ensure the instance gets scheduled to a host without SMT by +requesting hosts that **do not** report the ``HW_CPU_HYPERTHREADING`` trait. +It is also possible to request this trait explicitly. To configure this, run: + +.. code-block:: console + + $ openstack flavor set $FLAVOR \ + --property resources:PCPU=N \ + --property trait:HW_CPU_HYPERTHREADING=forbidden + +Finally, for workloads where performance is minimally impacted, you may use +thread siblings if available and fallback to not using them if necessary. This +is the default, but it can be set explicitly: .. 
code-block:: console - $ openstack flavor set m1.large \ + $ openstack flavor set $FLAVOR \ --property hw:cpu_policy=dedicated \ --property hw:cpu_thread_policy=prefer -For more information about the syntax for ``hw:cpu_policy`` and -``hw:cpu_thread_policy``, refer to the :doc:`/admin/flavors` guide. +This does not utilize traits and, as such, there is no trait-based equivalent. -Applications are frequently packaged as images. For applications that require -real-time or near real-time behavior, configure image metadata to ensure -created instances are always pinned regardless of flavor. To configure an -image to use pinned vCPUs and avoid thread siblings, run: +.. note:: + + For more information about the syntax for ``hw:cpu_thread_policy``, refer to + :doc:`/configuration/extra-specs`. + +As with CPU policies, it also possible to configure the CPU thread policy via +image metadata. This can be useful when packaging applications that require +real-time or near real-time behavior by ensuring instances created with a given +image are always pinned regardless of flavor. To configure an image to use the +``require`` CPU policy, run: + +.. code-block:: console + + $ openstack image set $IMAGE \ + --property hw_cpu_policy=dedicated \ + --property hw_cpu_thread_policy=require + +Likewise, to configure an image to use the ``isolate`` CPU thread policy, run: .. code-block:: console - $ openstack image set [IMAGE_ID] \ + $ openstack image set $IMAGE \ --property hw_cpu_policy=dedicated \ --property hw_cpu_thread_policy=isolate -If the flavor specifies a CPU policy of ``dedicated`` then that policy will be -used. If the flavor explicitly specifies a CPU policy of ``shared`` and the -image specifies no policy or a policy of ``shared`` then the ``shared`` policy -will be used, but if the image specifies a policy of ``dedicated`` an exception -will be raised. By setting a ``shared`` policy through flavor extra-specs, -administrators can prevent users configuring CPU policies in images and -impacting resource utilization. To configure this policy, run: +Finally, to configure an image to use the ``prefer`` CPU thread policy, run: .. code-block:: console - $ openstack flavor set m1.large --property hw:cpu_policy=shared + $ openstack image set $IMAGE \ + --property hw_cpu_policy=dedicated \ + --property hw_cpu_thread_policy=prefer If the flavor does not specify a CPU thread policy then the CPU thread policy specified by the image (if any) will be used. If both the flavor and image @@ -244,59 +430,82 @@ an exception will be raised. .. note:: - There is no correlation required between the NUMA topology exposed in the - instance and how the instance is actually pinned on the host. This is by - design. See this `invalid bug - `_ for more information. + For more information about image metadata, refer to the `Image metadata`_ + guide. -For more information about image metadata, refer to the `Image metadata`_ -guide. +.. _emulator-thread-pinning-policies: Customizing instance emulator thread pinning policies ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -When guests need dedicated vCPU allocation, it may not be acceptable to allow -emulator threads to steal time from real-time vCPUs. - -In order to achieve emulator thread pinning, configure the -``hw:emulator_threads_policy`` flavor extra spec. Additionally, -``hw:cpu_policy`` needs to be set to ``dedicated``. The default value for -``hw:emulator_threads_policy`` is ``share``. +.. 
important:: -If you want to tell nova to reserve a dedicated CPU per instance for emulator -thread pinning, configure ``hw:emulator_threads_policy`` as ``isolate``. + The functionality described below requires the use of pinned instances and + is therefore currently only supported by the libvirt/KVM driver and requires + :ref:`some host configuration ` for this to work. + Hyper-V does not support CPU pinning. + +In addition to the work of the guest OS and applications running in an +instance, there is a small amount of overhead associated with the underlying +hypervisor. By default, these overhead tasks - known collectively as emulator +threads - run on the same host CPUs as the instance itself and will result in a +minor performance penalty for the instance. This is not usually an issue, +however, for things like real-time instances, it may not be acceptable for +emulator thread to steal time from instance CPUs. + +Emulator thread policies can be used to ensure emulator threads are run on +cores separate from those used by the instance. There are two policies: +``isolate`` and ``share``. The default is to run the emulator threads on the +same core. The ``isolate`` emulator thread policy is used to specify that +emulator threads for a given instance should be run on their own unique core, +chosen from one of the host cores listed in +:oslo.config:option:`compute.cpu_dedicated_set`. To configure a flavor to use +the ``isolate`` emulator thread policy, run: .. code-block:: console - $ openstack flavor set m1.large \ + $ openstack flavor set $FLAVOR \ --property hw:cpu_policy=dedicated \ --property hw:emulator_threads_policy=isolate -An instance spawned with these settings will have a dedicated physical CPU -which is chosen from the ``vcpu_pin_set`` in addition to the physical CPUs -which are reserved for the vCPUs. - -If you want to tell nova to pin the emulator threads to a shared set of -dedicated CPUs, configure ``hw:emulator_threads_policy`` as ``share``. +The ``share`` policy is used to specify that emulator threads from a given +instance should be run on the pool of host cores listed in +:oslo.config:option:`compute.cpu_shared_set` if configured, else across all +pCPUs of the instance. +To configure a flavor to use the ``share`` emulator thread policy, run: .. code-block:: console - $ openstack flavor set m1.large \ + $ openstack flavor set $FLAVOR \ --property hw:cpu_policy=dedicated \ --property hw:emulator_threads_policy=share -Additionally, set ``[compute]/cpu_shared_set`` in ``/etc/nova/nova.conf`` to -the set of host CPUs that should be used for best-effort CPU resources. - -.. code-block:: console +The above behavior can be summarized in this helpful table: + +.. list-table:: + :header-rows: 1 + :stub-columns: 1 + + * - + - :oslo.config:option:`compute.cpu_shared_set` set + - :oslo.config:option:`compute.cpu_shared_set` unset + * - ``hw:emulator_treads_policy`` unset (default) + - Pinned to all of the instance's pCPUs + - Pinned to all of the instance's pCPUs + * - ``hw:emulator_threads_policy`` = ``share`` + - Pinned to :oslo.config:option:`compute.cpu_shared_set` + - Pinned to all of the instance's pCPUs + * - ``hw:emulator_threads_policy`` = ``isolate`` + - Pinned to a single pCPU distinct from the instance's pCPUs + - Pinned to a single pCPU distinct from the instance's pCPUs - # crudini --set /etc/nova/nova.conf compute cpu_shared_set 4,5,8-11 +.. note:: -For more information about the syntax for ``hw:emulator_threads_policy``, -refer to the :doc:`/admin/flavors` guide. 
+ For more information about the syntax for ``hw:emulator_threads_policy``, + refer to :nova:extra-spec:`the documentation `. Customizing instance CPU topologies -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +----------------------------------- .. important:: @@ -322,17 +531,17 @@ sockets. Some workloads benefit from a custom topology. For example, in some operating systems, a different license may be needed depending on the number of CPU -sockets. To configure a flavor to use a maximum of two sockets, run: +sockets. To configure a flavor to use two sockets, run: .. code-block:: console - $ openstack flavor set m1.large --property hw:cpu_sockets=2 + $ openstack flavor set $FLAVOR --property hw:cpu_sockets=2 Similarly, to configure a flavor to use one core and one thread, run: .. code-block:: console - $ openstack flavor set m1.large \ + $ openstack flavor set $FLAVOR \ --property hw:cpu_cores=1 \ --property hw:cpu_threads=1 @@ -347,22 +556,25 @@ Similarly, to configure a flavor to use one core and one thread, run: with ten cores fails. For more information about the syntax for ``hw:cpu_sockets``, ``hw:cpu_cores`` -and ``hw:cpu_threads``, refer to the :doc:`/admin/flavors` guide. +and ``hw:cpu_threads``, refer to :doc:`/configuration/extra-specs`. It is also possible to set upper limits on the number of sockets, cores, and threads used. Unlike the hard values above, it is not necessary for this exact number to used because it only provides a limit. This can be used to provide some flexibility in scheduling, while ensuring certain limits are not -exceeded. For example, to ensure no more than two sockets are defined in the -instance topology, run: +exceeded. For example, to ensure no more than two sockets, eight cores and one +thread are defined in the instance topology, run: .. code-block:: console - $ openstack flavor set m1.large --property hw:cpu_max_sockets=2 + $ openstack flavor set $FLAVOR \ + --property hw:cpu_max_sockets=2 \ + --property hw:cpu_max_cores=8 \ + --property hw:cpu_max_threads=1 For more information about the syntax for ``hw:cpu_max_sockets``, -``hw:cpu_max_cores``, and ``hw:cpu_max_threads``, refer to the -:doc:`/admin/flavors` guide. +``hw:cpu_max_cores``, and ``hw:cpu_max_threads``, refer to +:doc:`/configuration/extra-specs`. Applications are frequently packaged as images. For applications that prefer certain CPU topologies, configure image metadata to hint that created instances @@ -371,7 +583,7 @@ request a two-socket, four-core per socket topology, run: .. code-block:: console - $ openstack image set [IMAGE_ID] \ + $ openstack image set $IMAGE \ --property hw_cpu_sockets=2 \ --property hw_cpu_cores=4 @@ -381,7 +593,7 @@ maximum of one thread, run: .. code-block:: console - $ openstack image set [IMAGE_ID] \ + $ openstack image set $IMAGE \ --property hw_cpu_max_sockets=2 \ --property hw_cpu_max_threads=1 @@ -394,10 +606,82 @@ topologies that might, for example, incur an additional licensing fees. For more information about image metadata, refer to the `Image metadata`_ guide. +.. _configure-libvirt-pinning: + +Configuring libvirt compute nodes for CPU pinning +------------------------------------------------- + +.. versionchanged:: 20.0.0 + + Prior to 20.0.0 (Train), it was not necessary to explicitly configure hosts + for pinned instances. However, it was not possible to place pinned instances + on the same host as unpinned CPUs, which typically meant hosts had to be + grouped into host aggregates. 
If this was not done, unpinned instances would + continue floating across all enabled host CPUs, even those that some + instance CPUs were pinned to. Starting in 20.0.0, it is necessary to + explicitly identify the host cores that should be used for pinned instances. + +Nova treats host CPUs used for unpinned instances differently from those used +by pinned instances. The former are tracked in placement using the ``VCPU`` +resource type and can be overallocated, while the latter are tracked using the +``PCPU`` resource type. By default, nova will report all host CPUs as ``VCPU`` +inventory; however, this can be configured using the +:oslo.config:option:`compute.cpu_shared_set` config option, to specify which +host CPUs should be used for ``VCPU`` inventory, and the +:oslo.config:option:`compute.cpu_dedicated_set` config option, to specify which +host CPUs should be used for ``PCPU`` inventory. + +Consider a compute node with a total of 24 host physical CPU cores with +hyperthreading enabled. The operator wishes to reserve 1 physical CPU core and +its thread sibling for host processing (not for guest instance use). +Furthermore, the operator wishes to use 8 host physical CPU cores and their +thread siblings for dedicated guest CPU resources. The remaining 15 host +physical CPU cores and their thread siblings will be used for shared guest vCPU +usage, with an 8:1 allocation ratio for those physical processors used for +shared guest CPU resources. + +The operator could configure ``nova.conf`` like so:: + + [DEFAULT] + cpu_allocation_ratio=8.0 + + [compute] + cpu_dedicated_set=2-17 + cpu_shared_set=18-47 + +The virt driver will construct a provider tree containing a single resource +provider representing the compute node and report inventory of ``PCPU`` and +``VCPU`` for this single provider accordingly:: + + COMPUTE NODE provider + PCPU: + total: 16 + reserved: 0 + min_unit: 1 + max_unit: 16 + step_size: 1 + allocation_ratio: 1.0 + VCPU: + total: 30 + reserved: 0 + min_unit: 1 + max_unit: 30 + step_size: 1 + allocation_ratio: 8.0 + +For instances using the ``dedicated`` CPU policy or an explicit ``PCPU`` resource +request, ``PCPU`` inventory will be consumed. Instances using the ``shared`` +CPU policy, meanwhile, will consume ``VCPU`` inventory. + +.. note:: + + ``PCPU`` and ``VCPU`` allocations are currently combined to calculate the + value for the ``cores`` quota class. + .. _configure-hyperv-numa: Configuring Hyper-V compute nodes for instance NUMA policies -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +------------------------------------------------------------ Hyper-V is configured by default to allow instances to span multiple NUMA nodes, regardless of whether the instances have been configured to only span N NUMA @@ -439,6 +723,6 @@ memory allocation turned on. The Hyper-V driver will ignore the configured instances with a NUMA topology. .. Links -.. _`Image metadata`: https://docs.openstack.org/image-guide/image-metadata.html +.. _`Image metadata`: https://docs.openstack.org/image-guide/introduction.html#image-metadata .. _`discussion`: http://lists.openstack.org/pipermail/openstack-dev/2016-March/090367.html .. 
_`MTTCG project`: http://wiki.qemu.org/Features/tcg-multithread diff --git a/doc/source/admin/emulated-tpm.rst b/doc/source/admin/emulated-tpm.rst new file mode 100644 index 00000000000..5a1830e1a1f --- /dev/null +++ b/doc/source/admin/emulated-tpm.rst @@ -0,0 +1,131 @@ +======================================= +Emulated Trusted Platform Module (vTPM) +======================================= + +.. versionadded:: 22.0.0 (Victoria) + +Starting in the 22.0.0 (Victoria) release, Nova supports adding an emulated +virtual `Trusted Platform Module`__ (vTPM) to guests. + +.. __: https://en.wikipedia.org/wiki/Trusted_Platform_Module + + +Enabling vTPM +------------- + +The following are required on each compute host wishing to support the vTPM +feature: + +* Currently vTPM is only supported when using the libvirt compute driver with a + :oslo.config:option:`libvirt.virt_type` of ``kvm`` or ``qemu``. + +* A `key manager service`__, such as `barbican`__, must be configured to store + secrets used to encrypt the virtual device files at rest. + +* The swtpm__ binary and associated libraries__. + +* Set the :oslo.config:option:`libvirt.swtpm_enabled` config option to + ``True``. This will enable support for both TPM version 1.2 and 2.0. + +With the above requirements satisfied, verify vTPM support by inspecting the +traits on the compute node's resource provider: + +.. code:: bash + + $ COMPUTE_UUID=$(openstack resource provider list --name $HOST -f value -c uuid) + $ openstack resource provider trait list $COMPUTE_UUID | grep SECURITY_TPM + | COMPUTE_SECURITY_TPM_1_2 | + | COMPUTE_SECURITY_TPM_2_0 | + +.. __: https://docs.openstack.org/api-guide/key-manager/ +.. __: https://docs.openstack.org/barbican/latest/ +.. __: https://github.com/stefanberger/swtpm/wiki +.. __: https://github.com/stefanberger/libtpms/ + + +Configuring a flavor or image +----------------------------- + +A vTPM can be requested on a server via flavor extra specs or image metadata +properties. There are two versions supported - 1.2 and 2.0 - and two models - +TPM Interface Specification (TIS) and Command-Response Buffer (CRB). The CRB +model is only supported with version 2.0. + +.. list-table:: + :header-rows: 1 + + * - Flavor extra_specs + - Image metadata + - Description + * - ``hw:tpm_version`` + - ``hw_tpm_version`` + - Specify the TPM version, ``1.2`` or ``2.0``. Required if requesting a + vTPM. + * - ``hw:tpm_model`` + - ``hw_tpm_model`` + - Specify the TPM model, ``tpm-tis`` (the default) or ``tpm-crb`` (only + valid with version ``2.0``). + +For example, to configure a flavor to use the TPM 2.0 with the CRB model: + +.. code-block:: console + + $ openstack flavor set $FLAVOR \ + --property hw:tpm_version=2.0 \ + --property hw:tpm_model=tpm-crb + +Scheduling will fail if flavor and image supply conflicting values, or if model +``tpm-crb`` is requested with version ``1.2``. + +Upon successful boot, the server should see a TPM device such as ``/dev/tpm0`` +which can be used in the same manner as a hardware TPM. + + +Limitations +----------- + +* Only server operations performed by the server owner are supported, as the + user's credentials are required to unlock the virtual device files on the + host. Thus the admin may need to decide whether to grant the user additional + policy roles; if not, those operations are effectively disabled. + +* Live migration, evacuation, shelving and rescuing of servers with vTPMs is + not currently supported. 
+ + +Security +-------- + +With a hardware TPM, the root of trust is a secret known only to the TPM user. +In contrast, an emulated TPM comprises a file on disk which the libvirt daemon +must be able to present to the guest. At rest, this file is encrypted using a +passphrase stored in a key manager service. The passphrase in the key manager +is associated with the credentials of the owner of the server (the user who +initially created it). The passphrase is retrieved and used by libvirt to +unlock the emulated TPM data any time the server is booted. + +Although the above mechanism uses a libvirt secret__ that is both ``private`` +(can't be displayed via the libvirt API or ``virsh``) and ``ephemeral`` (exists +only in memory, never on disk), it is theoretically possible for a sufficiently +privileged user to retrieve the secret and/or vTPM data from memory. + +A full analysis and discussion of security issues related to emulated TPM is +beyond the scope of this document. + +.. __: https://libvirt.org/formatsecret.html#SecretAttributes + + +References +---------- + +* `TCG PC Client Specific TPM Interface Specification (TIS)`__ +* `TCG PC Client Platform TPM Profile (PTP) Specification`__ +* `QEMU docs on tpm`__ +* `Libvirt XML to request emulated TPM device`__ +* `Libvirt secret for usage type ``vtpm```__ + +.. __: https://trustedcomputinggroup.org/resource/pc-client-work-group-pc-client-specific-tpm-interface-specification-tis/ +.. __: https://trustedcomputinggroup.org/resource/pc-client-platform-tpm-profile-ptp-specification/ +.. __: https://qemu.readthedocs.io/en/latest/specs/tpm.html +.. __: https://libvirt.org/formatdomain.html#elementsTpm +.. __: https://libvirt.org/formatsecret.html#vTPMUsageType diff --git a/doc/source/admin/figures/SCH_5009_V00_NUAC-VNC_OpenStack.svg b/doc/source/admin/figures/SCH_5009_V00_NUAC-VNC_OpenStack.svg index 563dea780b0..f2934118829 100644 --- a/doc/source/admin/figures/SCH_5009_V00_NUAC-VNC_OpenStack.svg +++ b/doc/source/admin/figures/SCH_5009_V00_NUAC-VNC_OpenStack.svg @@ -467,12 +467,12 @@ Sheet.53 - Browses the url returned Http://novncip:port/?token=xyz + Browses the url returned Http://novncip:port/?path=%3Ftoken%3Dxyz Browses the url returnedHttp://novncip:port/?token=xyz + x="4" dy="1.2em" class="st13">Http://novncip:port/?path=%3Ftoken%3Dxyz Sheet.28 diff --git a/doc/source/admin/file-backed-memory.rst b/doc/source/admin/file-backed-memory.rst index 22fbc951821..dffb3de3833 100644 --- a/doc/source/admin/file-backed-memory.rst +++ b/doc/source/admin/file-backed-memory.rst @@ -46,14 +46,22 @@ Libvirt capability requires libvirt version 4.4.0 or newer. Qemu - File-backed memory requires qemu version 2.6.0 or newer.Discard capability + File-backed memory requires qemu version 2.6.0 or newer. Discard capability requires qemu version 2.10.0 or newer. Memory overcommit File-backed memory is not compatible with memory overcommit. - ``ram_allocation_ratio`` must be set to ``1.0`` in ``nova.conf``, and the - host must not be added to a host aggregate with ``ram_allocation_ratio`` - set to anything but ``1.0``. + :oslo.config:option:`ram_allocation_ratio` must be set to ``1.0`` in + ``nova.conf``, and the host must not be added to a :doc:`host aggregate + ` with ``ram_allocation_ratio`` set to anything but + ``1.0``. + +Reserved memory + When configured, file-backed memory is reported as total system memory to + placement, with RAM used as cache. Reserved memory corresponds to disk + space not set aside for file-backed memory. 
+ :oslo.config:option:`reserved_host_memory_mb` should be set to ``0`` in + ``nova.conf``. Huge pages File-backed memory is not compatible with huge pages. Instances with huge diff --git a/doc/source/admin/flavors.rst b/doc/source/admin/flavors.rst index d59730a623c..abf939d1119 100644 --- a/doc/source/admin/flavors.rst +++ b/doc/source/admin/flavors.rst @@ -19,8 +19,10 @@ manage flavors. To see information for this command, run: .. note:: Configuration rights can be delegated to additional users by redefining - the access controls for ``os_compute_api:os-flavor-manage`` in - ``/etc/nova/policy.json`` on the ``nova-api`` server. + the access controls for ``os_compute_api:os-flavor-manage:create``, + ``os_compute_api:os-flavor-manage:update`` and + ``os_compute_api:os-flavor-manage:delete`` in ``/etc/nova/policy.yaml`` + on the ``nova-api`` server. .. note:: @@ -111,12 +113,29 @@ and a new description as follows: .. code-block:: console - $ nova flavor-update FLAVOR DESCRIPTION + $ openstack --os-compute-api-version 2.55 flavor set --description <DESCRIPTION> <FLAVOR> .. note:: - There are no commands to update a description of a flavor - in the :command:`openstack` command currently (version 3.15.0). + The only field that can be updated is the description field. + Nova has historically intentionally not included an API to update + a flavor because that would be confusing for instances already + created with that flavor. Needing to change any other aspect of + a flavor requires deleting and/or creating a new flavor. + + Nova stores a serialized version of the flavor associated with an + instance record in the ``instance_extra`` table. While nova supports + `updating flavor extra_specs`_ it does not update the embedded flavor + in existing instances. Nova does not update the embedded flavor + as the extra_specs change may invalidate the current placement + of the instance or alter the compute context that has been + created for the instance by the virt driver. For this reason + admins should avoid updating extra_specs for flavors used by + existing instances. A resize can be used to update existing + instances if required, but as a resize performs a cold migration, + it is not transparent to a tenant. + +.. _updating flavor extra_specs: https://docs.openstack.org/api-ref/compute/?expanded=#update-an-extra-spec-for-a-flavor Delete a flavor --------------- diff --git a/doc/source/admin/huge-pages.rst b/doc/source/admin/huge-pages.rst index e53a58167e4..73f6c5dd2db 100644 --- a/doc/source/admin/huge-pages.rst +++ b/doc/source/admin/huge-pages.rst @@ -56,6 +56,7 @@ Enabling huge pages on the host ------------------------------- .. important:: + Huge pages may not be used on a host configured for file-backed memory. See :doc:`file-backed-memory` for details @@ -163,7 +164,7 @@ By default, an instance does not use huge pages for its underlying memory. However, huge pages can bring important or required performance improvements for some workloads. Huge pages must be requested explicitly through the use of flavor extra specs or image metadata. To request an instance use huge pages, -run: +you can use the :nova:extra-spec:`hw:mem_page_size` flavor extra spec: .. code-block:: console @@ -178,7 +179,7 @@ are assumed. To request an instance to use 2 MB huge pages, run one of: .. code-block:: console - $ openstack flavor set m1.large --property hw:mem_page_size=2Mb + $ openstack flavor set m1.large --property hw:mem_page_size=2MB .. 
code-block:: console @@ -205,7 +206,7 @@ run: $ openstack flavor set m1.large --property hw:mem_page_size=any For more information about the syntax for ``hw:mem_page_size``, refer to -:doc:`flavors`. +:nova:extra-spec:`the documentation <hw:mem_page_size>`. Applications are frequently packaged as images. For applications that require the IO performance improvements that huge pages provides, configure image @@ -239,4 +240,4 @@ guide. .. Links .. _`Linux THP guide`: https://www.kernel.org/doc/Documentation/vm/transhuge.txt .. _`Linux hugetlbfs guide`: https://www.kernel.org/doc/Documentation/vm/hugetlbpage.txt -.. _`Image metadata`: https://docs.openstack.org/image-guide/image-metadata.html +.. _`Image metadata`: https://docs.openstack.org/image-guide/introduction.html#image-metadata diff --git a/doc/source/admin/hw-machine-type.rst b/doc/source/admin/hw-machine-type.rst new file mode 100644 index 00000000000..e8a0df87e4d --- /dev/null +++ b/doc/source/admin/hw-machine-type.rst @@ -0,0 +1,137 @@ +.. + Licensed under the Apache License, Version 2.0 (the "License"); you may + not use this file except in compliance with the License. You may obtain + a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + License for the specific language governing permissions and limitations + under the License. + +====================================================================== +hw_machine_type - Configuring and updating QEMU instance machine types +====================================================================== + +.. versionadded:: 12.0.0 (Liberty) + +.. versionchanged:: 23.0.0 (Wallaby) + + The libvirt driver now records the machine type of an instance at start up + allowing the ``[libvirt]hw_machine_type`` configurable to change over time + without impacting existing instances. + + Added ``nova-manage`` commands to control the machine_type of an instance. + +.. note:: + + The following only applies to environments using libvirt compute hosts. + +Introduction +------------ + +QEMU's machine type concept can be thought of as a virtual chipset that +provides certain default devices (e.g. PCIe graphics card, Ethernet controller, +SATA controller, etc). QEMU supports two main variants of "machine type" for +x86 hosts: (a) ``pc``, which corresponds to Intel's I440FX chipset (released in +1996) and (b) ``q35``, which corresponds to Intel's 82Q35 chipset (released in +2007). For AArch64 hosts, the machine type is called ``virt``. + +The ``pc`` machine type is considered legacy, and does not support many modern +features. Although at the time of writing, upstream QEMU has not reached an +agreement to remove new versioned variants of the ``pc`` machine type, some +long-term stable Linux distributions (CentOS, RHEL, possibly others) are moving +to support ``q35`` only. + +Configure +--------- + +For end users, the machine type of an instance is controlled by the selection of +an image with the `hw_machine_type image metadata property`__ set. + +.. __: https://docs.openstack.org/glance/latest/admin/useful-image-properties.html + +.. code-block:: shell + + $ openstack image set --property hw_machine_type=q35 $IMAGE + +The libvirt virt driver supports the configuration of a per compute host +default machine type via the :oslo.config:option:`libvirt.hw_machine_type` +option. 
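+A minimal sketch of this configuration, assuming the option's
+``ARCH=MACHINE_TYPE`` list format and purely illustrative values:
+
+.. code-block:: ini
+
+    [libvirt]
+    # Hosts of each listed architecture default to the given machine type.
+    hw_machine_type = x86_64=q35,aarch64=virt
+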
This provides a default machine type per host architecture, to be used when +no corresponding ``hw_machine_type`` image property is provided for the +instance. + +When this option is not defined, the libvirt driver relies on the following +`hardcoded dictionary`__ of default machine types per architecture: + +.. __: https://github.com/openstack/nova/blob/dc93e3b510f53d5b2198c8edd22528f0c899617e/nova/virt/libvirt/utils.py#L631-L638 + +.. code-block:: python + + default_mtypes = { + obj_fields.Architecture.ARMV7: "virt", + obj_fields.Architecture.AARCH64: "virt", + obj_fields.Architecture.S390: "s390-ccw-virtio", + obj_fields.Architecture.S390X: "s390-ccw-virtio", + obj_fields.Architecture.I686: "pc", + obj_fields.Architecture.X86_64: "pc", + } + +Update +------ + +Prior to the Wallaby (23.0.0) release, the +:oslo.config:option:`libvirt.hw_machine_type` option had to remain static once +set for the lifetime of a deployment. This was due to the machine type of +instances without a ``hw_machine_type`` image property using the newly +configured machine types after a hard reboot or migration. This could in turn +break the internal ABI of the instance when changing between underlying machine +types, such as from ``pc`` to ``q35``. + +From the Wallaby (23.0.0) release, it is now possible to change the +:oslo.config:option:`libvirt.hw_machine_type` config once all instances have a +machine type recorded within the system metadata of the instance. + +To allow this, the libvirt driver will now attempt to record the machine type +for any instance that doesn't already have it recorded during start up of the +compute service or initial spawn of an instance. This should ensure a machine +type is recorded for all instances after an upgrade to Wallaby that are not in +a ``SHELVED_OFFLOADED`` state. + +To record a machine type for instances in a ``SHELVED_OFFLOADED`` state after +an upgrade to Wallaby, a new :program:`nova-manage` command has been introduced +to initially record the machine type of an instance. + +.. code-block:: shell + + $ nova-manage libvirt update_machine_type $instance $machine_type + +This command can also be used later to update the specific machine type used by +the instance. An additional :program:`nova-manage` command is also available to +fetch the machine type of a specific instance: + +.. code-block:: shell + + $ nova-manage libvirt get_machine_type $instance + +To confirm that all instances within an environment or a specific cell have had +a machine type recorded, another :program:`nova-manage` command can be used: + +.. code-block:: shell + + $ nova-manage libvirt list_unset_machine_type + +The logic behind this command is also used by a new :program:`nova-status` +upgrade check that will fail with a warning when instances without a machine +type set exist in an environment. + +.. code-block:: shell + + $ nova-status upgrade check + +Once it has been verified that all instances within the environment or specific +cell have had a machine type recorded, the +:oslo.config:option:`libvirt.hw_machine_type` option can be updated without impacting +existing instances. diff --git a/doc/source/admin/image-caching.rst b/doc/source/admin/image-caching.rst new file mode 100644 index 00000000000..a5475c15bbe --- /dev/null +++ b/doc/source/admin/image-caching.rst @@ -0,0 +1,113 @@ +============= +Image Caching +============= + +Nova supports caching base images on compute nodes when using a +`supported virt driver`_. + +.. 
_supported virt driver: https://docs.openstack.org/nova/latest/user/support-matrix.html#operation_cache_images + +What is Image Caching? +---------------------- + +In order to understand what image caching is and why it is beneficial, +it helps to be familiar with the process by which an instance is +booted from a given base image. When a new instance is created on a +compute node, the following general steps are performed by the compute +manager in conjunction with the virt driver: + +#. Download the base image from glance +#. Copy or COW the base image to create a new root disk image for the instance +#. Boot the instance using the new root disk image + +The first step involves downloading the entire base image to the local +disk on the compute node, which could involve many gigabytes of +network traffic, storage, and many minutes of latency between the +start of the boot process and actually running the instance. When the +virt driver supports image caching, step #1 above may be skipped if +the base image is already present on the compute node. This is most +often the case when another instance has been booted on that node from +the same base image recently. If present, the download operation can +be skipped, which greatly reduces the time-to-boot for the second and +subsequent instances that use the same base image, and avoids +load on the glance server and the network connection. + +By default, the compute node will periodically scan the images it has +cached, looking for base images that are older than a configured +lifetime (24 hours by default) and are not used by any instances on +the node. Those unused images are deleted from the cache directory +until they are needed again. + +For more information about configuring image cache behavior, see the +documentation for the configuration options in the +:oslo.config:group:`image_cache` group. + +.. note:: + + Some ephemeral backend drivers may not use or need image caching, + or may not behave in the same way as others. For example, when + using the ``rbd`` backend with the ``libvirt`` driver and a shared + pool with glance, images are COW'd at the storage level and thus + need not be downloaded (and thus cached) at the compute node at + all. + +Image Caching Resource Accounting +--------------------------------- + +Generally, the size of the image cache is not part of the data Nova +includes when reporting available or consumed disk space. This means +that when ``nova-compute`` reports 100G of total disk space, the +scheduler will assume that 100G of instances may be placed +there. Usually disk is the most plentiful resource and thus the last +to be exhausted, so this is often not problematic. However, if many +instances are booted from distinct images, all of which need to be +cached in addition to the disk space used by the instances themselves, +Nova may overcommit the disk unintentionally by failing to consider +the size of the image cache. + +There are two approaches to addressing this situation: + +#. **Mount the image cache as a separate filesystem**. This will + cause Nova to report the amount of disk space available purely to + instances, independent of how much is consumed by the cache. Nova + will continue to disregard the size of the image cache and, if the + cache space is exhausted, builds will fail. However, available + disk space for instances will be correctly reported by + ``nova-compute`` and accurately considered by the scheduler. + +#. **Enable optional reserved disk amount behavior**. 
The + configuration workaround + :oslo.config:option:`workarounds.reserve_disk_resource_for_image_cache` + will cause ``nova-compute`` to periodically update the reserved disk + amount to include the statically configured value, as well as the + amount currently consumed by the image cache. This will cause the + scheduler to see the available disk space decrease as the image + cache grows. This is not updated synchronously and thus is not a + perfect solution, but should vastly increase the scheduler's + visibility resulting in better decisions. (Note this solution is + currently libvirt-specific) + +As above, not all backends and virt drivers use image caching, and +thus a third option may be to consider alternative infrastructure to +eliminate this problem altogether. + +Image pre-caching +----------------- + +It may be beneficial to pre-cache images on compute nodes in order to +achieve low time-to-boot latency for new instances immediately. This +is often useful when rolling out a new version of an application where +downtime is important and having the new images already available on +the compute nodes is critical. + +Nova provides (since the Ussuri release) a mechanism to request that +images be cached without having to boot an actual instance on a +node. This best-effort service operates at the host aggregate level in +order to provide an efficient way to indicate that a large number of +computes should receive a given set of images. If the computes that +should pre-cache an image are not already in a defined host aggregate, +that must be done first. + +For information on how to perform aggregate-based image pre-caching, +see the :ref:`image-caching-aggregates` section of the Host aggregates +documentation. diff --git a/doc/source/admin/index.rst b/doc/source/admin/index.rst index f68e006189c..960034ab8ff 100644 --- a/doc/source/admin/index.rst +++ b/doc/source/admin/index.rst @@ -11,38 +11,148 @@ Compute does not include virtualization software. Instead, it defines drivers that interact with underlying virtualization mechanisms that run on your host operating system, and exposes functionality over a web-based API. + +Overview +-------- + +To effectively administer compute, you must understand how the different +installed nodes interact with each other. Compute can be installed in many +different ways using multiple servers, but generally multiple compute nodes +control the virtual servers and a cloud controller node contains the remaining +Compute services. + +The Compute cloud works using a series of daemon processes named ``nova-*`` +that exist persistently on the host machine. These binaries can all run on the +same machine or be spread out on multiple boxes in a large deployment. The +responsibilities of services and drivers are: + +.. rubric:: Services + +``nova-api`` + Receives XML requests and sends them to the rest of the system. A WSGI app + routes and authenticates requests. Supports the OpenStack Compute APIs. A + ``nova.conf`` configuration file is created when Compute is installed. + +.. todo:: + + Describe nova-api-metadata, nova-api-os-compute, nova-serialproxy and + nova-spicehtml5proxy + + nova-console, nova-dhcpbridge and nova-xvpvncproxy are all deprecated for + removal so they can be ignored. + +``nova-compute`` + Manages virtual machines. Loads a Service object, and exposes the public + methods on ComputeManager through a Remote Procedure Call (RPC). + +``nova-conductor`` + Provides database-access support for compute nodes (thereby reducing security + risks). 
+ +``nova-scheduler`` + Dispatches requests for new virtual machines to the correct node. + +``nova-novncproxy`` + Provides a VNC proxy for browsers, allowing VNC consoles to access virtual + machines. + +.. note:: + + Some services have drivers that change how the service implements its core + functionality. For example, the ``nova-compute`` service supports drivers + that let you choose which hypervisor type it can use. + +.. toctree:: + :maxdepth: 2 + + manage-volumes + flavors + default-ports + admin-password-injection + manage-the-cloud + manage-logs + root-wrap-reference + configuring-migrations + live-migration-usage + remote-console-access + service-groups + node-down + scheduling + upgrades + + +Advanced configuration +---------------------- + +OpenStack clouds run on platforms that differ greatly in the capabilities that +they provide. By default, the Compute service seeks to abstract the underlying +hardware that it runs on, rather than exposing specifics about the underlying +host platforms. This abstraction manifests itself in many ways. For example, +rather than exposing the types and topologies of CPUs running on hosts, the +service exposes a number of generic CPUs (virtual CPUs, or vCPUs) and allows +for overcommitting of these. In a similar manner, rather than exposing the +individual types of network devices available on hosts, generic +software-powered network ports are provided. These features are designed to +allow high resource utilization and allow the service to provide a generic, +cost-effective and highly scalable cloud upon which to build applications. + +This abstraction is beneficial for most workloads. However, there are some +workloads where determinism and per-instance performance are important, if not +vital. In these cases, instances can be expected to deliver near-native +performance. The Compute service provides features to improve individual +instance performance for these kinds of workloads. + +.. include:: /common/numa-live-migration-warning.txt + +.. toctree:: + :maxdepth: 2 + + pci-passthrough + cpu-topologies + real-time + huge-pages + virtual-gpu + file-backed-memory + ports-with-resource-requests + virtual-persistent-memory + emulated-tpm + uefi + secure-boot + sev + managing-resource-providers + resource-limits + cpu-models + libvirt-misc + + +Additional guides +----------------- + +.. TODO(mriedem): This index page has a lot of content which should be + organized into groups for things like configuration, operations, + troubleshooting, etc. + .. 
toctree:: :maxdepth: 2 - admin-password-injection.rst - adv-config.rst - arch.rst - availability-zones.rst - configuring-migrations.rst - cpu-topologies.rst - default-ports.rst - evacuate.rst - flavors.rst - huge-pages.rst - live-migration-usage.rst - manage-logs.rst - manage-the-cloud.rst - manage-users.rst - manage-volumes.rst - migration.rst - migrate-instance-with-snapshot.rst - networking-nova.rst - networking.rst - node-down.rst - pci-passthrough.rst - quotas2.rst - quotas.rst - remote-console-access.rst - root-wrap-reference.rst - security-groups.rst - security.rst - service-groups.rst - services.rst - ssh-configuration.rst - support-compute.rst - system-admin.rst + aggregates + arch + availability-zones + cells + config-drive + configuration/index + evacuate + image-caching + metadata-service + migration + migrate-instance-with-snapshot + networking + quotas + security-groups + security + services + ssh-configuration + support-compute + secure-live-migration-with-qemu-native-tls + vendordata + hw-machine-type diff --git a/doc/source/admin/libvirt-misc.rst b/doc/source/admin/libvirt-misc.rst new file mode 100644 index 00000000000..87dbe18ea47 --- /dev/null +++ b/doc/source/admin/libvirt-misc.rst @@ -0,0 +1,140 @@ +====================== +Other libvirt features +====================== + +The libvirt driver supports a large number of additional features that don't +warrant their own section. These are gathered here. + + +Guest agent support +------------------- + +Guest agents enable optional access between compute nodes and guests through a +socket, using the QMP protocol. + +To enable this feature, you must set ``hw_qemu_guest_agent=yes`` as a metadata +parameter on the image you wish to use to create the guest-agent-capable +instances from. You can explicitly disable the feature by setting +``hw_qemu_guest_agent=no`` in the image metadata. + + +.. _extra-specs-watchdog-behavior: + +Watchdog behavior +----------------- + +.. versionchanged:: 15.0.0 (Ocata) + + Add support for the ``disabled`` option. + +A virtual watchdog device can be used to keep an eye on the guest server and +carry out a configured action if the server hangs. The watchdog uses the +i6300esb device (emulating a PCI Intel 6300ESB). Watchdog behavior can be +configured using the :nova:extra-spec:`hw:watchdog_action` flavor extra spec or +equivalent image metadata property. If neither the extra spec nor the image +metadata property is specified, the watchdog is disabled. + +For example, to enable the watchdog and configure it to forcefully reset the +guest in the event of a hang, run: + +.. code-block:: console + + $ openstack flavor set $FLAVOR --property hw:watchdog_action=reset + +.. note:: + + Watchdog behavior set using the image metadata property will override + behavior set using the flavor extra spec. + + +.. _extra-specs-random-number-generator: + +Random number generator +----------------------- + +.. versionchanged:: 21.0.0 (Ussuri) + + Random number generators are now enabled by default for instances. + +Operating systems require good sources of entropy for things like cryptographic +software. If a random-number generator device has been added to the instance +through its image properties, the device can be enabled and configured using +the :nova:extra-spec:`hw_rng:allowed`, :nova:extra-spec:`hw_rng:rate_bytes` and +:nova:extra-spec:`hw_rng:rate_period` flavor extra specs. + +For example, to configure a byte rate of 5 bytes per period and a period of +1000 milliseconds (1 second), run: + +.. 
code-block:: console + + $ openstack flavor set $FLAVOR \ + --property hw_rng:rate_bytes=5 \ + --property hw_rng:rate_period=1000 + +Alternatively, to disable the random number generator, run: + +.. code-block:: console + + $ openstack flavor set $FLAVOR --property hw_rng:allowed=false + +The presence of separate byte rate and rate period configurables is +intentional. As noted in the `QEMU docs`__, a smaller rate and larger period +minimize the opportunity for malicious guests to starve other guests of +entropy but at the cost of responsiveness. Conversely, larger rates and smaller +periods will increase the burst rate but at the potential cost of warping +resource consumption in favour of a greedy guest. + +.. __: https://wiki.qemu.org/Features/VirtIORNG#Effect_of_the_period_parameter + + +.. _extra-specs-performance-monitoring-unit: + +Performance Monitoring Unit (vPMU) +---------------------------------- + +.. versionadded:: 20.0.0 (Train) + +If nova is deployed with the libvirt virt driver and +:oslo.config:option:`libvirt.virt_type` is set to ``qemu`` or ``kvm``, a +virtual performance monitoring unit (vPMU) can be enabled or disabled for an +instance using the :nova:extra-spec:`hw:pmu` flavor extra spec or ``hw_pmu`` +image metadata property. +If the vPMU is not explicitly enabled or disabled via +the flavor or image, its presence is left to QEMU to decide. + +For example, to explicitly disable the vPMU, run: + +.. code-block:: console + + $ openstack flavor set FLAVOR-NAME --property hw:pmu=false + +The vPMU is used by tools like ``perf`` in the guest to provide more accurate +information for profiling applications and monitoring guest performance. +For :doc:`real time <real-time>` workloads, the emulation of a vPMU can +introduce additional latency which would be undesirable. If the telemetry it +provides is not required, the vPMU can be disabled. For most workloads the +default of unset (enabled) will be correct. + + +.. _extra-specs-hiding-hypervisor-signature: + +Hiding hypervisor signature +--------------------------- + +.. versionadded:: 18.0.0 (Rocky) + +.. versionchanged:: 21.0.0 (Ussuri) + + Prior to the Ussuri release, this was called ``hide_hypervisor_id``. An + alias is provided for backwards compatibility. + +Some hypervisors add a signature to their guests. While the presence of the +signature can enable some paravirtualization features on the guest, it can also +have the effect of preventing some drivers from loading. You can hide this +signature by setting the :nova:extra-spec:`hw:hide_hypervisor_id` extra spec to true. + +For example, to hide your signature from the guest OS, run: + +.. 
code:: console + + $ openstack flavor set $FLAVOR --property hw:hide_hypervisor_id=true diff --git a/doc/source/admin/live-migration-usage.rst b/doc/source/admin/live-migration-usage.rst index bf848cf852b..783ab5e27c2 100644 --- a/doc/source/admin/live-migration-usage.rst +++ b/doc/source/admin/live-migration-usage.rst @@ -67,9 +67,8 @@ Manual selection of the destination host +----+------------------+-------+----------+---------+-------+----------------------------+ | 3 | nova-conductor | HostA | internal | enabled | up | 2017-02-18T09:42:29.000000 | | 4 | nova-scheduler | HostA | internal | enabled | up | 2017-02-18T09:42:26.000000 | - | 5 | nova-consoleauth | HostA | internal | enabled | up | 2017-02-18T09:42:29.000000 | - | 6 | nova-compute | HostB | nova | enabled | up | 2017-02-18T09:42:29.000000 | - | 7 | nova-compute | HostC | nova | enabled | up | 2017-02-18T09:42:29.000000 | + | 5 | nova-compute | HostB | nova | enabled | up | 2017-02-18T09:42:29.000000 | + | 6 | nova-compute | HostC | nova | enabled | up | 2017-02-18T09:42:29.000000 | +----+------------------+-------+----------+---------+-------+----------------------------+ #. Check that ``HostC`` has enough resources for migration: @@ -218,9 +217,25 @@ What to do when the migration times out During the migration process, the instance may write to a memory page after that page has been copied to the destination. When that happens, the same page has to be copied again. The instance may write to memory pages faster than they -can be copied, so that the migration cannot complete. The Compute service will -cancel it when the ``live_migration_completion_timeout``, a configuration -parameter, is reached. +can be copied, so that the migration cannot complete. There are two optional +actions, controlled by +:oslo.config:option:`libvirt.live_migration_timeout_action`, which can be +taken against a VM after +:oslo.config:option:`libvirt.live_migration_completion_timeout` is reached: + +1. ``abort`` (default): The live migration operation will be cancelled after + the completion timeout is reached. This is similar to using API + ``DELETE /servers/{server_id}/migrations/{migration_id}``. + +2. ``force_complete``: The compute service will either pause the VM or trigger + post-copy depending on if post copy is enabled and available + (:oslo.config:option:`libvirt.live_migration_permit_post_copy` is set to + `True`). This is similar to using API + ``POST /servers/{server_id}/migrations/{migration_id}/action (force_complete)``. + +You can also read the +:oslo.config:option:`libvirt.live_migration_timeout_action` +configuration option help for more details. The following remarks assume the KVM/Libvirt hypervisor. @@ -238,16 +253,6 @@ out: WARNING nova.virt.libvirt.migration [req-...] [instance: ...] live migration not completed after 1800 sec -The Compute service also cancels migrations when the memory copy seems to make -no progress. Ocata disables this feature by default, but it can be enabled -using the configuration parameter ``live_migration_progress_timeout``. Should -this be the case, you may find the following message in the log: - -.. code-block:: console - - WARNING nova.virt.libvirt.migration [req-...] [instance: ...] - live migration stuck for 150 sec - Addressing migration timeouts ----------------------------- @@ -312,3 +317,7 @@ To make live-migration succeed, you have several options: - Post-copy may lead to an increased page fault rate during migration, which can slow the instance down. 
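+To make the above concrete, here is a minimal sketch of the relevant
+``nova.conf`` options on the compute hosts, using the option names described
+earlier in this section (the values are illustrative, not tuning advice):
+
+.. code-block:: ini
+
+    [libvirt]
+    # Seconds after which the timeout action is taken.
+    live_migration_completion_timeout = 800
+    # Either ``abort`` (the default) or ``force_complete``.
+    live_migration_timeout_action = force_complete
+    # Lets ``force_complete`` trigger post-copy rather than pausing the VM.
+    live_migration_permit_post_copy = True
+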
+ +If live migrations routinely time out or fail during cleanup operations due +to the user token timing out, consider configuring nova to use +:ref:`service user tokens <service_user_token>`. diff --git a/doc/source/admin/manage-the-cloud.rst b/doc/source/admin/manage-the-cloud.rst index 33cc911a8b1..b6080bcfd90 100644 --- a/doc/source/admin/manage-the-cloud.rst +++ b/doc/source/admin/manage-the-cloud.rst @@ -6,7 +6,7 @@ Manage the cloud .. toctree:: - common/nova-show-usage-statistics-for-hosts-instances.rst + common/nova-show-usage-statistics-for-hosts-instances System administrators can use the :command:`openstack` command to manage their clouds. diff --git a/doc/source/admin/manage-users.rst b/doc/source/admin/manage-users.rst deleted file mode 100644 index 41a925ff76f..00000000000 --- a/doc/source/admin/manage-users.rst +++ /dev/null @@ -1,12 +0,0 @@ -.. _section_manage-compute-users: - -==================== -Manage Compute users -==================== - -Access to the Euca2ools (ec2) API is controlled by an access key and a secret -key. The user's access key needs to be included in the request, and the request -must be signed with the secret key. Upon receipt of API requests, Compute -verifies the signature and runs commands on behalf of the user. - -To begin using Compute, you must create a user with the Identity service. diff --git a/doc/source/admin/manage-volumes.rst b/doc/source/admin/manage-volumes.rst index 1ac2a7a499a..a9d705a47aa 100644 --- a/doc/source/admin/manage-volumes.rst +++ b/doc/source/admin/manage-volumes.rst @@ -23,8 +23,8 @@ to the :cinder-doc:`block storage admin guide ` for more details about creating multiattach-capable volumes. -Boot from volume and attaching a volume to a server that is not -SHELVED_OFFLOADED is supported. Ultimately the ability to perform +:term:`Boot from volume <Boot From Volume>` and attaching a volume to a server +that is not SHELVED_OFFLOADED is supported. Ultimately the ability to perform these actions depends on the compute host and hypervisor driver that is being used. @@ -65,15 +65,152 @@ Testing ~~~~~~~ Continuous integration testing of the volume multiattach feature is done -via the ``nova-multiattach`` job, defined in the `nova repository`_. - -The tests are defined in the `tempest repository`_. - -The CI job is setup to run with the **libvirt** compute driver and the **lvm** -volume back end. It purposefully does not use the Pike Ubuntu Cloud Archive -package mirror so that it gets qemu<2.10. +via the ``tempest-full`` and ``tempest-slow`` jobs, which, along with the +tests themselves, are defined in the `tempest repository`_. .. _added support for multiattach volumes: https://specs.openstack.org/openstack/nova-specs/specs/queens/implemented/multi-attach-volume.html .. _recorded overview and demo: https://www.youtube.com/watch?v=hZg6wqxdEHk -.. _nova repository: http://git.openstack.org/cgit/openstack/nova/tree/playbooks/legacy/nova-multiattach/run.yaml .. _tempest repository: http://codesearch.openstack.org/?q=CONF.compute_feature_enabled.volume_multiattach&i=nope&files=&repos=tempest + +Managing volume attachments +--------------------------- + +During the lifecycle of an instance, admins may need to check various aspects of +how a given volume is mapped both to an instance and the underlying compute +hosting the instance. This could even include refreshing different elements of +the attachment to ensure the latest configuration changes within the +environment have been applied. 
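+Several of the checks below are run against a specific instance and its
+compute host, so it can help to first confirm which host the instance is
+running on. One way to do this - a sketch, requiring an admin credential and
+reusing the example instance UUID from this section - is:
+
+.. code-block:: shell
+
+    $ openstack server show 216f9481-4c9d-4530-b865-51cedfa4b8e7 \
+        -c OS-EXT-SRV-ATTR:host
+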
+ +Checking an existing attachment +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +Existing volume attachments can be checked using the following +:python-openstackclient-doc:`OpenStack Client commands `: + +List all volume attachments for a given instance: + +.. code-block:: shell + + $ openstack server volume list 216f9481-4c9d-4530-b865-51cedfa4b8e7 + +--------------------------------------+----------+--------------------------------------+--------------------------------------+ + | ID | Device | Server ID | Volume ID | + +--------------------------------------+----------+--------------------------------------+--------------------------------------+ + | 8b9b3491-f083-4485-8374-258372f3db35 | /dev/vdb | 216f9481-4c9d-4530-b865-51cedfa4b8e7 | 8b9b3491-f083-4485-8374-258372f3db35 | + +--------------------------------------+----------+--------------------------------------+--------------------------------------+ + +List all volume attachments for a given instance with the Cinder volume +attachment and Block Device Mapping UUIDs also listed with microversion >=2.89: + +.. code-block:: shell + + $ openstack --os-compute-api-version 2.89 server volume list 216f9481-4c9d-4530-b865-51cedfa4b8e7 + +----------+--------------------------------------+--------------------------------------+------+------------------------+--------------------------------------+--------------------------------------+ + | Device | Server ID | Volume ID | Tag | Delete On Termination? | Attachment ID | BlockDeviceMapping UUID | + +----------+--------------------------------------+--------------------------------------+------+------------------------+--------------------------------------+--------------------------------------+ + | /dev/vdb | 216f9481-4c9d-4530-b865-51cedfa4b8e7 | 8b9b3491-f083-4485-8374-258372f3db35 | None | False | d338fb38-cfd5-461f-8753-145dcbdb6c78 | 4e957e6d-52f2-44da-8cf8-3f1ab755e26d | + +----------+--------------------------------------+--------------------------------------+------+------------------------+--------------------------------------+--------------------------------------+ + +List all Cinder volume attachments for a given volume from microversion >= +3.27: + +.. code-block:: shell + + $ openstack --os-volume-api-version 3.27 volume attachment list --volume-id 8b9b3491-f083-4485-8374-258372f3db35 + +--------------------------------------+--------------------------------------+--------------------------------------+----------+ + | ID | Volume ID | Server ID | Status | + +--------------------------------------+--------------------------------------+--------------------------------------+----------+ + | d338fb38-cfd5-461f-8753-145dcbdb6c78 | 8b9b3491-f083-4485-8374-258372f3db35 | 216f9481-4c9d-4530-b865-51cedfa4b8e7 | attached | + +--------------------------------------+--------------------------------------+--------------------------------------+----------+ + +Show the details of a Cinder volume attachment from microversion >= 3.27: + +.. 
code-block:: shell + + $ openstack --os-volume-api-version 3.27 volume attachment show d338fb38-cfd5-461f-8753-145dcbdb6c78 + +-------------+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ + | Field | Value | + +-------------+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ + | ID | d338fb38-cfd5-461f-8753-145dcbdb6c78 | + | Volume ID | 8b9b3491-f083-4485-8374-258372f3db35 | + | Instance ID | 216f9481-4c9d-4530-b865-51cedfa4b8e7 | + | Status | attached | + | Attach Mode | rw | + | Attached At | 2021-09-14T13:03:38.000000 | + | Detached At | | + | Properties | access_mode='rw', attachment_id='d338fb38-cfd5-461f-8753-145dcbdb6c78', auth_method='CHAP', auth_password='4XyNNFV2TLPhKXoP', auth_username='jsBMQhWZJXupA4eWHLQG', cacheable='False', driver_volume_type='iscsi', encrypted='False', qos_specs=, target_discovered='False', target_iqn='iqn.2010-10.org.openstack:volume-8b9b3491-f083-4485-8374-258372f3db35', target_lun='0', target_portal='192.168.122.99:3260', volume_id='8b9b3491-f083-4485-8374-258372f3db35' | + +-------------+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ + +Refresh a volume attachment with nova-manage +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +.. versionadded:: 24.0.0 (Xena) + +Admins may also refresh an existing volume attachment using the following +:program:`nova-manage` commands. + +.. note:: + + Users can also refresh volume attachments by shelving and later unshelving + their instances. The following is an alternative to that workflow and + useful for admins when having to mass refresh attachments across an + environment. + +.. note:: + + Future work will look into introducing an os-refresh admin API that will + include orchestrating the shutdown of an instance and refreshing volume + attachments among other things. + +To begin the admin can use the `volume_attachment show` subcommand to dump +existing details of the attachment directly from the Nova database. This +includes the stashed `connection_info` not shared by the API. + +.. 
code-block:: shell + + $ nova-manage volume_attachment show 216f9481-4c9d-4530-b865-51cedfa4b8e7 8b9b3491-f083-4485-8374-258372f3db35 --json | jq .attachment_id + "d338fb38-cfd5-461f-8753-145dcbdb6c78" + +If the stored `connection_info` or `attachment_id` are incorrect, then the +admin may want to refresh the attachment to the compute host entirely by +recreating the Cinder volume attachment record(s) and pulling down fresh +`connection_info`. To do this, we first need to ensure the instance is stopped: + +.. code-block:: shell + + $ openstack server stop 216f9481-4c9d-4530-b865-51cedfa4b8e7 + +Once stopped, the host connector of the compute hosting the instance has to be +fetched using the `volume_attachment get_connector` subcommand: + +.. code-block:: shell + + root@compute $ nova-manage volume_attachment get_connector --json > connector.json + +.. note:: + + Future work will remove this requirement and incorporate the gathering of + the host connector into the main refresh command. Unfortunately, until then + it must remain a separate manual step. + +We can then provide this connector to the `volume_attachment refresh` +subcommand. This command will connect to the compute, disconnect any host +volume connections, delete the existing Cinder volume attachment, +recreate the volume attachment and finally update Nova's database. + +.. code-block:: shell + + $ nova-manage volume_attachment refresh 216f9481-4c9d-4530-b865-51cedfa4b8e7 8b9b3491-f083-4485-8374-258372f3db35 connector.json + +The Cinder volume attachment and connection_info stored in the Nova database +should now be updated: + +.. code-block:: shell + + $ nova-manage volume_attachment show 216f9481-4c9d-4530-b865-51cedfa4b8e7 8b9b3491-f083-4485-8374-258372f3db35 --json | jq .attachment_id + "9ce46f49-5cfc-4c6c-b2f0-0287540d3246" + +The instance can then be restarted and the event list checked: + +.. code-block:: shell + + $ openstack server start $instance diff --git a/doc/source/admin/managing-resource-providers.rst b/doc/source/admin/managing-resource-providers.rst new file mode 100644 index 00000000000..27bfe20140a --- /dev/null +++ b/doc/source/admin/managing-resource-providers.rst @@ -0,0 +1,216 @@ +============================================== +Managing Resource Providers Using Config Files +============================================== + +In order to facilitate management of resource provider information in the +Placement API, Nova provides `a method`__ for admins to add custom inventory +and traits to resource providers using YAML files. + +__ https://specs.openstack.org/openstack/nova-specs/specs/ussuri/approved/provider-config-file.html + +.. note:: + + Only ``CUSTOM_*`` resource classes and traits may be managed this way. + +Placing Files +------------- + +Nova-compute will search for ``*.yaml`` files in the path specified in +:oslo.config:option:`compute.provider_config_location`. These files will be +loaded and validated for errors on nova-compute startup. If there are any +errors in the files, nova-compute will fail to start up. + +Administrators should ensure that provider config files have appropriate +permissions and ownership. See the `specification`__ and `admin guide`__ +for more details. + +__ https://specs.openstack.org/openstack/nova-specs/specs/ussuri/approved/provider-config-file.html +__ https://docs.openstack.org/nova/latest/admin/managing-resource-providers.html + +.. 
note:: + + The files are loaded once at nova-compute startup and any changes or new + files will not be recognized until the next nova-compute startup. + +Examples +-------- + +Resource providers to target can be identified by either UUID or name. In +addition, the value ``$COMPUTE_NODE`` can be used in the UUID field to +identify all nodes managed by the service. + +If an entry does not include any additional inventory or traits, it will be +logged at load time but otherwise ignored. In the case of a resource provider +being identified by both ``$COMPUTE_NODE`` and individual UUID/name, the +values in the ``$COMPUTE_NODE`` entry will be ignored for *that provider* only +if the explicit entry includes inventory or traits. + +.. note:: + + In the case that a resource provider is identified more than once by + explicit UUID/name, the nova-compute service will fail to start. This + is a global requirement across all supplied ``provider.yaml`` files. + +.. code-block:: yaml + + meta: + schema_version: '1.0' + providers: + - identification: + name: 'EXAMPLE_RESOURCE_PROVIDER' + # Additional valid identification examples: + # uuid: '$COMPUTE_NODE' + # uuid: '5213b75d-9260-42a6-b236-f39b0fd10561' + inventories: + additional: + - CUSTOM_EXAMPLE_RESOURCE_CLASS: + total: 100 + reserved: 0 + min_unit: 1 + max_unit: 10 + step_size: 1 + allocation_ratio: 1.0 + traits: + additional: + - 'CUSTOM_EXAMPLE_TRAIT' + +Schema Example +-------------- +.. code-block:: yaml + + type: object + properties: + # This property is used to track where the provider.yaml file originated. + # It is reserved for internal use and should never be set in a provider.yaml + # file supplied by an end user. + __source_file: + not: {} + meta: + type: object + properties: + # Version ($Major, $minor) of the schema must successfully parse + # documents conforming to ($Major, 0..N). Any breaking schema change + # (e.g. removing fields, adding new required fields, imposing a stricter + # pattern on a value, etc.) must bump $Major. + schema_version: + type: string + pattern: '^1\.([0-9]|[1-9][0-9]+)$' + required: + - schema_version + additionalProperties: true + providers: + type: array + items: + type: object + properties: + identification: + $ref: '#/provider_definitions/provider_identification' + inventories: + $ref: '#/provider_definitions/provider_inventories' + traits: + $ref: '#/provider_definitions/provider_traits' + required: + - identification + additionalProperties: true + required: + - meta + additionalProperties: true + + provider_definitions: + provider_identification: + # Identify a single provider to configure. Exactly one identification + # method should be used. Currently `uuid` or `name` are supported, but + # future versions may support others. + # The uuid can be set to the sentinel value `$COMPUTE_NODE` which will + # cause the consuming compute service to apply the configuration + # to all compute node root providers it manages that are not otherwise + # specified using a uuid or name. + type: object + properties: + uuid: + oneOf: + # TODO(sean-k-mooney): replace this with type uuid when we can depend + # on a version of the jsonschema lib that implements draft 8 or later + # of the jsonschema spec. 
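+          # In other words, the two alternatives below accept either a
+          # canonical RFC 4122 UUID string or the literal sentinel value
+          # `$COMPUTE_NODE` described above; any other value fails validation.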
- type: string + pattern: '^[0-9A-Fa-f]{8}-[0-9A-Fa-f]{4}-[0-9A-Fa-f]{4}-[0-9A-Fa-f]{4}-[0-9A-Fa-f]{12}$' + - type: string + const: '$COMPUTE_NODE' + name: + type: string + minLength: 1 + # This introduces the possibility of an unsupported key name being used to + # get by schema validation, but is necessary to support forward + # compatibility with new identification methods. This should be checked + # after schema validation. + minProperties: 1 + maxProperties: 1 + additionalProperties: false + provider_inventories: + # Allows the admin to specify various adjectives to create and manage + # providers' inventories. This list of adjectives can be extended in the + # future as the schema evolves to meet new use cases. As of v1.0, only one + # adjective, `additional`, is supported. + type: object + properties: + additional: + type: array + items: + patternProperties: + # Allows any key name matching the resource class pattern; a + # check to prevent conflicts with virt driver owned resource classes + # will be done after schema validation. + ^[A-Z0-9_]{1,255}$: + type: object + properties: + # Any optional properties not populated will be given a default value by + # placement. If overriding a pre-existing provider, values will not be + # preserved from the existing inventory. + total: + type: integer + reserved: + type: integer + min_unit: + type: integer + max_unit: + type: integer + step_size: + type: integer + allocation_ratio: + type: number + required: + - total + # The defined properties reflect the current placement data + # model. While defining those in the schema and not allowing + # additional properties means we will need to bump the schema + # version if they change, that is likely to be part of a large + # change that may have other impacts anyway. The benefit of + # stricter validation of property names outweighs the (small) + # chance of having to bump the schema version as described above. + additionalProperties: false + # This ensures only keys matching the pattern above are allowed + additionalProperties: false + additionalProperties: true + provider_traits: + # Allows the admin to specify various adjectives to create and manage + # providers' traits. This list of adjectives can be extended in the + # future as the schema evolves to meet new use cases. As of v1.0, only one + # adjective, `additional`, is supported. + type: object + properties: + additional: + type: array + items: + # Allows any value matching the trait pattern here, additional + # validation will be done after schema validation. + type: string + pattern: '^[A-Z0-9_]{1,255}$' + additionalProperties: true + +.. note:: + + When creating a ``provider.yaml`` config file, it is recommended to validate + the config against the schema provided by nova using a simple jsonschema + validator, rather than by starting the nova-compute agent, as this enables + faster iteration. + diff --git a/doc/source/admin/metadata-service.rst b/doc/source/admin/metadata-service.rst new file mode 100644 index 00000000000..d1d816610d1 --- /dev/null +++ b/doc/source/admin/metadata-service.rst @@ -0,0 +1,190 @@ +================ +Metadata service +================ + +.. note:: + + This section provides deployment information about the metadata service. For + end-user information about the metadata service and instance metadata in + general, refer to the :ref:`user guide `. + +The metadata service provides a way for instances to retrieve instance-specific +data. Instances access the metadata service at ``http://169.254.169.254``. 
diff --git a/doc/source/admin/metadata-service.rst b/doc/source/admin/metadata-service.rst
new file mode 100644
index 00000000000..d1d816610d1
--- /dev/null
+++ b/doc/source/admin/metadata-service.rst
@@ -0,0 +1,190 @@
+================
+Metadata service
+================
+
+.. note::
+
+    This section provides deployment information about the metadata service.
+    For end-user information about the metadata service and instance metadata
+    in general, refer to the :ref:`user guide `.
+
+The metadata service provides a way for instances to retrieve instance-specific
+data. Instances access the metadata service at ``http://169.254.169.254``. The
+metadata service supports two sets of APIs - an OpenStack metadata API and an
+EC2-compatible API - and also exposes vendordata and user data. Both the
+OpenStack metadata and EC2-compatible APIs are versioned by date.
+
+The metadata service can be run globally, as part of the :program:`nova-api`
+application, or on a per-cell basis, as part of the standalone
+:program:`nova-api-metadata` application. A detailed comparison is provided in
+the :ref:`cells V2 guide `.
+
+.. versionchanged:: 19.0.0
+
+    The ability to run the nova metadata API service on a per-cell basis was
+    added in Stein. For versions prior to this release, you should not use the
+    standalone :program:`nova-api-metadata` application for multiple cells.
+
+Guests access the service at ``169.254.169.254`` or at ``fe80::a9fe:a9fe``.
+
+.. versionchanged:: 22.0.0
+
+    Starting with the Victoria release, the metadata service is accessible
+    over IPv6 at the link-local address ``fe80::a9fe:a9fe``.
+
+The networking service, neutron, is responsible for intercepting these
+requests and adding HTTP headers which uniquely identify the source of the
+request before forwarding it to the metadata API server. For the Open vSwitch
+and Linux Bridge backends provided with neutron, the flow looks like this:
+
+#. The instance sends an HTTP request for metadata to ``169.254.169.254``.
+
+#. The request hits either the router or the DHCP namespace, depending on
+   the routes configured in the instance.
+
+#. The metadata proxy service in the namespace adds the following info to the
+   request:
+
+   - Instance IP (``X-Forwarded-For`` header)
+   - Network or router ID (``X-Neutron-Network-Id`` or ``X-Neutron-Router-Id``
+     header)
+
+#. The metadata proxy service sends this request to the metadata agent (outside
+   the namespace) via a UNIX domain socket.
+
+#. The :program:`neutron-metadata-agent` application forwards the request to
+   the nova metadata API service by adding some new headers (instance ID and
+   tenant ID) to the request.
+
+This flow may vary if a different networking backend is used.
+
+Neutron and nova must be configured to communicate using a shared secret.
+Neutron uses this secret to sign the Instance-ID header of the metadata
+request to prevent spoofing. This secret is configured through the
+:oslo.config:option:`neutron.metadata_proxy_shared_secret` config option in
+nova and the equivalent ``metadata_proxy_shared_secret`` config option in
+neutron.
+
+Configuration
+-------------
+
+The :program:`nova-api` application accepts the following metadata
+service-related options:
+
+- :oslo.config:option:`enabled_apis`
+- :oslo.config:option:`enabled_ssl_apis`
+- :oslo.config:option:`neutron.service_metadata_proxy`
+- :oslo.config:option:`neutron.metadata_proxy_shared_secret`
+- :oslo.config:option:`api.metadata_cache_expiration`
+- :oslo.config:option:`api.use_forwarded_for`
+- :oslo.config:option:`api.local_metadata_per_cell`
+- :oslo.config:option:`api.dhcp_domain`
+
+.. note::
+
+    This list excludes configuration options related to the vendordata
+    feature. Refer to the :doc:`vendordata feature documentation ` for
+    information on configuring it.
+
+For example, to configure the :program:`nova-api` application to serve the
+metadata API, without SSL, using the ``StaticJSON`` vendordata provider, add
+the following to a :file:`nova-api.conf` file:
+
+.. code-block:: ini
+
+    [DEFAULT]
+    enabled_apis = osapi_compute,metadata
+    enabled_ssl_apis =
+    metadata_listen = 0.0.0.0
+    metadata_listen_port = 0
+    metadata_workers = 4
+
+    [neutron]
+    service_metadata_proxy = True
+
+    [api]
+    dhcp_domain =
+    metadata_cache_expiration = 15
+    use_forwarded_for = False
+    local_metadata_per_cell = False
+    vendordata_providers = StaticJSON
+    vendordata_jsonfile_path = /etc/nova/vendor_data.json
+
+.. note::
+
+    This does not include configuration options that are not metadata-specific
+    but are nonetheless required, such as
+    :oslo.config:option:`api.auth_strategy`.
+
+Configuring the application to use the ``DynamicJSON`` vendordata provider is
+more involved and is not covered here.
+
+The :program:`nova-api-metadata` application accepts almost the same options:
+
+- :oslo.config:option:`neutron.service_metadata_proxy`
+- :oslo.config:option:`neutron.metadata_proxy_shared_secret`
+- :oslo.config:option:`api.metadata_cache_expiration`
+- :oslo.config:option:`api.use_forwarded_for`
+- :oslo.config:option:`api.local_metadata_per_cell`
+- :oslo.config:option:`api.dhcp_domain`
+
+.. note::
+
+    This list excludes configuration options related to the vendordata
+    feature. Refer to the :doc:`vendordata feature documentation ` for
+    information on configuring it.
+
+For example, to configure the :program:`nova-api-metadata` application to serve
+the metadata API, without SSL, add the following to a :file:`nova-api.conf`
+file:
+
+.. code-block:: ini
+
+    [DEFAULT]
+    metadata_listen = 0.0.0.0
+    metadata_listen_port = 0
+    metadata_workers = 4
+
+    [neutron]
+    service_metadata_proxy = True
+
+    [api]
+    dhcp_domain =
+    metadata_cache_expiration = 15
+    use_forwarded_for = False
+    local_metadata_per_cell = False
+
+.. note::
+
+    This does not include configuration options that are not metadata-specific
+    but are nonetheless required, such as
+    :oslo.config:option:`api.auth_strategy`.
+
+For information about configuring the neutron side of the metadata service,
+refer to the :neutron-doc:`neutron configuration guide
+`.
+
+
+Config drives
+-------------
+
+Config drives are special drives that are attached to an instance when it
+boots. The instance can mount this drive and read files from it to get
+information that is normally available through the metadata service. For more
+information, refer to :doc:`/admin/config-drive` and the :ref:`user guide `.
+
+
+Vendordata
+----------
+
+Vendordata provides a way to pass vendor- or deployment-specific information to
+instances. For more information, refer to :doc:`/admin/vendordata` and the
+:ref:`user guide `.
+
+
+User data
+---------
+
+User data is a blob of data that the user can specify when they launch an
+instance. For more information, refer to :ref:`the user guide
+`.
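+
+As a quick smoke test of a deployment, the metadata service can be queried
+from inside a running guest. The following commands are illustrative - the
+exact set of available paths depends on the API versions exposed - but the
+``/openstack`` tree and the date-versioned EC2-style tree are long-standing
+entry points:
+
+.. code-block:: console
+
+    $ curl http://169.254.169.254/openstack
+    $ curl http://169.254.169.254/openstack/latest/meta_data.json
+    $ curl http://169.254.169.254/2009-04-04/meta-data/
+
+If these requests hang or fail, revisit the neutron metadata proxy and shared
+secret configuration described above.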
diff --git a/doc/source/admin/migrate-instance-with-snapshot.rst b/doc/source/admin/migrate-instance-with-snapshot.rst
index 06509003fc6..65059679abb 100644
--- a/doc/source/admin/migrate-instance-with-snapshot.rst
+++ b/doc/source/admin/migrate-instance-with-snapshot.rst
@@ -65,6 +65,10 @@ Create a snapshot of the instance
 
      $ openstack server image create --name myInstanceSnapshot myInstance
 
+   If snapshot operations routinely fail because the user token times out
+   while uploading a large disk image, consider configuring nova to use
+   :ref:`service user tokens `.
+
 #. Use the :command:`openstack image list` command to check the status
    until the status is ``ACTIVE``:
diff --git a/doc/source/admin/migration.rst b/doc/source/admin/migration.rst
index 3020825e894..978a91a51ff 100644
--- a/doc/source/admin/migration.rst
+++ b/doc/source/admin/migration.rst
@@ -4,70 +4,97 @@ Migrate instances
 
 .. note::
 
-    This documentation is about cold-migration. For live-migration usage, see
+    This documentation is about cold migration. For live migration usage, see
     :doc:`live-migration-usage`.
 
-When you want to move an instance from one compute host to another, you can use
-the :command:`openstack server migrate` command. The scheduler chooses the
-destination compute host based on its settings. This process does not assume
-that the instance has shared storage available on the target host. If you are
-using SSH tunneling, you must ensure that each node is configured with SSH key
-authentication so that the Compute service can use SSH to move disks to other
-nodes. For more information, see :ref:`cli-os-migrate-cfg-ssh`.
+When you want to move an instance from one compute host to another, you can
+migrate the instance. The migration operation, which is also known as the cold
+migration operation to distinguish it from the live migration operation,
+functions similarly to :doc:`the resize operation ` with the main
+difference being that a cold migration does not change the flavor of the
+instance. As with resize, the scheduler chooses the destination compute host
+based on its settings. This process does not assume that the instance has shared
+storage available on the target host. If you are using SSH tunneling, you must
+ensure that each node is configured with SSH key authentication so that the
+Compute service can use SSH to move disks to other nodes. For more information,
+see :ref:`cli-os-migrate-cfg-ssh`.
 
-#. To list the VMs you want to migrate, run:
+To list the VMs you want to migrate, run:
 
-    .. code-block:: console
+.. code-block:: console
 
-        $ openstack server list
+   $ openstack server list
 
-#. Use the :command:`openstack server migrate` command.
+Once you have the name or UUID of the server you wish to migrate, migrate it
+using the :command:`openstack server migrate` command:
 
-    .. code-block:: console
+.. code-block:: console
 
-        $ openstack server migrate VM_INSTANCE
+   $ openstack server migrate SERVER
 
-#. To migrate an instance and watch the status, use this example script:
+Once an instance has successfully migrated, you can use the :command:`openstack
+server migrate confirm` command to confirm it:
 
-    .. code-block:: bash
+.. code-block:: console
 
-        #!/bin/bash
+   $ openstack server migrate confirm SERVER
 
-        # Provide usage
-        usage() {
-            echo "Usage: $0 VM_ID"
-            exit 1
-        }
+Alternatively, you can use the :command:`openstack server migrate revert`
+command to revert the migration and restore the instance to its previous host:
 
-        [[ $# -eq 0 ]] && usage
+.. code-block:: console
 
-        # Migrate the VM to an alternate hypervisor
-        echo -n "Migrating instance to alternate host"
-        VM_ID=$1
-        openstack server migrate $VM_ID
-        VM_OUTPUT=$(openstack server show $VM_ID)
-        VM_STATUS=$(echo "$VM_OUTPUT" | grep status | awk '{print $4}')
-        while [[ "$VM_STATUS" != "VERIFY_RESIZE" ]]; do
-            echo -n "."
-            sleep 2
-            VM_OUTPUT=$(openstack server show $VM_ID)
-            VM_STATUS=$(echo "$VM_OUTPUT" | grep status | awk '{print $4}')
-        done
-        openstack server resize --confirm $VM_ID
-        echo " instance migrated and resized."
- echo; + $ openstack server migrate revert SERVER - # Show the details for the VM - echo "Updated instance details:" - openstack server show $VM_ID +.. note:: + + You can configure automatic confirmation of migrations and resizes. Refer to + the :oslo.config:option:`resize_confirm_window` option for more information. + + +Example +------- + +To migrate an instance and watch the status, use this example script: + +.. code-block:: bash + + #!/bin/bash + + # Provide usage + usage() { + echo "Usage: $0 VM_ID" + exit 1 + } + + [[ $# -eq 0 ]] && usage + VM_ID=$1 + + # Show the details for the VM + echo "Instance details:" + openstack server show ${VM_ID} + + # Migrate the VM to an alternate hypervisor + echo -n "Migrating instance to alternate host " + openstack server migrate ${VM_ID} + while [[ "$(openstack server show ${VM_ID} -f value -c status)" != "VERIFY_RESIZE" ]]; do + echo -n "." + sleep 2 + done + openstack server migrate confirm ${VM_ID} + echo " instance migrated and resized." + + # Show the details for the migrated VM + echo "Migrated instance details:" + openstack server show ${VM_ID} - # Pause to allow users to examine VM details - read -p "Pausing, press to exit." + # Pause to allow users to examine VM details + read -p "Pausing, press to exit." .. note:: If you see the following error, it means you are either running the command - with the wrong credentials, such as a non-admin user, or the ``policy.json`` + with the wrong credentials, such as a non-admin user, or the ``policy.yaml`` file prevents migration for your user:: Policy doesn't allow os_compute_api:os-migrate-server:migrate to be performed. (HTTP 403) diff --git a/doc/source/admin/networking-nova.rst b/doc/source/admin/networking-nova.rst deleted file mode 100644 index d071a5cbc7e..00000000000 --- a/doc/source/admin/networking-nova.rst +++ /dev/null @@ -1,873 +0,0 @@ -============================ -Networking with nova-network -============================ - -.. deprecated:: 14.0.0 - - ``nova-network`` was deprecated in the OpenStack Newton release. In Ocata - and future releases, you can start ``nova-network`` only with a cells v1 - configuration. This is not a recommended configuration for deployment. - -Understanding the networking configuration options helps you design the best -configuration for your Compute instances. - -You can choose to either install and configure ``nova-network`` or use the -OpenStack Networking service (neutron). This section contains a brief overview -of ``nova-network``. For more information about OpenStack Networking, refer to -:neutron-doc:`the documentation <>`. - -Networking concepts -~~~~~~~~~~~~~~~~~~~ - -Compute assigns a private IP address to each VM instance. Compute makes a -distinction between fixed IPs and floating IP. Fixed IPs are IP addresses that -are assigned to an instance on creation and stay the same until the instance is -explicitly terminated. Floating IPs are addresses that can be dynamically -associated with an instance. A floating IP address can be disassociated and -associated with another instance at any time. A user can reserve a floating IP -for their project. - -.. note:: - - Currently, Compute with ``nova-network`` only supports Linux bridge - networking that allows virtual interfaces to connect to the outside network - through the physical interface. - -The network controller with ``nova-network`` provides virtual networks to -enable compute servers to interact with each other and with the public network. 
-Compute with ``nova-network`` supports the following network modes, which are -implemented as Network Manager types: - -Flat Network Manager - In this mode, a network administrator specifies a subnet. IP addresses for VM - instances are assigned from the subnet, and then injected into the image on - launch. Each instance receives a fixed IP address from the pool of available - addresses. A system administrator must create the Linux networking bridge - (typically named ``br100``, although this is configurable) on the systems - running the ``nova-network`` service. All instances of the system are - attached to the same bridge, which is configured manually by the network - administrator. - -.. note:: - - Configuration injection currently only works on Linux-style systems that - keep networking configuration in ``/etc/network/interfaces``. - -Flat DHCP Network Manager - In this mode, OpenStack starts a DHCP server (dnsmasq) to allocate IP - addresses to VM instances from the specified subnet, in addition to manually - configuring the networking bridge. IP addresses for VM instances are assigned - from a subnet specified by the network administrator. - - Like flat mode, all instances are attached to a single bridge on the compute - node. Additionally, a DHCP server configures instances depending on - single-/multi-host mode, alongside each ``nova-network``. In this mode, - Compute does a bit more configuration. It attempts to bridge into an Ethernet - device (``flat_interface``, eth0 by default). For every instance, Compute - allocates a fixed IP address and configures dnsmasq with the MAC ID and IP - address for the VM. Dnsmasq does not take part in the IP address allocation - process, it only hands out IPs according to the mapping done by Compute. - Instances receive their fixed IPs with the :command:`dhcpdiscover` command. - These IPs are not assigned to any of the host's network interfaces, only to - the guest-side interface for the VM. - - In any setup with flat networking, the hosts providing the ``nova-network`` - service are responsible for forwarding traffic from the private network. They - also run and configure dnsmasq as a DHCP server listening on this bridge, - usually on IP address 10.0.0.1 (see :ref:`compute-dnsmasq`). Compute can - determine the NAT entries for each network, although sometimes NAT is not - used, such as when the network has been configured with all public IPs, or if - a hardware router is used (which is a high availability option). In this - case, hosts need to have ``br100`` configured and physically connected to any - other nodes that are hosting VMs. You must set the ``flat_network_bridge`` - option or create networks with the bridge parameter in order to avoid raising - an error. Compute nodes have iptables or ebtables entries created for each - project and instance to protect against MAC ID or IP address spoofing and ARP - poisoning. - -.. note:: - - In single-host Flat DHCP mode you will be able to ping VMs through their - fixed IP from the ``nova-network`` node, but you cannot ping them from the - compute nodes. This is expected behavior. - -VLAN Network Manager - This is the default mode for OpenStack Compute. In this mode, Compute creates - a VLAN and bridge for each project. For multiple-machine installations, the - VLAN Network Mode requires a switch that supports VLAN tagging (IEEE 802.1Q). - The project gets a range of private IPs that are only accessible from inside - the VLAN. 
In order for a user to access the instances in their project, a - special VPN instance (code named ``cloudpipe``) needs to be created. Compute - generates a certificate and key for the user to access the VPN and starts the - VPN automatically. It provides a private network segment for each project's - instances that can be accessed through a dedicated VPN connection from the - internet. In this mode, each project gets its own VLAN, Linux networking - bridge, and subnet. - - The subnets are specified by the network administrator, and are assigned - dynamically to a project when required. A DHCP server is started for each - VLAN to pass out IP addresses to VM instances from the subnet assigned to the - project. All instances belonging to one project are bridged into the same - VLAN for that project. OpenStack Compute creates the Linux networking bridges - and VLANs when required. - -These network managers can co-exist in a cloud system. However, because you -cannot select the type of network for a given project, you cannot configure -multiple network types in a single Compute installation. - -All network managers configure the network using network drivers. For example, -the Linux L3 driver (``l3.py`` and ``linux_net.py``), which makes use of -``iptables``, ``route`` and other network management facilities, and the -libvirt `network filtering facilities -`__. The driver is not tied to any -particular network manager; all network managers use the same driver. The -driver usually initializes only when the first VM lands on this host node. - -All network managers operate in either single-host or multi-host mode. This -choice greatly influences the network configuration. In single-host mode, a -single ``nova-network`` service provides a default gateway for VMs and hosts a -single DHCP server (dnsmasq). In multi-host mode, each compute node runs its -own ``nova-network`` service. In both cases, all traffic between VMs and the -internet flows through ``nova-network``. Each mode has benefits and drawbacks. - -All networking options require network connectivity to be already set up -between OpenStack physical nodes. OpenStack does not configure any physical -network interfaces. All network managers automatically create VM virtual -interfaces. Some network managers can also create network bridges such as -``br100``. - -The internal network interface is used for communication with VMs. The -interface should not have an IP address attached to it before OpenStack -installation, it serves only as a fabric where the actual endpoints are VMs and -dnsmasq. Additionally, the internal network interface must be in -``promiscuous`` mode, so that it can receive packets whose target MAC address -is the guest VM, not the host. - -All machines must have a public and internal network interface (controlled by -these options: ``public_interface`` for the public interface, and -``flat_interface`` and ``vlan_interface`` for the internal interface with flat -or VLAN managers). This guide refers to the public network as the external -network and the private network as the internal or project network. - -For flat and flat DHCP modes, use the :command:`nova network-create` command to -create a network: - -.. code-block:: console - - $ nova network-create vmnet \ - --fixed-range-v4 10.0.0.0/16 --fixed-cidr 10.0.20.0/24 --bridge br100 - -This example uses the following parameters: - -``--fixed-range-v4`` - Specifies the network subnet. 
-``--fixed-cidr`` - Specifies a range of fixed IP addresses to allocate, and can be a subset of - the ``--fixed-range-v4`` argument. -``--bridge`` - Specifies the bridge device to which this network is connected on every - compute node. - -.. _compute-dnsmasq: - -DHCP server: dnsmasq -~~~~~~~~~~~~~~~~~~~~ - -The Compute service uses `dnsmasq -`__ as the DHCP server when -using either Flat DHCP Network Manager or VLAN Network Manager. For Compute to -operate in IPv4/IPv6 dual-stack mode, use at least dnsmasq v2.63. The -``nova-network`` service is responsible for starting dnsmasq processes. - -The behavior of dnsmasq can be customized by creating a dnsmasq configuration -file. Specify the configuration file using the ``dnsmasq_config_file`` -configuration option: - -.. code-block:: ini - - dnsmasq_config_file=/etc/dnsmasq-nova.conf - -For more information about creating a dnsmasq configuration file, see the -:doc:`/configuration/config`, and `the dnsmasq documentation -`__. - -Dnsmasq also acts as a caching DNS server for instances. You can specify the -DNS server that dnsmasq uses by setting the ``dns_server`` configuration option -in ``/etc/nova/nova.conf``. This example configures dnsmasq to use Google's -public DNS server: - -.. code-block:: ini - - dns_server=8.8.8.8 - -Dnsmasq logs to syslog (typically ``/var/log/syslog`` or ``/var/log/messages``, -depending on Linux distribution). Logs can be useful for troubleshooting, -especially in a situation where VM instances boot successfully but are not -reachable over the network. - -Administrators can specify the starting point IP address to reserve with the -DHCP server (in the format n.n.n.n) with this command: - -.. code-block:: console - - $ nova-manage fixed reserve --address IP_ADDRESS - -This reservation only affects which IP address the VMs start at, not the fixed -IP addresses that ``nova-network`` places on the bridges. - -Configure Compute to use IPv6 addresses -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -If you are using OpenStack Compute with ``nova-network``, you can put Compute -into dual-stack mode, so that it uses both IPv4 and IPv6 addresses for -communication. In dual-stack mode, instances can acquire their IPv6 global -unicast addresses by using a stateless address auto-configuration mechanism -[RFC 4862/2462]. IPv4/IPv6 dual-stack mode works with both ``VlanManager`` and -``FlatDHCPManager`` networking modes. - -In ``VlanManager`` networking mode, each project uses a different 64-bit global -routing prefix. In ``FlatDHCPManager`` mode, all instances use one 64-bit -global routing prefix. - -This configuration was tested with virtual machine images that have an IPv6 -stateless address auto-configuration capability. This capability is required -for any VM to run with an IPv6 address. You must use an EUI-64 address for -stateless address auto-configuration. Each node that executes a ``nova-*`` -service must have ``python-netaddr`` and ``radvd`` installed. - -.. rubric:: Switch into IPv4/IPv6 dual-stack mode - -#. For every node running a ``nova-*`` service, install ``python-netaddr``: - - .. code-block:: console - - # apt-get install python-netaddr - -#. For every node running ``nova-network``, install ``radvd`` and configure - IPv6 networking: - - .. code-block:: console - - # apt-get install radvd - # echo 1 > /proc/sys/net/ipv6/conf/all/forwarding - # echo 0 > /proc/sys/net/ipv6/conf/all/accept_ra - -#. On all nodes, edit the ``nova.conf`` file and specify ``use_ipv6 = True``. - -#. Restart all ``nova-*`` services. - -.. 
rubric:: IPv6 configuration options - -You can use the following options with the :command:`nova network-create` -command: - -- Add a fixed range for IPv6 addresses to the :command:`nova network-create` - command. Specify ``public`` or ``private`` after the ``network-create`` - parameter. - - .. code-block:: console - - $ nova network-create public --fixed-range-v4 FIXED_RANGE_V4 \ - --vlan VLAN_ID --vpn VPN_START --fixed-range-v6 FIXED_RANGE_V6 - -- Set the IPv6 global routing prefix by using the ``--fixed_range_v6`` - parameter. The default value for the parameter is ``fd00::/48``. - - When you use ``FlatDHCPManager``, the command uses the original - ``--fixed_range_v6`` value. For example: - - .. code-block:: console - - $ nova network-create public --fixed-range-v4 10.0.2.0/24 \ - --fixed-range-v6 fd00:1::/48 - -- When you use ``VlanManager``, the command increments the subnet ID to create - subnet prefixes. Guest VMs use this prefix to generate their IPv6 global - unicast addresses. For example: - - .. code-block:: console - - $ nova network-create public --fixed-range-v4 10.0.1.0/24 --vlan 100 \ - --vpn 1000 --fixed-range-v6 fd00:1::/48 - -.. list-table:: Description of IPv6 configuration options - :header-rows: 2 - - * - Configuration option = Default value - - Description - * - [DEFAULT] - - - * - fixed_range_v6 = fd00::/48 - - (StrOpt) Fixed IPv6 address block - * - gateway_v6 = None - - (StrOpt) Default IPv6 gateway - * - ipv6_backend = rfc2462 - - (StrOpt) Backend to use for IPv6 generation - * - use_ipv6 = False - - (BoolOpt) Use IPv6 - -.. _metadata-service-deploy: - -Metadata service -~~~~~~~~~~~~~~~~ - -.. TODO: This should be moved into its own document once we add information - about integrating this with neutron rather than nova-network. - -This section provides deployment information about the metadata service. For -end-user information about the metadata service, see the -:doc:`user guide `. - -The metadata service is implemented by either the ``nova-api`` service or the -``nova-api-metadata`` service. Note that the ``nova-api-metadata`` service is -generally only used when running in multi-host mode, as it retrieves -instance-specific metadata. If you are running the ``nova-api`` service, you -must have ``metadata`` as one of the elements listed in the ``enabled_apis`` -configuration option in ``/etc/nova/nova.conf``. The default ``enabled_apis`` -configuration setting includes the metadata service, so you do not need to -modify it. - -Hosts access the service at ``169.254.169.254:80``, and this is translated to -``metadata_host:metadata_port`` by an iptables rule established by the -``nova-network`` service. In multi-host mode, you can set ``metadata_host`` to -``127.0.0.1``. - -For instances to reach the metadata service, the ``nova-network`` service must -configure iptables to NAT port ``80`` of the ``169.254.169.254`` address to the -IP address specified in ``metadata_host`` (this defaults to ``$my_ip``, which -is the IP address of the ``nova-network`` service) and port specified in -``metadata_port`` (which defaults to ``8775``) in ``/etc/nova/nova.conf``. - -.. note:: - - The ``metadata_host`` configuration option must be an IP address, not a host - name. - -The default Compute service settings assume that ``nova-network`` and -``nova-api`` are running on the same host. 
If this is not the case, in the -``/etc/nova/nova.conf`` file on the host running ``nova-network``, set the -``metadata_host`` configuration option to the IP address of the host where -``nova-api`` is running. - -.. TODO: Consider grouping the metadata options into the same [metadata] - group and then we can just link to that in the generated config option doc. - -.. list-table:: Description of metadata configuration options - :header-rows: 2 - - * - Configuration option = Default value - - Description - * - [DEFAULT] - - - * - :oslo.config:option:`metadata_host` = $my_ip - - (StrOpt) The IP address for the metadata API server - * - :oslo.config:option:`metadata_listen` = 0.0.0.0 - - (StrOpt) The IP address on which the metadata API will listen. - * - :oslo.config:option:`metadata_listen_port` = 8775 - - (IntOpt) The port on which the metadata API will listen. - * - :oslo.config:option:`metadata_port` = 8775 - - (IntOpt) The port for the metadata API port - * - :oslo.config:option:`metadata_workers` = None - - (IntOpt) Number of workers for metadata service. The default will be - the number of CPUs available. - * - **[api]** - - - * - :oslo.config:option:`metadata_cache_expiration ` = 15 - - (IntOpt) Time in seconds to cache metadata; 0 to disable metadata - caching entirely (not recommended). Increasing this should improve - response times of the metadata API when under heavy load. Higher values - may increase memory usage and result in longer times for host metadata - changes to take effect. - * - :oslo.config:option:`vendordata_providers ` = StaticJSON - - (ListOpt) A list of vendordata providers. See - :doc:`Vendordata ` for more information. - * - :oslo.config:option:`vendordata_jsonfile_path ` = None - - (StrOpt) File to load JSON formatted vendor data from - -Enable ping and SSH on VMs -~~~~~~~~~~~~~~~~~~~~~~~~~~ - -You need to enable ``ping`` and ``ssh`` on your VMs for network access. This -can be done with the :command:`openstack` command. - -.. note:: - - Run these commands as root only if the credentials used to interact with - ``nova-api`` are in ``/root/.bashrc``. - -Enable ping and SSH with :command:`openstack security group rule create` -commands: - -.. code-block:: console - - $ openstack security group rule create --protocol icmp default - $ openstack security group rule create --protocol tcp --dst-port 22:22 default - -If you have run these commands and still cannot ping or SSH your instances, -check the number of running ``dnsmasq`` processes, there should be two. If not, -kill the processes and restart the service with these commands: - -.. code-block:: console - - # killall dnsmasq - # service nova-network restart - -Configure public (floating) IP addresses -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -This section describes how to configure floating IP addresses with -``nova-network``. For information about doing this with OpenStack Networking, -refer to :neutron-doc:`L3-routing-and-NAT -`. - -Private and public IP addresses -------------------------------- - -In this section, the term floating IP address is used to refer to an IP -address, usually public, that you can dynamically add to a running virtual -instance. - -Every virtual instance is automatically assigned a private IP address. You can -choose to assign a public (or floating) IP address instead. OpenStack Compute -uses network address translation (NAT) to assign floating IPs to virtual -instances. 
- -To be able to assign a floating IP address, edit the ``/etc/nova/nova.conf`` -file to specify which interface the ``nova-network`` service should bind public -IP addresses to: - -.. code-block:: ini - - public_interface=VLAN100 - -If you make changes to the ``/etc/nova/nova.conf`` file while the -``nova-network`` service is running, you will need to restart the service to -pick up the changes. - -.. note:: - - Floating IPs are implemented by using a source NAT (SNAT rule in iptables), - so security groups can sometimes display inconsistent behavior if VMs use - their floating IP to communicate with other VMs, particularly on the same - physical host. Traffic from VM to VM across the fixed network does not have - this issue, and so this is the recommended setup. To ensure that traffic - does not get SNATed to the floating range, explicitly set: - - .. code-block:: ini - - dmz_cidr=x.x.x.x/y - - The ``x.x.x.x/y`` value specifies the range of floating IPs for each pool of - floating IPs that you define. This configuration is also required if the VMs - in the source group have floating IPs. - -Enable IP forwarding --------------------- - -IP forwarding is disabled by default on most Linux distributions. You will need -to enable it in order to use floating IPs. - -.. note:: - - IP forwarding only needs to be enabled on the nodes that run - ``nova-network``. However, you will need to enable it on all compute nodes - if you use ``multi_host`` mode. - -To check if IP forwarding is enabled, run: - -.. code-block:: console - - $ cat /proc/sys/net/ipv4/ip_forward - 0 - -Alternatively, run: - -.. code-block:: console - - $ sysctl net.ipv4.ip_forward - net.ipv4.ip_forward = 0 - -In these examples, IP forwarding is disabled. - -To enable IP forwarding dynamically, run: - -.. code-block:: console - - # sysctl -w net.ipv4.ip_forward=1 - -Alternatively, run: - -.. code-block:: console - - # echo 1 > /proc/sys/net/ipv4/ip_forward - -To make the changes permanent, edit the ``/etc/sysctl.conf`` file and update -the IP forwarding setting: - -.. code-block:: ini - - net.ipv4.ip_forward = 1 - -Save the file and run this command to apply the changes: - -.. code-block:: console - - # sysctl -p - -You can also apply the changes by restarting the network service: - -- on Ubuntu, Debian: - - .. code-block:: console - - # /etc/init.d/networking restart - -- on RHEL, Fedora, CentOS, openSUSE and SLES: - - .. code-block:: console - - # service network restart - -Create a list of available floating IP addresses ------------------------------------------------- - -Compute maintains a list of floating IP addresses that are available for -assigning to instances. Use the :command:`nova-manage floating` commands to -perform floating IP operations: - -- Add entries to the list: - - .. code-block:: console - - # nova-manage floating create --pool nova --ip_range 68.99.26.170/31 - -- List the floating IP addresses in the pool: - - .. code-block:: console - - # openstack floating ip list - -- Create specific floating IPs for either a single address or a subnet: - - .. code-block:: console - - # nova-manage floating create --pool POOL_NAME --ip_range CIDR - -- Remove floating IP addresses using the same parameters as the create command: - - .. code-block:: console - - # openstack floating ip delete CIDR - -For more information about how administrators can associate floating IPs with -instances, see :python-openstackclient-doc:`ip floating -` in the *python-openstackclient* User -Documentation. 
- -Automatically add floating IPs ------------------------------- - -You can configure ``nova-network`` to automatically allocate and assign a -floating IP address to virtual instances when they are launched. Add this line -to the ``/etc/nova/nova.conf`` file: - -.. code-block:: ini - - auto_assign_floating_ip=True - -Save the file, and restart ``nova-network`` - -.. note:: - - If this option is enabled, but all floating IP addresses have already been - allocated, the :command:`openstack server create` command will fail. - -Remove a network from a project -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -You cannot delete a network that has been associated to a project. This section -describes the procedure for dissociating it so that it can be deleted. - -In order to disassociate the network, you will need the ID of the project it -has been associated to. To get the project ID, you will need to be an -administrator. - -Disassociate the network from the project using the :command:`nova-manage -project scrub` command, with the project ID as the final parameter: - -.. code-block:: console - - # nova-manage project scrub --project ID - -Multiple interfaces for instances (multinic) -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -The multinic feature allows you to use more than one interface with your -instances. This is useful in several scenarios: - -- SSL Configurations (VIPs) - -- Services failover/HA - -- Bandwidth Allocation - -- Administrative/Public access to your instances - -Each VIP represents a separate network with its own IP block. Every network -mode has its own set of changes regarding multinic usage: - -.. figure:: figures/SCH_5007_V00_NUAC-multi_nic_OpenStack-Flat-manager.jpg - :width: 600 - -.. figure:: figures/SCH_5007_V00_NUAC-multi_nic_OpenStack-Flat-DHCP-manager.jpg - :width: 600 - -.. figure:: figures/SCH_5007_V00_NUAC-multi_nic_OpenStack-VLAN-manager.jpg - :width: 600 - -Using multinic --------------- - -In order to use multinic, create two networks, and attach them to the project -(named ``project`` on the command line): - -.. code-block:: console - - $ nova network-create first-net --fixed-range-v4 20.20.0.0/24 --project-id $your-project - $ nova network-create second-net --fixed-range-v4 20.20.10.0/24 --project-id $your-project - -Each new instance will now receive two IP addresses from their respective DHCP -servers: - -.. code-block:: console - - $ openstack server list - +---------+----------+--------+-----------------------------------------+------------+ - |ID | Name | Status | Networks | Image Name | - +---------+----------+--------+-----------------------------------------+------------+ - | 1234... | MyServer | ACTIVE | network2=20.20.0.3; private=20.20.10.14 | cirros | - +---------+----------+--------+-----------------------------------------+------------+ - -.. note:: - - Make sure you start the second interface on the instance, or it won't be - reachable through the second IP. - -This example demonstrates how to set up the interfaces within the instance. -This is the configuration that needs to be applied inside the image. - -Edit the ``/etc/network/interfaces`` file: - -.. code-block:: bash - - # The loopback network interface - auto lo - iface lo inet loopback - - auto eth0 - iface eth0 inet dhcp - - auto eth1 - iface eth1 inet dhcp - -If the Virtual Network Service Neutron is installed, you can specify the -networks to attach to the interfaces by using the ``--nic`` flag with the -:command:`openstack server create` command: - -.. 
code-block:: console - - $ openstack server create --image ed8b2a37-5535-4a5f-a615-443513036d71 \ - --flavor 1 --nic net-id=NETWORK1_ID --nic net-id=NETWORK2_ID test-vm1 - -Troubleshooting Networking -~~~~~~~~~~~~~~~~~~~~~~~~~~ - -Cannot reach floating IPs -------------------------- - -Problem -------- - -You cannot reach your instances through the floating IP address. - -Solution --------- - -- Check that the default security group allows ICMP (ping) and SSH (port 22), - so that you can reach the instances: - - .. code-block:: console - - $ openstack security group rule list default - +--------------------------------------+-------------+-----------+-----------------+-----------------------+ - | ID | IP Protocol | IP Range | Port Range | Remote Security Group | - +--------------------------------------+-------------+-----------+-----------------+-----------------------+ - | 63536865-e5b6-4df1-bac5-ca6d97d8f54d | tcp | 0.0.0.0/0 | 22:22 | None | - | e9d3200f-647a-4293-a9fc-e65ceee189ae | icmp | 0.0.0.0/0 | type=1:code=-1 | None | - +--------------------------------------+-------------+-----------+-----------------+-----------------------+ - -- Check the NAT rules have been added to iptables on the node that is running - ``nova-network``: - - .. code-block:: console - - # iptables -L -nv -t nat \ - -A nova-network-PREROUTING -d 68.99.26.170/32 -j DNAT --to-destination 10.0.0.3 \ - -A nova-network-floating-snat -s 10.0.0.3/32 -j SNAT --to-source 68.99.26.170 - -- Check that the public address (``68.99.26.170`` in this example), has been - added to your public interface. You should see the address in the listing - when you use the :command:`ip addr` command: - - .. code-block:: console - - $ ip addr - 2: eth0: mtu 1500 qdisc mq state UP qlen 1000 - link/ether xx:xx:xx:17:4b:c2 brd ff:ff:ff:ff:ff:ff - inet 13.22.194.80/24 brd 13.22.194.255 scope global eth0 - inet 68.99.26.170/32 scope global eth0 - inet6 fe80::82b:2bf:fe1:4b2/64 scope link - valid_lft forever preferred_lft forever - - .. note:: - - You cannot use ``SSH`` to access an instance with a public IP from within - the same server because the routing configuration does not allow it. - -- Use ``tcpdump`` to identify if packets are being routed to the inbound - interface on the compute host. If the packets are reaching the compute hosts - but the connection is failing, the issue may be that the packet is being - dropped by reverse path filtering. Try disabling reverse-path filtering on - the inbound interface. For example, if the inbound interface is ``eth2``, - run: - - .. code-block:: console - - # sysctl -w net.ipv4.conf.ETH2.rp_filter=0 - - If this solves the problem, add the following line to ``/etc/sysctl.conf`` so - that the reverse-path filter is persistent: - - .. code-block:: ini - - net.ipv4.conf.rp_filter=0 - -Temporarily disable firewall ----------------------------- - -Problem -------- - -Networking issues prevent administrators accessing or reaching VMs through -various pathways. - -Solution --------- - -You can disable the firewall by setting this option in ``/etc/nova/nova.conf``: - -.. code-block:: ini - - firewall_driver=nova.virt.firewall.NoopFirewallDriver - -.. warning:: - - We strongly recommend you remove this line to re-enable the firewall once - your networking issues have been resolved. 
- -Packet loss from instances to nova-network server (VLANManager mode) --------------------------------------------------------------------- - -Problem -------- - -If you can access your instances with ``SSH`` but the network to your instance -is slow, or if you find that running certain operations are slower than they -should be (for example, ``sudo``), packet loss could be occurring on the -connection to the instance. - -Packet loss can be caused by Linux networking configuration settings related to -bridges. Certain settings can cause packets to be dropped between the VLAN -interface (for example, ``vlan100``) and the associated bridge interface (for -example, ``br100``) on the host running ``nova-network``. - -Solution --------- - -One way to check whether this is the problem is to open three terminals and run -the following commands: - -#. In the first terminal, on the host running ``nova-network``, use ``tcpdump`` - on the VLAN interface to monitor DNS-related traffic (UDP, port 53). As - root, run: - - .. code-block:: console - - # tcpdump -K -p -i vlan100 -v -vv udp port 53 - -#. In the second terminal, also on the host running ``nova-network``, use - ``tcpdump`` to monitor DNS-related traffic on the bridge interface. As - root, run: - - .. code-block:: console - - # tcpdump -K -p -i br100 -v -vv udp port 53 - -#. In the third terminal, use ``SSH`` to access the instance and generate DNS - requests by using the :command:`nslookup` command: - - .. code-block:: console - - $ nslookup www.google.com - - The symptoms may be intermittent, so try running :command:`nslookup` - multiple times. If the network configuration is correct, the command should - return immediately each time. If it is not correct, the command hangs for - several seconds before returning. - -#. If the :command:`nslookup` command sometimes hangs, and there are packets - that appear in the first terminal but not the second, then the problem may - be due to filtering done on the bridges. Try disabling filtering, and - running these commands as root: - - .. code-block:: console - - # sysctl -w net.bridge.bridge-nf-call-arptables=0 - # sysctl -w net.bridge.bridge-nf-call-iptables=0 - # sysctl -w net.bridge.bridge-nf-call-ip6tables=0 - - If this solves your issue, add the following line to ``/etc/sysctl.conf`` so - that the changes are persistent: - - .. code-block:: ini - - net.bridge.bridge-nf-call-arptables=0 - net.bridge.bridge-nf-call-iptables=0 - net.bridge.bridge-nf-call-ip6tables=0 - -KVM: Network connectivity works initially, then fails ------------------------------------------------------ - -Problem -------- - -With KVM hypervisors, instances running Ubuntu 12.04 sometimes lose network -connectivity after functioning properly for a period of time. - -Solution --------- - -Try loading the ``vhost_net`` kernel module as a workaround for this issue (see -`bug #997978`_) . This kernel module may also `improve network performance`_ -on KVM. To load the kernel module: - -.. _`bug #997978`: https://bugs.launchpad.net/ubuntu/+source/libvirt/+bug/997978/ -.. _`improve network performance`: http://www.linux-kvm.org/page/VhostNet - -.. code-block:: console - - # modprobe vhost_net - -.. note:: - - Loading the module has no effect on running instances. 
diff --git a/doc/source/admin/networking.rst b/doc/source/admin/networking.rst
index 83e4d3df3a2..667a5bf12f3 100644
--- a/doc/source/admin/networking.rst
+++ b/doc/source/admin/networking.rst
@@ -24,6 +24,18 @@ A full guide on configuring and using SR-IOV is provided in the
 :neutron-doc:`OpenStack Networking service documentation
 `
 
+.. note::
+
+   Nova only supports PCI addresses where the fields are restricted to the
+   following maximum values:
+
+   * domain - 0xFFFF
+   * bus - 0xFF
+   * slot - 0x1F
+   * function - 0x7
+
+   Nova will ignore PCI devices reported by the hypervisor if the address is
+   outside of these ranges.
 
 NUMA Affinity
 -------------
@@ -50,14 +62,10 @@ Fortunately, nova provides functionality to ensure NUMA affinitization is
 provided for instances using neutron. How this works depends on the type of
 port you are trying to use.
 
-.. todo::
-
-   Add documentation for PCI NUMA affinity and PCI policies and link to it from
-   here.
-
 For SR-IOV ports, virtual functions, which are PCI devices, are attached to the
 instance. This means the instance can benefit from the NUMA affinity guarantees
-provided for PCI devices. This happens automatically.
+provided for PCI devices. This happens automatically and is described in detail
+in :ref:`pci-numa-affinity-policy`.
 
 For all other types of ports, some manual configuration is required.
 
@@ -84,7 +92,7 @@ For all other types of ports, some manual configuration is required.
 
    Consider an L2-type network using the Linux Bridge mechanism driver. As
    noted in the :neutron-doc:`neutron documentation
-   `, *physets* are mapped to interfaces
+   `, *physnets* are mapped to interfaces
    using the ``[linux_bridge] physical_interface_mappings`` configuration
    option. For example:
 
@@ -102,9 +110,9 @@ For all other types of ports, some manual configuration is required.
 
    For an L3-type network using the Linux Bridge mechanism driver, the device
    used will be configured using protocol-specific endpoint IP configuration
-   option. For VXLAN, this is the ``[vxlan] local_ip`` option. For example::
+   option. For VXLAN, this is the ``[vxlan] local_ip`` option. For example:
 
-   .. code-block::
+   .. code-block:: ini
 
       [vxlan]
      local_ip = OVERLAY_INTERFACE_IP_ADDRESS
 
@@ -142,10 +150,18 @@ For all other types of ports, some manual configuration is required.
   networks on a given host. There is only one configuration option that must
   be set:
 
-   ``[neutron_tunneled] numa_nodes``
+   ``[neutron_tunnel] numa_nodes``
 
   This should be set to a list of one or more NUMA nodes to which instances
   using tunneled networks will be affinitized.
 
+#. Configure a NUMA topology for instance flavor(s)
+
+   For network NUMA affinity to have any effect, the instance must have a NUMA
+   topology itself. This can be configured explicitly, using the
+   ``hw:numa_nodes`` extra spec, or implicitly through the use of CPU pinning
+   (``hw:cpu_policy=dedicated``) or PCI devices. For more information, refer to
+   :doc:`cpu-topologies`.
+
 Examples
 ~~~~~~~~
 
@@ -167,14 +183,15 @@ with ``provider:physical_network=foo`` must be scheduled on host cores from
 NUMA node 0, while instances using one or more networks with
 ``provider:physical_network=bar`` must be scheduled on host cores from both
 NUMA nodes 2 and 3. For the latter case, it will be necessary to split the
-guest across two or more host NUMA nodes using the ``hw:numa_nodes``
-:ref:`flavor extra spec `.
+guest across two or more host NUMA nodes using the
+:nova:extra-spec:`hw:numa_nodes` extra spec, as discussed :ref:`here
+`.
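+
+For example, a flavor could be given a two-node NUMA topology like so,
+allowing its instances to span both host NUMA nodes (the flavor name is
+illustrative):
+
+.. code-block:: console
+
+   $ openstack flavor set example.numa --property hw:numa_nodes=2
+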
 Now, take an example for a deployment using L3 networks.
 
 .. code-block:: ini
 
-   [neutron_tunneled]
+   [neutron_tunnel]
    numa_nodes = 0
 
 This is much simpler as all tunneled traffic uses the same logical interface.
diff --git a/doc/source/admin/node-down.rst b/doc/source/admin/node-down.rst
index f2d5c509688..58311e80888 100644
--- a/doc/source/admin/node-down.rst
+++ b/doc/source/admin/node-down.rst
@@ -145,7 +145,7 @@ A disk crash, network loss, or power failure can affect several components in
 your cloud architecture. The worst disaster for a cloud is a power loss. A
 power loss affects these components:
 
-- A cloud controller (``nova-api``, ``nova-objectstore``, ``nova-network``)
+- A cloud controller (``nova-api``, ``nova-conductor``, ``nova-scheduler``)
 
 - A compute node (``nova-compute``)
 
@@ -178,9 +178,6 @@ After power resumes and all hardware components restart:
 - The iSCSI session from the cloud controller to the compute node no longer
   exists.
 
-- nova-network reapplies configurations on boot and, as a result, recreates
-  the iptables and ebtables from the cloud controller to the compute node.
-
 - Instances stop running. Instances are not lost because neither ``destroy``
   nor ``terminate`` ran.
diff --git a/doc/source/admin/pci-passthrough.rst b/doc/source/admin/pci-passthrough.rst
index f4f06e0c5df..727a63070de 100644
--- a/doc/source/admin/pci-passthrough.rst
+++ b/doc/source/admin/pci-passthrough.rst
@@ -15,150 +15,264 @@ as multiple PCI devices. Virtual PCI devices are assigned to the same or
 different guests. In the case of PCI passthrough, the full physical device is
 assigned to only one guest and cannot be shared.
 
+PCI devices are requested through flavor extra specs, specifically via the
+:nova:extra-spec:`pci_passthrough:alias` flavor extra spec.
+This guide demonstrates how to enable PCI passthrough for a type of PCI device
+with a vendor ID of ``8086`` and a product ID of ``154d`` - an Intel X520
+Network Adapter - by mapping it to the alias ``a1``.
+You should adjust the instructions for other devices with potentially different
+capabilities.
+
 .. note::
 
-   For information on attaching virtual SR-IOV devices to guests, refer to the
-   :neutron-doc:`Networking Guide `.
+   For information on creating servers with SR-IOV network interfaces, refer to
+   the :neutron-doc:`Networking Guide `.
 
-To enable PCI passthrough, follow the steps below:
+   **Limitations**
 
-#. Configure nova-scheduler (Controller)
+   * Attaching SR-IOV ports to existing servers was not supported until the
+     22.0.0 Victoria release. Due to various bugs in libvirt and qemu we
+     recommend using at least libvirt version 6.0.0 and at least qemu version
+     4.2.
+   * Cold migration (resize) of servers with SR-IOV devices attached was not
+     supported until the 14.0.0 Newton release, see
+     `bug 1512800 `_ for details.
 
-#. Configure nova-api (Controller)**
+.. note::
 
-#. Configure a flavor (Controller)
+   Nova only supports PCI addresses where the fields are restricted to the
+   following maximum values:
 
-#. Enable PCI passthrough (Compute)
+   * domain - 0xFFFF
+   * bus - 0xFF
+   * slot - 0x1F
+   * function - 0x7
 
-#. Configure PCI devices in nova-compute (Compute)
+   Nova will ignore PCI devices reported by the hypervisor if the address is
+   outside of these ranges.
 
-.. note::
+Enabling PCI passthrough
+------------------------
 
-   The PCI device with address ``0000:41:00.0`` is used as an example. This
-   will differ between environments.
+Configure compute host
+~~~~~~~~~~~~~~~~~~~~~~
 
-Configure nova-scheduler (Controller)
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 
+To enable PCI passthrough on an x86, Linux-based compute node, the following
+are required:
 
-#. Configure ``nova-scheduler`` as specified in :neutron-doc:`Configure
-   nova-scheduler
-   ` for ``numa_policy``
+information.
 
-   For more information about the syntax of ``alias``, refer to
-   :doc:`/configuration/config`.
+Once configured, restart the :program:`nova-api` service.
 
-#. Restart the ``nova-compute`` service.
 
-Create instances with PCI passthrough devices
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+Configuring a flavor or image
+-----------------------------
 
-The ``nova-scheduler`` selects a destination host that has PCI devices
-available with the specified ``vendor_id`` and ``product_id`` that matches the
-``alias`` from the flavor.
+Once the alias has been configured, it can be used for a flavor extra spec.
+For example, to request two of the PCI devices referenced by alias ``a1``, run:
 
 .. code-block:: console
 
-    # openstack server create --flavor m1.large --image cirros-0.3.5-x86_64-uec --wait test-pci
+    $ openstack flavor set m1.large --property "pci_passthrough:alias"="a1:2"
+
+For more information about the syntax for ``pci_passthrough:alias``, refer to
+:doc:`the documentation `.
+
+
+.. _pci-numa-affinity-policy:
+
+PCI-NUMA affinity policies
+--------------------------
+
+By default, the libvirt driver enforces strict NUMA affinity for PCI devices,
+be they PCI passthrough devices or neutron SR-IOV interfaces. This means that
+by default a PCI device must be allocated from the same host NUMA node as at
+least one of the instance's CPUs. This isn't always necessary, however, and you
+can configure this policy using the
+:nova:extra-spec:`hw:pci_numa_affinity_policy` flavor extra spec or equivalent
+image metadata property. There are four possible values allowed:
+
+**required**
+    This policy means that nova will boot instances with PCI devices **only**
+    if at least one of the NUMA nodes of the instance is associated with these
+    PCI devices. It means that if NUMA node info for some PCI devices could not
+    be determined, those PCI devices wouldn't be consumable by the instance.
+    This provides maximum performance.
+
+**socket**
+    This policy means that the PCI device must be affined to the same host
+    socket as at least one of the guest NUMA nodes. For example, consider a
+    system with two sockets, each with two NUMA nodes, numbered node 0 and node
+    1 on socket 0, and node 2 and node 3 on socket 1. There is a PCI device
+    affined to node 0. An instance with two guest NUMA nodes and the
+    ``socket`` policy can be affined to either:
+
+    * node 0 and node 1
+    * node 0 and node 2
+    * node 0 and node 3
+    * node 1 and node 2
+    * node 1 and node 3
+
+    The instance cannot be affined to node 2 and node 3, as neither of those
+    is on the same socket as the PCI device. If the other nodes are consumed
+    by other instances and only nodes 2 and 3 are available, the instance
+    will not boot.
+
+**preferred**
+    This policy means that ``nova-scheduler`` will choose a compute host
+    with minimal consideration for the NUMA affinity of PCI devices.
+    ``nova-compute`` will attempt a best-effort selection of PCI devices
+    based on NUMA affinity; however, if this is not possible,
+    ``nova-compute`` will fall back to scheduling on a NUMA node that is not
+    associated with the PCI device.
+
+**legacy**
+    This is the default policy and it describes the current nova behavior.
+    Usually we have information about the association of PCI devices with NUMA
+    nodes. However, some PCI devices do not provide such information. The
+    ``legacy`` value means that nova will boot instances with a PCI device
+    if either:
+
+    * The PCI device is associated with at least one of the NUMA nodes on
+      which the instance will be booted
+
+    * There is no information about PCI-NUMA affinity available
+
+For example, to configure a flavor to use the ``preferred`` PCI NUMA affinity
+policy for any neutron SR-IOV interfaces attached by the user:
+
+.. code-block:: console
+
+    $ openstack flavor set $FLAVOR \
+      --property hw:pci_numa_affinity_policy=preferred
+
+You can also configure this for PCI passthrough devices by specifying the
+policy in the alias configuration via :oslo.config:option:`pci.alias`. For more
+information, refer to :oslo.config:option:`the documentation `.
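+
+For reference, a minimal compute-side configuration that would back the
+``a1`` alias used in the examples above could look like the following sketch.
+This assumes the Xena-era option names ``[pci] passthrough_whitelist`` and
+``[pci] alias``; the alias must also be configured wherever :program:`nova-api`
+runs:
+
+.. code-block:: ini
+
+    [pci]
+    # Expose matching host devices to nova on the compute node.
+    passthrough_whitelist = { "vendor_id": "8086", "product_id": "154d" }
+    # Map the device type to the alias "a1" used in flavor extra specs.
+    alias = { "vendor_id": "8086", "product_id": "154d", "device_type": "type-PF", "name": "a1" }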
diff --git a/doc/source/admin/ports-with-resource-requests.rst b/doc/source/admin/ports-with-resource-requests.rst
new file mode 100644
index 00000000000..2a2a5d41ef6
--- /dev/null
+++ b/doc/source/admin/ports-with-resource-requests.rst
@@ -0,0 +1,90 @@
+=================================
+Using ports with resource request
+=================================
+
+Starting from microversion 2.72, nova supports creating servers with neutron
+ports that have a resource request, visible as the admin-only port attribute
+``resource_request``. For example, a neutron port has a resource request if it
+has a QoS minimum bandwidth rule attached.
+
+The :neutron-doc:`Quality of Service (QoS): Guaranteed Bandwidth `
+document describes how to configure neutron to use this feature.
+
+Resource allocation
+~~~~~~~~~~~~~~~~~~~
+
+Nova collects and combines the resource request from each port in a boot
+request and sends one allocation candidate request to placement during
+scheduling, so placement will make sure that the resource requests of the
+ports are fulfilled. At the end of the scheduling, nova allocates one
+candidate in placement. Therefore the requested resources for each port from a
+single boot request will be allocated under the server's allocation in
+placement.
+
+
+Resource Group policy
+~~~~~~~~~~~~~~~~~~~~~
+
+Nova represents the resource request of each neutron port as a separate
+:placement-doc:`Granular Resource Request group `
+when querying placement for allocation candidates. When a server create request
+includes more than one port with resource requests, more than one group will be
+used in the allocation candidate query. In this case placement requires the
+``group_policy`` to be defined. Today this is only possible via the
+``group_policy`` key of the :nova-doc:`flavor extra_spec `.
+The possible values are ``isolate`` and ``none``.
+
+When the policy is set to ``isolate``, each request group and therefore the
+resource request of each neutron port will be fulfilled from separate resource
+providers. In the case of neutron ports with ``vnic_type=direct`` or
+``vnic_type=macvtap`` this means that each port will use a virtual function
+from a different physical function.
+
+When the policy is set to ``none``, the resource requests of the neutron
+ports can be fulfilled from overlapping resource providers. In the case of
+neutron ports with ``vnic_type=direct`` or ``vnic_type=macvtap`` this means the
+ports may use virtual functions from the same physical function.
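+
+For example, to request that each port's resources be fulfilled from separate
+resource providers, the policy could be set on the flavor like so (the flavor
+name is illustrative):
+
+.. code-block:: console
+
+    $ openstack flavor set my-flavor --property group_policy=isolate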
+
+For neutron ports with ``vnic_type=normal``, the group policy defines the
+collocation policy at the OVS bridge level, so ``group_policy=none`` is a
+reasonable default value in this case.
+
+If the ``group_policy`` is missing from the flavor, then the server create
+request will fail with 'No valid host was found' and a warning describing the
+missing policy will be logged.
+
+
+Virt driver support
+~~~~~~~~~~~~~~~~~~~
+
+Supporting neutron ports with ``vnic_type=direct`` or ``vnic_type=macvtap``
+depends on the capability of the virt driver. For the supported virt drivers,
+see the :nova-doc:`Support matrix `.
+
+If the virt driver on the compute host does not support the needed capability,
+then the PCI claim will fail on the host and a reschedule will be triggered. It
+is suggested not to configure bandwidth inventory in the neutron agents on
+these compute hosts to avoid unnecessary reschedules.
+
+
+Extended resource request
+~~~~~~~~~~~~~~~~~~~~~~~~~
+
+It is expected that neutron 20.0.0 (Yoga) will implement an extended resource
+request format via the ``port-resource-request-groups`` neutron API
+extension. As of nova 24.0.0 (Xena), nova already supports this extension if
+every nova-compute service is upgraded to the Xena version and the
+:oslo.config:option:`upgrade_levels.compute` configuration does not prevent
+the computes from using the latest RPC version.
+
+The extended resource request allows a single Neutron port to request
+resources in more than one request group. This also means that using just one
+port in a server create request would require a group policy to be provided
+in the flavor. Today the only case when a single port generates more than one
+request group is when that port has a QoS policy with both minimum bandwidth
+and minimum packet rate rules. Due to the placement resource model of these
+features, in this case the two request groups will always be fulfilled from
+separate resource providers and therefore neither the ``group_policy=none``
+nor the ``group_policy=isolate`` flavor extra specs will result in any
+additional restriction on the placement of the resources. In the multi-port
+case, the Resource Group policy section above still applies.
+

diff --git a/doc/source/admin/quotas.rst b/doc/source/admin/quotas.rst
index e9f0935bbb0..c8000b3ba28 100644
--- a/doc/source/admin/quotas.rst
+++ b/doc/source/admin/quotas.rst
@@ -1,304 +1,370 @@
-=============================
-Manage Compute service quotas
-=============================
-
-As an administrative user, you can use the :command:`nova quota-*` commands,
-which are provided by the ``python-novaclient`` package, to update the Compute
-service quotas for a specific project or project user, as well as update the
-quota defaults for a new project.
-
-.. todo::
-
-   At some point, probably in Queens, we need to scrub this page and mention
-   the microversions that remove the proxy and network-related resource quotas.
-
-.. rubric:: Compute quota descriptions
-
-.. list-table::
-   :header-rows: 1
-   :widths: 10 40
-
-   * - Quota name
-     - Description
-   * - cores
-     - Number of instance cores (VCPUs) allowed per project.
-   * - fixed-ips
-     - Number of fixed IP addresses allowed per project. This number
-       must be equal to or greater than the number of allowed
-       instances.
-   * - floating-ips
-     - Number of floating IP addresses allowed per project.
-   * - injected-file-content-bytes
-     - Number of content bytes allowed per injected file.
-   * - injected-file-path-bytes
-     - Length of injected file path.
- * - injected-files - - Number of injected files allowed per project. - * - instances - - Number of instances allowed per project. - * - key-pairs - - Number of key pairs allowed per user. - * - metadata-items - - Number of metadata items allowed per instance. - * - ram - - Megabytes of instance ram allowed per project. - * - security-groups - - Number of security groups per project. - * - security-group-rules - - Number of security group rules per project. - * - server-groups - - Number of server groups per project. - * - server-group-members - - Number of servers per server group. - -View and update Compute quotas for a project -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -To view and update default quota values ---------------------------------------- - -#. List all default quotas for all projects: +============= +Manage quotas +============= - .. code-block:: console +.. note:: - $ openstack quota show --default - - +-----------------------------+-------+ - | Quota | Limit | - +-----------------------------+-------+ - | instances | 10 | - | cores | 20 | - | ram | 51200 | - | floating_ips | 10 | - | fixed_ips | -1 | - | metadata_items | 128 | - | injected_files | 5 | - | injected_file_content_bytes | 10240 | - | injected_file_path_bytes | 255 | - | key_pairs | 100 | - | security_groups | 10 | - | security_group_rules | 20 | - | server_groups | 10 | - | server_group_members | 10 | - +-----------------------------+-------+ - -#. Update a default value for a new project, for example: + This section provides deployment information about the quota feature. For + end-user information about quotas, including information about the type of + quotas available, refer to the :doc:`user guide `. - .. code-block:: console +To prevent system capacities from being exhausted without notification, you can +set up quotas. Quotas are operational limits. For example, the number of +gigabytes allowed for each project can be controlled so that cloud resources +are optimized. Quotas can be enforced at both the project and the project-user +level. - $ openstack quota set --instances 15 default +Starting in the 16.0.0 Pike release, the quota calculation system in nova was +overhauled and the old reserve/commit/rollback flow was changed to `count +resource usage`__ at the point of whatever operation is being performed, such +as creating or resizing a server. A check will be performed by counting current +usage for the relevant resource and then, if +:oslo.config:option:`quota.recheck_quota` is True, another check will be +performed to ensure the initial check is still valid. -To view quota values for an existing project --------------------------------------------- +By default resource usage is counted using the API and cell databases but nova +can be configured to count some resource usage without using the cell +databases. See `Quota usage from placement`_ for details. -#. List the currently set quota values for a project: +Using the command-line interface, you can manage quotas for nova, along with +:cinder-doc:`cinder ` and :neutron-doc:`neutron +`. You would typically change default values +because, for example, a project requires more than ten volumes or 1 TB on a +compute node. - .. 
code-block:: console +__ https://specs.openstack.org/openstack/nova-specs/specs/pike/implemented/cells-count-resources-to-check-quota-in-api.html - $ openstack quota show PROJECT_NAME - - +-----------------------------+-------+ - | Quota | Limit | - +-----------------------------+-------+ - | instances | 10 | - | cores | 20 | - | ram | 51200 | - | floating_ips | 10 | - | fixed_ips | -1 | - | metadata_items | 128 | - | injected_files | 5 | - | injected_file_content_bytes | 10240 | - | injected_file_path_bytes | 255 | - | key_pairs | 100 | - | security_groups | 10 | - | security_group_rules | 20 | - | server_groups | 10 | - | server_group_members | 10 | - +-----------------------------+-------+ - -To update quota values for an existing project ----------------------------------------------- - -#. Obtain the project ID. - .. code-block:: console +Checking quota +-------------- - $ project=$(openstack project show -f value -c id PROJECT_NAME) +When calculating limits for a given resource and project, the following checks +are made in order: -#. Update a particular quota value. +#. Project-specific limits + + Depending on the resource, is there a project-specific limit on the + resource in either the ``quotas`` or ``project_user_quotas`` tables in the + database? If so, use that as the limit. You can create these resources + using: .. code-block:: console - $ openstack quota set --QUOTA_NAME QUOTA_VALUE PROJECT_OR_CLASS + $ openstack quota set --instances 5 + +#. Default limits - For example: + Check to see if there is a hard limit for the given resource in the + ``quota_classes`` table in the database for the ``default`` quota class. If + so, use that as the limit. You can modify the default quota limit for a + resource using: .. code-block:: console - $ openstack quota set --floating-ips 20 PROJECT_OR_CLASS - $ openstack quota show PROJECT_NAME - +-----------------------------+-------+ - | Quota | Limit | - +-----------------------------+-------+ - | instances | 10 | - | cores | 20 | - | ram | 51200 | - | floating_ips | 20 | - | fixed_ips | -1 | - | metadata_items | 128 | - | injected_files | 5 | - | injected_file_content_bytes | 10240 | - | injected_file_path_bytes | 255 | - | key_pairs | 100 | - | security_groups | 10 | - | security_group_rules | 20 | - | server_groups | 10 | - | server_group_members | 10 | - +-----------------------------+-------+ + $ openstack quota set --instances 5 --class default .. note:: - To view a list of options for the :command:`openstack quota set` command, - run: + Only the ``default`` class is supported by nova. - .. code-block:: console +#. Config-driven limits - $ openstack help quota set + If the above does not provide a resource limit, then rely on the + configuration options in the :oslo.config:group:`quota` config group for + the default limits. -View and update Compute quotas for a project user -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +.. note:: -To view quota values for a project user ---------------------------------------- + The API sets the limit in the ``quota_classes`` table. Once a default limit + is set via the `default` quota class, that takes precedence over any + changes to that resource limit in the configuration options. In other + words, once you've changed things via the API, you either have to keep + those synchronized with the configuration values or remove the default + limit from the database manually as there is no REST API for removing quota + class values from the database. -#. Place the user ID in a usable variable. - .. 
code-block:: console
+.. _quota-usage-from-placement:
-      $ projectUser=$(openstack user show -f value -c id USER_NAME)
+Quota usage from placement
+--------------------------
-#. Place the user's project ID in a usable variable, as follows:
+Starting in the Train (20.0.0) release, it is possible to configure quota usage
+counting of cores and RAM from the placement service and instances from
+instance mappings in the API database instead of counting resources from cell
+databases. This makes quota usage counting resilient in the presence of `down
+or poor-performing cells`__.
-   .. code-block:: console
+Quota usage counting from placement is opt-in via the
+:oslo.config:option:`quota.count_usage_from_placement` config option:
-      $ project=$(openstack project show -f value -c id PROJECT_NAME)
+.. code-block:: ini
-#. List the currently set quota values for a project user.
+
+   [quota]
+   count_usage_from_placement = True
-   .. code-block:: console
+
+There are some things to note when opting in to counting quota usage from
+placement:
-      $ nova quota-show --user $projectUser --tenant $project
+
+* Counted usage will not be accurate in an environment where multiple Nova
+  deployments are sharing a placement deployment because currently placement
+  has no way of partitioning resource providers between different Nova
+  deployments. Operators who are running multiple Nova deployments that share a
+  placement deployment should not set the
+  :oslo.config:option:`quota.count_usage_from_placement` configuration option
+  to ``True``.
-   For example:
+
+* Behavior will be different for resizes. During a resize, resource allocations
+  are held on both the source and destination (even on the same host, see
+  https://bugs.launchpad.net/nova/+bug/1790204) until the resize is confirmed
+  or reverted. Quota usage will be inflated for servers in this state and
+  operators should weigh the advantages and disadvantages before enabling
+  :oslo.config:option:`quota.count_usage_from_placement`.
-   .. code-block:: console
+
+* The ``populate_queued_for_delete`` and ``populate_user_id`` online data
+  migrations must be completed before usage can be counted from placement.
+  Until the data migration is complete, the system will fall back to legacy
+  quota usage counting from cell databases depending on the result of an EXISTS
+  database query during each quota check, if
+  :oslo.config:option:`quota.count_usage_from_placement` is set to ``True``.
+  Operators who want to avoid the performance hit from the EXISTS queries
+  should wait to set the :oslo.config:option:`quota.count_usage_from_placement`
+  configuration option to ``True`` until after they have completed their online
+  data migrations via ``nova-manage db online_data_migrations``.
-      $ nova quota-show --user $projectUser --tenant $project
-      +-----------------------------+-------+
-      | Quota                       | Limit |
-      +-----------------------------+-------+
-      | instances                   | 10    |
-      | cores                       | 20    |
-      | ram                         | 51200 |
-      | floating_ips                | 20    |
-      | fixed_ips                   | -1    |
-      | metadata_items              | 128   |
-      | injected_files              | 5     |
-      | injected_file_content_bytes | 10240 |
-      | injected_file_path_bytes    | 255   |
-      | key_pairs                   | 100   |
-      | security_groups             | 10    |
-      | security_group_rules        | 20    |
-      | server_groups               | 10    |
-      | server_group_members        | 10    |
-      +-----------------------------+-------+
-
-To update quota values for a project user
------------------------------------------
-
-#. Place the user ID in a usable variable.
+
+* Behavior will be different for unscheduled servers in ``ERROR`` state.
A + server in ``ERROR`` state that has never been scheduled to a compute host + will not have placement allocations, so it will not consume quota usage for + cores and ram. - .. code-block:: console +* Behavior will be different for servers in ``SHELVED_OFFLOADED`` state. A + server in ``SHELVED_OFFLOADED`` state will not have placement allocations, so + it will not consume quota usage for cores and ram. Note that because of this, + it will be possible for a request to unshelve a server to be rejected if the + user does not have enough quota available to support the cores and ram needed + by the server to be unshelved. - $ projectUser=$(openstack user show -f value -c id USER_NAME) +__ https://docs.openstack.org/api-guide/compute/down_cells.html -#. Place the user's project ID in a usable variable, as follows: - .. code-block:: console +Known issues +------------ - $ project=$(openstack project show -f value -c id PROJECT_NAME) +If not :ref:`counting quota usage from placement ` +it is possible for down or poor-performing cells to impact quota calculations. +See the :ref:`cells documentation ` for details. -#. Update a particular quota value, as follows: - .. code-block:: console +Future plans +------------ - $ nova quota-update --user $projectUser --QUOTA_NAME QUOTA_VALUE $project +Hierarchical quotas +~~~~~~~~~~~~~~~~~~~ - For example: +There has long been a desire to support hierarchical or nested quotas +leveraging support in the identity service for hierarchical projects. +See the `unified limits`__ spec for details. - .. code-block:: console +__ https://review.opendev.org/#/c/602201/ - $ nova quota-update --user $projectUser --floating-ips 12 $project - $ nova quota-show --user $projectUser --tenant $project - +-----------------------------+-------+ - | Quota | Limit | - +-----------------------------+-------+ - | instances | 10 | - | cores | 20 | - | ram | 51200 | - | floating_ips | 12 | - | fixed_ips | -1 | - | metadata_items | 128 | - | injected_files | 5 | - | injected_file_content_bytes | 10240 | - | injected_file_path_bytes | 255 | - | key_pairs | 100 | - | security_groups | 10 | - | security_group_rules | 20 | - | server_groups | 10 | - | server_group_members | 10 | - +-----------------------------+-------+ - .. note:: +Configuration +------------- + +View and update default quota values +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +To list all default quotas for a project, run: + +.. code-block:: console + + $ openstack quota show --default + +.. note:: + + This lists default quotas for all services and not just nova. + +To update a default value for a new project, run: + +.. code-block:: console + + $ openstack quota set --class --instances 15 default + +View and update quota values for a project or class +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +To list quotas for a project, run: + +.. code-block:: console + + $ openstack quota show PROJECT + +.. note:: + + This lists project quotas for all services and not just nova. - To view a list of options for the :command:`nova quota-update` command, - run: +To update quotas for a project, run: - .. code-block:: console +.. code-block:: console + + $ openstack quota set --QUOTA QUOTA_VALUE PROJECT + +To update quotas for a class, run: + +.. code-block:: console - $ nova help quota-update + $ openstack quota set --class --QUOTA QUOTA_VALUE CLASS -To display the current quota usage for a project user ------------------------------------------------------ +.. note:: + + Only the ``default`` class is supported by nova. 
+ +For example: + +.. code-block:: console -Use :command:`nova limits` to get a list of the -current quota values and the current quota usage: + $ openstack quota set --instances 12 my-project + $ openstack quota show my-project + +----------------------+----------------------------------+ + | Field | Value | + +----------------------+----------------------------------+ + | backup-gigabytes | 1000 | + | backups | 10 | + | cores | 32 | + | fixed-ips | -1 | + | floating-ips | 10 | + | gigabytes | 1000 | + | health_monitors | None | + | injected-file-size | 10240 | + | injected-files | 5 | + | injected-path-size | 255 | + | instances | 12 | + | key-pairs | 100 | + | l7_policies | None | + | listeners | None | + | load_balancers | None | + | location | None | + | name | None | + | networks | 20 | + | per-volume-gigabytes | -1 | + | pools | None | + | ports | 60 | + | project | c8156b55ec3b486193e73d2974196993 | + | project_name | project | + | properties | 128 | + | ram | 65536 | + | rbac_policies | 10 | + | routers | 10 | + | secgroup-rules | 50 | + | secgroups | 50 | + | server-group-members | 10 | + | server-groups | 10 | + | snapshots | 10 | + | subnet_pools | -1 | + | subnets | 20 | + | volumes | 10 | + +----------------------+----------------------------------+ + +To view a list of options for the :command:`openstack quota show` and +:command:`openstack quota set` commands, run: .. code-block:: console - $ nova limits --tenant PROJECT_NAME - - +------+-----+-------+--------+------+----------------+ - | Verb | URI | Value | Remain | Unit | Next_Available | - +------+-----+-------+--------+------+----------------+ - +------+-----+-------+--------+------+----------------+ - - +--------------------+------+-------+ - | Name | Used | Max | - +--------------------+------+-------+ - | Cores | 0 | 20 | - | Instances | 0 | 10 | - | Keypairs | - | 100 | - | Personality | - | 5 | - | Personality Size | - | 10240 | - | RAM | 0 | 51200 | - | Server Meta | - | 128 | - | ServerGroupMembers | - | 10 | - | ServerGroups | 0 | 10 | - +--------------------+------+-------+ + $ openstack quota show --help + $ openstack quota set --help + +View and update quota values for a project user +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. note:: - The :command:`nova limits` command generates an empty - table as a result of the Compute API, which prints an - empty list for backward compatibility purposes. + User-specific quotas are legacy and will be removed when migration to + :keystone-doc:`unified limits ` is complete. + User-specific quotas were added as a way to provide two-level hierarchical + quotas and this feature is already being offered in unified limits. For + this reason, the below commands have not and will not be ported to + openstackclient. + +To show quotas for a specific project user, run: + +.. code-block:: console + + $ nova quota-show --user USER PROJECT + +To update quotas for a specific project user, run: + +.. code-block:: console + + $ nova quota-update --user USER --QUOTA QUOTA_VALUE PROJECT + +For example: + +.. 
code-block:: console
+
+   $ projectUser=$(openstack user show -f value -c id USER)
+   $ project=$(openstack project show -f value -c id PROJECT)
+
+   $ nova quota-update --user $projectUser --instances 12 $project
+   $ nova quota-show --user $projectUser --tenant $project
+   +-----------------------------+-------+
+   | Quota                       | Limit |
+   +-----------------------------+-------+
+   | instances                   | 12    |
+   | cores                       | 20    |
+   | ram                         | 51200 |
+   | floating_ips                | 10    |
+   | fixed_ips                   | -1    |
+   | metadata_items              | 128   |
+   | injected_files              | 5     |
+   | injected_file_content_bytes | 10240 |
+   | injected_file_path_bytes    | 255   |
+   | key_pairs                   | 100   |
+   | security_groups             | 10    |
+   | security_group_rules        | 20    |
+   | server_groups               | 10    |
+   | server_group_members        | 10    |
+   +-----------------------------+-------+
+
+To view the quota usage for the current user, run:
+
+.. code-block:: console
+
+   $ nova limits --tenant PROJECT
+
+For example:
+
+.. code-block:: console
+
+   $ nova limits --tenant my-project
+   +------+-----+-------+--------+------+----------------+
+   | Verb | URI | Value | Remain | Unit | Next_Available |
+   +------+-----+-------+--------+------+----------------+
+   +------+-----+-------+--------+------+----------------+
+
+   +--------------------+------+-------+
+   | Name               | Used | Max   |
+   +--------------------+------+-------+
+   | Cores              | 0    | 20    |
+   | Instances          | 0    | 10    |
+   | Keypairs           | -    | 100   |
+   | Personality        | -    | 5     |
+   | Personality Size   | -    | 10240 |
+   | RAM                | 0    | 51200 |
+   | Server Meta        | -    | 128   |
+   | ServerGroupMembers | -    | 10    |
+   | ServerGroups       | 0    | 10    |
+   +--------------------+------+-------+
+
+.. note::
+
+   The :command:`nova limits` command generates an empty table as a result of
+   the Compute API, which prints an empty list for backward compatibility
+   purposes.
+
+To view a list of options for the :command:`nova quota-show` and
+:command:`nova quota-update` commands, run:
+
+.. code-block:: console
+
+   $ nova help quota-show
+   $ nova help quota-update

diff --git a/doc/source/admin/quotas2.rst b/doc/source/admin/quotas2.rst
deleted file mode 100644
index 19cd8d50751..00000000000
--- a/doc/source/admin/quotas2.rst
+++ /dev/null
@@ -1,54 +0,0 @@
-.. _manage-quotas:
-
-=============
-Manage quotas
-=============
-
-.. todo:: Merge this into 'quotas.rst'
-
-To prevent system capacities from being exhausted without notification, you can
-set up quotas. Quotas are operational limits. For example, the number of
-gigabytes allowed for each project can be controlled so that cloud resources
-are optimized. Quotas can be enforced at both the project and the project-user
-level.
-
-Using the command-line interface, you can manage quotas for the OpenStack
-Compute service, the OpenStack Block Storage service, and the OpenStack
-Networking service.
-
-The cloud operator typically changes default values because a project requires
-more than ten volumes or 1 TB on a compute node.
-
-.. note::
-
-   To view all projects, run:
-
-   .. code-block:: console
-
-      $ openstack project list
-      +----------------------------------+----------+
-      | ID                               | Name     |
-      +----------------------------------+----------+
-      | e66d97ac1b704897853412fc8450f7b9 | admin    |
-      | bf4a37b885fe46bd86e999e50adad1d3 | services |
-      | 21bd1c7c95234fd28f589b60903606fa | tenant01 |
-      | f599c5cd1cba4125ae3d7caed08e288c | tenant02 |
-      +----------------------------------+----------+
-
-   To display all current users for a project, run:
-
-   .. code-block:: console
-
-      $ openstack user list --project PROJECT_NAME
-      +----------------------------------+--------+
-      | ID                               | Name   |
-      +----------------------------------+--------+
-      | ea30aa434ab24a139b0e85125ec8a217 | demo00 |
-      | 4f8113c1d838467cad0c2f337b3dfded | demo01 |
-      +----------------------------------+--------+
-
-Use :samp:`openstack quota show {PROJECT_NAME}` to list all quotas for a
-project.
-
-Use :samp:`openstack quota set {PROJECT_NAME} {--parameters}` to set quota
-values.

diff --git a/doc/source/admin/real-time.rst b/doc/source/admin/real-time.rst
new file mode 100644
index 00000000000..cad78df93a5
--- /dev/null
+++ b/doc/source/admin/real-time.rst
@@ -0,0 +1,152 @@
+=========
+Real Time
+=========
+
+.. versionadded:: 13.0.0 (Mitaka)
+
+Nova supports configuring `real-time policies`__ for instances. This builds upon
+the improved performance offered by :doc:`CPU pinning ` by
+providing stronger guarantees for worst case scheduler latency for vCPUs.
+
+.. __: https://en.wikipedia.org/wiki/Real-time_computing
+
+
+Enabling Real-Time
+------------------
+
+Currently, the creation of real-time instances is only supported when using the
+libvirt compute driver with a :oslo.config:option:`libvirt.virt_type` of
+``kvm`` or ``qemu``. It requires extensive configuration of the host and this
+document provides but a rough overview of the changes required. Configuration
+will vary depending on your hardware, BIOS configuration, host and guest OSes,
+and applications.
+
+BIOS configuration
+~~~~~~~~~~~~~~~~~~
+
+Configure your host BIOS as recommended in the `rt-wiki`__ page.
+The most important steps are:
+
+- Disable power management, including CPU sleep states
+- Disable SMT (hyper-threading) or any option related to logical processors
+
+These are standard steps used in benchmarking as both sets of features can
+result in non-deterministic behavior.
+
+.. __: https://rt.wiki.kernel.org/index.php/HOWTO:_Build_an_RT-application
+
+OS configuration
+~~~~~~~~~~~~~~~~
+
+This is inherently specific to the distro used, however, there are some common
+steps:
+
+- Install the real-time (preemptible) kernel (``PREEMPT_RT_FULL``) and
+  real-time KVM modules
+- Configure hugepages
+- Isolate host cores to be used for instances from the kernel
+- Disable features like CPU frequency scaling (e.g. P-States on Intel
+  processors)
+
+RHEL and RHEL-derived distros like CentOS provide packages in their
+repositories to accomplish this. The ``kernel-rt`` and ``kernel-rt-kvm``
+packages will provide the real-time kernel and real-time KVM module,
+respectively, while the ``tuned-profiles-realtime`` package will provide
+`tuned`__ profiles to configure the host for real-time workloads. You should
+refer to your distro documentation for more information.
+
+.. __: https://tuned-project.org/
+
+Validation
+~~~~~~~~~~
+
+Once your BIOS and the host OS have been configured, you can validate
+"real-time readiness" using the ``hwlatdetect`` and ``rteval`` utilities. On
+RHEL and RHEL-derived hosts, you can install these using the ``rt-tests``
+package. More information about the ``rteval`` tool can be found `here`__.
+
+.. __: https://git.kernel.org/pub/scm/utils/rteval/rteval.git/tree/README
+
+
+Configuring a flavor or image
+-----------------------------
+
+.. versionchanged:: 22.0.0 (Victoria)
+
+   Previously, it was necessary to specify
+   :nova:extra-spec:`hw:cpu_realtime_mask` when realtime mode was enabled via
+   :nova:extra-spec:`hw:cpu_realtime`.
+   Starting in Victoria, it is possible
+   to omit this when an emulator thread policy is configured using the
+   :nova:extra-spec:`hw:emulator_threads_policy` extra spec, thus allowing all
+   guest cores to be allocated as real-time cores.
+
+.. versionchanged:: 22.0.0 (Victoria)
+
+   Previously, a leading caret was necessary when specifying the value for
+   :nova:extra-spec:`hw:cpu_realtime_mask` and omitting it would be equivalent
+   to not setting the mask, resulting in a failure to spawn the instance.
+
+Compared to configuring the host, configuring the guest is relatively trivial
+and merely requires a combination of flavor extra specs and image metadata
+properties, along with a suitable real-time guest OS.
+
+Enable real-time by setting the :nova:extra-spec:`hw:cpu_realtime` flavor extra
+spec to ``yes`` or a truthy value. When this is configured, it is necessary to
+specify where guest overhead processes should be scheduled to. This can be
+accomplished in one of three ways. Firstly, the
+:nova:extra-spec:`hw:cpu_realtime_mask` extra spec or equivalent image metadata
+property can be used to indicate which guest cores should be scheduled as
+real-time cores, leaving the remainder to be scheduled as non-real-time cores
+and to handle overhead processes. For example, to allocate the first two cores
+of an 8-core instance as the non-real-time cores:
+
+.. code-block:: console
+
+   $ openstack flavor set $FLAVOR \
+     --property hw:cpu_realtime=yes \
+     --property hw:cpu_realtime_mask=2-7  # so 0,1 are non-real-time
+
+In this configuration, any non-real-time cores configured will have an implicit
+``dedicated`` :ref:`CPU pinning policy ` applied. It is
+possible to apply a ``shared`` policy for these non-real-time cores by
+specifying the ``mixed`` :ref:`CPU pinning policy ` via
+the :nova:extra-spec:`hw:cpu_policy` extra spec. This can be useful to increase
+resource utilization of the host. For example:
+
+.. code-block:: console
+
+   $ openstack flavor set $FLAVOR \
+     --property hw:cpu_policy=mixed \
+     --property hw:cpu_realtime=yes \
+     --property hw:cpu_realtime_mask=2-7  # so 0,1 are non-real-time and unpinned
+
+Finally, you can explicitly :ref:`offload guest overhead processes to another
+host core ` using the
+:nova:extra-spec:`hw:emulator_threads_policy` extra spec. For example:
+
+.. code-block:: console
+
+   $ openstack flavor set $FLAVOR \
+     --property hw:cpu_realtime=yes \
+     --property hw:emulator_threads_policy=share
+
+.. note::
+
+   Emulator thread pinning requires additional host configuration.
+   Refer to :ref:`the documentation ` for
+   more information.
+
+In addition to configuring the instance CPUs, it is also likely that you will
+need to configure guest huge pages. For information on how to configure these,
+refer to :doc:`the documentation `.
+
+References
+----------
+
+* `Libvirt real time instances (spec)`__
+* `The Real Time Linux collaborative project`__
+* `Deploying Real Time OpenStack`__
+
+.. __: https://specs.openstack.org/openstack/nova-specs/specs/mitaka/implemented/libvirt-real-time.html
+.. __: https://wiki.linuxfoundation.org/realtime/start
+.. __: https://that.guru/blog/deploying-real-time-openstack/
diff --git a/doc/source/admin/remote-console-access.rst b/doc/source/admin/remote-console-access.rst
index f5a2f01556b..01ef44810c3 100644
--- a/doc/source/admin/remote-console-access.rst
+++ b/doc/source/admin/remote-console-access.rst
@@ -2,111 +2,164 @@
 Configure remote console access
 ===============================
-To provide a remote console or remote desktop access to guest virtual machines,
-use VNC, SPICE HTML5 or Serial through either the OpenStack dashboard or the
-command line. Best practice is to select only one of them to run.
+OpenStack provides a number of different methods to interact with your guests:
+VNC, SPICE, Serial, RDP or MKS. If configured, these can be accessed by users
+through the OpenStack dashboard or the command line. This document outlines how
+these different technologies can be configured.
-.. _about-nova-consoleauth:
-About nova-consoleauth
-----------------------
+Overview
+--------
-The client proxies leverage a shared service to manage token authentication
-called ``nova-consoleauth``. This service must be running for either proxy to
-work. Many proxies of either type can be run against a single
-``nova-consoleauth`` service in a cluster configuration.
+It is considered best practice to deploy only one of the console types, and
+not all console types are supported by all compute drivers. Regardless of what
+option is chosen, a console proxy service is required. These proxy services are
+responsible for the following:
-Do not confuse the ``nova-consoleauth`` shared service with ``nova-console``,
-which is a XenAPI-specific service that most recent VNC proxy architectures do
-not use.
+- Provide a bridge between the public network where the clients live and the
+  private network where the servers with consoles live.
-.. deprecated:: 18.0.0
+- Mediate token authentication.
-   ``nova-consoleauth`` is deprecated since 18.0.0 (Rocky) and will be removed
-   in an upcoming release.
+- Transparently handle hypervisor-specific connection details to provide a
+  uniform client experience.
-SPICE console
--------------
+For some combinations of compute driver and console driver, these proxy
+services are provided by the hypervisor or another service. For all others,
+nova provides services to handle this proxying. Consider a noVNC-based VNC
+console connection for example:
-OpenStack Compute supports VNC consoles to guests. The VNC protocol is fairly
-limited, lacking support for multiple monitors, bi-directional audio, reliable
-cut-and-paste, video streaming and more. SPICE is a new protocol that aims to
-address the limitations in VNC and provide good remote desktop support.
+#. A user connects to the API and gets an ``access_url`` such as
+   ``http://ip:port/?path=%3Ftoken%3Dxyz``.
-SPICE support in OpenStack Compute shares a similar architecture to the VNC
-implementation. The OpenStack dashboard uses a SPICE-HTML5 widget in its
-console tab that communicates to the ``nova-spicehtml5proxy`` service by using
-SPICE-over-websockets. The ``nova-spicehtml5proxy`` service communicates
-directly with the hypervisor process by using SPICE.
+#. The user pastes the URL in a browser or uses it as a client parameter.
-VNC must be explicitly disabled to get access to the SPICE console. Set the
-``vnc_enabled`` option to ``False`` in the ``[DEFAULT]`` section to disable the
-VNC console.
+#. The browser or client connects to the proxy.
-Use the following options to configure SPICE as the console for OpenStack -Compute: +#. The proxy authorizes the token for the user, and maps the token to the + *private* host and port of the VNC server for an instance. -.. code-block:: console + The compute host specifies the address that the proxy should use to connect + through the :oslo.config:option:`vnc.server_proxyclient_address` option. In + this way, the VNC proxy works as a bridge between the public network and + private host network. - [spice] - agent_enabled = False - enabled = True - html5proxy_base_url = http://IP_ADDRESS:6082/spice_auto.html - html5proxy_host = 0.0.0.0 - html5proxy_port = 6082 - keymap = en-us - server_listen = 127.0.0.1 - server_proxyclient_address = 127.0.0.1 +#. The proxy initiates the connection to VNC server and continues to proxy + until the session ends. -Replace ``IP_ADDRESS`` with the management interface IP address of the -controller or the VIP. +This means a typical deployment with noVNC-based VNC consoles will have the +following components: -VNC console proxy ------------------ +- One or more :program:`nova-novncproxy` service. Supports browser-based noVNC + clients. For simple deployments, this service typically runs on the same + machine as :program:`nova-api` because it operates as a proxy between the + public network and the private compute host network. -The VNC proxy is an OpenStack component that enables compute service users to -access their instances through VNC clients. +- One or more :program:`nova-compute` services. Hosts the instances for which + consoles are provided. + +.. todo:: + + The below diagram references :program:`nova-consoleauth` and needs to be + updated. + +This particular example is illustrated below. + +.. figure:: figures/SCH_5009_V00_NUAC-VNC_OpenStack.png + :alt: noVNC process + :width: 95% + + +noVNC-based VNC console +----------------------- + +VNC is a graphical console with wide support among many hypervisors and +clients. noVNC provides VNC support through a web browser. .. note:: - The web proxy console URLs do not support the websocket protocol scheme - (ws://) on python versions less than 2.7.4. + It has `been reported`__ that versions of noVNC older than 0.6 do not work + with the :program:`nova-novncproxy` service. -The VNC console connection works as follows: + If using non-US key mappings, you need at least noVNC 1.0.0 for `a fix`__. -#. A user connects to the API and gets an ``access_url`` such as, - ``http://ip:port/?token=xyz``. + If using VMware ESX/ESXi hypervisors, you need at least noVNC 1.1.0 for + `a fix`__. -#. The user pastes the URL in a browser or uses it as a client parameter. + __ https://bugs.launchpad.net/nova/+bug/1752896 + __ https://github.com/novnc/noVNC/commit/99feba6ba8fee5b3a2b2dc99dc25e9179c560d31 + __ https://github.com/novnc/noVNC/commit/2c813a33fe6821f5af737327c50f388052fa963b -#. The browser or client connects to the proxy. +Configuration +~~~~~~~~~~~~~ -#. The proxy talks to ``nova-consoleauth`` to authorize the token for the user, - and maps the token to the *private* host and port of the VNC server for an - instance. +To enable the noVNC VNC console service, you must configure both the +:program:`nova-novncproxy` service and the :program:`nova-compute` service. +Most options are defined in the :oslo.config:group:`vnc` group. - The compute host specifies the address that the proxy should use to connect - through the ``nova.conf`` file option, ``server_proxyclient_address``. 
In - this way, the VNC proxy works as a bridge between the public network and - private host network. +The :program:`nova-novncproxy` service accepts the following options: -#. The proxy initiates the connection to VNC server and continues to proxy - until the session ends. +- :oslo.config:option:`daemon` +- :oslo.config:option:`ssl_only` +- :oslo.config:option:`source_is_ipv6` +- :oslo.config:option:`cert` +- :oslo.config:option:`key` +- :oslo.config:option:`web` +- :oslo.config:option:`console.ssl_ciphers` +- :oslo.config:option:`console.ssl_minimum_version` +- :oslo.config:option:`vnc.novncproxy_host` +- :oslo.config:option:`vnc.novncproxy_port` -The proxy also tunnels the VNC protocol over WebSockets so that the ``noVNC`` -client can talk to VNC servers. In general, the VNC proxy: +If using the libvirt compute driver and enabling :ref:`vnc-security`, the +following additional options are supported: -- Bridges between the public network where the clients live and the private - network where VNC servers live. +- :oslo.config:option:`vnc.auth_schemes` +- :oslo.config:option:`vnc.vencrypt_client_key` +- :oslo.config:option:`vnc.vencrypt_client_cert` +- :oslo.config:option:`vnc.vencrypt_ca_certs` -- Mediates token authentication. +For example, to configure this via a ``nova-novncproxy.conf`` file: -- Transparently deals with hypervisor-specific connection details to provide a - uniform client experience. +.. code-block:: ini -.. figure:: figures/SCH_5009_V00_NUAC-VNC_OpenStack.png - :alt: noVNC process - :width: 95% + [vnc] + novncproxy_host = 0.0.0.0 + novncproxy_port = 6082 + +.. note:: + + This doesn't show configuration with security. For information on how to + configure this, refer to :ref:`vnc-security` below. + +The :program:`nova-compute` service requires the following options to configure +noVNC-based VNC console support: + +- :oslo.config:option:`vnc.enabled` +- :oslo.config:option:`vnc.novncproxy_base_url` +- :oslo.config:option:`vnc.server_listen` +- :oslo.config:option:`vnc.server_proxyclient_address` + +If using the VMware compute driver, the following additional options are +supported: + +- :oslo.config:option:`vmware.vnc_port` +- :oslo.config:option:`vmware.vnc_port_total` + +For example, to configure this via a ``nova.conf`` file: + +.. code-block:: ini + + [vnc] + enabled = True + novncproxy_base_url = http://IP_ADDRESS:6082/vnc_auto.html + server_listen = 127.0.0.1 + server_proxyclient_address = 127.0.0.1 + +Replace ``IP_ADDRESS`` with the IP address from which the proxy is accessible +by the outside world. For example, this may be the management interface IP +address of the controller or the VIP. + +.. _vnc-security: VNC proxy security ~~~~~~~~~~~~~~~~~~ @@ -142,7 +195,7 @@ certificates: The authority certificate used to sign ``server-cert.pem`` and sign the VNC proxy server certificates. -The certificates must have v3 basic constraints [3]_ present to indicate the +The certificates must have v3 basic constraints [2]_ present to indicate the permitted key use and purpose data. We recommend using a dedicated certificate authority solely for the VNC @@ -151,7 +204,7 @@ for the OpenStack deployment. This is because libvirt does not currently have a mechanism to restrict what certificates can be presented by the proxy server. For further details on certificate creation, consult the QEMU manual page -documentation on VNC server certificate setup [2]_. +documentation on VNC server certificate setup [1]_. 
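+
+As a rough, illustrative sketch only (the QEMU manual page referenced above
+remains the authoritative guide), a dedicated CA and a server certificate
+could be generated with ``openssl`` along the following lines. The hostname,
+lifetimes and file names are examples, and the v3 basic constraints discussed
+above must still be supplied via an appropriate extensions file:
+
+.. code-block:: console
+
+   $ # create a self-signed CA used solely for the VNC service
+   $ openssl req -x509 -newkey rsa:4096 -nodes -days 365 \
+       -keyout ca-key.pem -out ca-cert.pem -subj "/CN=nova-vnc-ca"
+
+   $ # create a key and certificate signing request for one compute node
+   $ openssl req -newkey rsa:4096 -nodes -keyout server-key.pem \
+       -out server.csr -subj "/CN=compute1.example.com"
+
+   $ # sign the request with the CA to produce the server certificate
+   $ openssl x509 -req -in server.csr -CA ca-cert.pem -CAkey ca-key.pem \
+       -CAcreateserial -days 365 -out server-cert.pem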
Configure libvirt to enable the VeNCrypt authentication scheme for the VNC server. In :file:`/etc/libvirt/qemu.conf`, uncomment the following settings: @@ -170,9 +223,9 @@ server. In :file:`/etc/libvirt/qemu.conf`, uncomment the following settings: After editing :file:`qemu.conf`, the ``libvirtd`` service must be restarted: -.. code:: shell +.. code-block:: shell - $ systemctl restart libvirtd.service + $ systemctl restart libvirtd.service Changes will not apply to any existing running guests on the Compute node, so this configuration should be done before launching any instances. @@ -185,10 +238,10 @@ scheme, which does no checking. Therefore, it is necessary to enable the ``vencrypt`` authentication scheme by editing the :file:`nova.conf` file to set. -.. code:: +.. code-block:: ini - [vnc] - auth_schemes=vencrypt,none + [vnc] + auth_schemes=vencrypt,none The :oslo.config:option:`vnc.auth_schemes` values should be listed in order of preference. If enabling VeNCrypt on an existing deployment which already has @@ -224,188 +277,275 @@ certificates to the noVNC proxy. The certificate authority cert used to sign ``client-cert.pem`` and sign the compute node VNC server certificates. -The certificates must have v3 basic constraints [3]_ present to indicate the +The certificates must have v3 basic constraints [2]_ present to indicate the permitted key use and purpose data. Once the certificates have been created, the noVNC console proxy service must be told where to find them. This requires editing :file:`nova.conf` to set. -.. code:: +.. code-block:: ini - [vnc] - vencrypt_client_key=/etc/pki/nova-novncproxy/client-key.pem - vencrypt_client_cert=/etc/pki/nova-novncproxy/client-cert.pem - vencrypt_ca_certs=/etc/pki/nova-novncproxy/ca-cert.pem + [vnc] + vencrypt_client_key=/etc/pki/nova-novncproxy/client-key.pem + vencrypt_client_cert=/etc/pki/nova-novncproxy/client-cert.pem + vencrypt_ca_certs=/etc/pki/nova-novncproxy/ca-cert.pem -VNC configuration options -~~~~~~~~~~~~~~~~~~~~~~~~~ -To customize the VNC console, use the following configuration options in your -``nova.conf`` file: +SPICE console +------------- -.. note:: +The VNC protocol is fairly limited, lacking support for multiple monitors, +bi-directional audio, reliable cut-and-paste, video streaming and more. SPICE +is a new protocol that aims to address the limitations in VNC and provide good +remote desktop support. - To support :ref:`live migration `, - you cannot specify a specific IP address for ``server_listen``, because - that IP address does not exist on the destination host. - -.. list-table:: **Description of VNC configuration options** - :header-rows: 1 - :widths: 25 25 - - * - Configuration option = Default value - - Description - * - **[DEFAULT]** - - - * - ``daemon = False`` - - (BoolOpt) Become a daemon (background process) - * - ``key = None`` - - (StrOpt) SSL key file (if separate from cert) - * - ``novncproxy_host = 0.0.0.0`` - - (StrOpt) Host on which to listen for incoming requests - * - ``novncproxy_port = 6080`` - - (IntOpt) Port on which to listen for incoming requests - * - ``record = False`` - - (BoolOpt) Record sessions to FILE.[session_number] - * - ``source_is_ipv6 = False`` - - (BoolOpt) Source is ipv6 - * - ``ssl_only = False`` - - (BoolOpt) Disallow non-encrypted connections - * - ``web = /usr/share/spice-html5`` - - (StrOpt) Run webserver on same port. Serve files from DIR. 
- * - **[vmware]** - - - * - ``vnc_port = 5900`` - - (IntOpt) VNC starting port - * - ``vnc_port_total = 10000`` - - vnc_port_total = 10000 - * - **[vnc]** - - - * - enabled = True - - (BoolOpt) Enable VNC related features - * - novncproxy_base_url = http://127.0.0.1:6080/vnc_auto.html - - (StrOpt) Location of VNC console proxy, in the form - "http://127.0.0.1:6080/vnc_auto.html" - * - server_listen = 127.0.0.1 - - (StrOpt) IP address on which instance vncservers should listen - * - server_proxyclient_address = 127.0.0.1 - - (StrOpt) The address to which proxy clients (like nova-xvpvncproxy) - should connect - * - xvpvncproxy_base_url = http://127.0.0.1:6081/console - - (StrOpt) Location of nova xvp VNC console proxy, in the form - "http://127.0.0.1:6081/console" +SPICE support in OpenStack Compute shares a similar architecture to the VNC +implementation. The OpenStack dashboard uses a SPICE-HTML5 widget in its +console tab that communicates with the :program:`nova-spicehtml5proxy` service +by using SPICE-over-websockets. The :program:`nova-spicehtml5proxy` service +communicates directly with the hypervisor process by using SPICE. -.. note:: +Configuration +~~~~~~~~~~~~~ - - The ``server_proxyclient_address`` defaults to ``127.0.0.1``, which is - the address of the compute host that Compute instructs proxies to use when - connecting to instance servers. +.. important:: - - For all-in-one XenServer domU deployments, set this to ``169.254.0.1.`` + VNC must be explicitly disabled to get access to the SPICE console. Set the + :oslo.config:option:`vnc.enabled` option to ``False`` to disable the + VNC console. - - For multi-host XenServer domU deployments, set to a ``dom0 management IP`` - on the same network as the proxies. +To enable the SPICE console service, you must configure both the +:program:`nova-spicehtml5proxy` service and the :program:`nova-compute` +service. Most options are defined in the :oslo.config:group:`spice` group. - - For multi-host libvirt deployments, set to a host management IP on the - same network as the proxies. +The :program:`nova-spicehtml5proxy` service accepts the following options. -Typical deployment -~~~~~~~~~~~~~~~~~~ +- :oslo.config:option:`daemon` +- :oslo.config:option:`ssl_only` +- :oslo.config:option:`source_is_ipv6` +- :oslo.config:option:`cert` +- :oslo.config:option:`key` +- :oslo.config:option:`web` +- :oslo.config:option:`console.ssl_ciphers` +- :oslo.config:option:`console.ssl_minimum_version` +- :oslo.config:option:`spice.html5proxy_host` +- :oslo.config:option:`spice.html5proxy_port` -A typical deployment has the following components: +For example, to configure this via a ``nova-spicehtml5proxy.conf`` file: -- A ``nova-consoleauth`` process. Typically runs on the controller host. +.. code-block:: ini -- One or more ``nova-novncproxy`` services. Supports browser-based noVNC - clients. For simple deployments, this service typically runs on the same - machine as ``nova-api`` because it operates as a proxy between the public - network and the private compute host network. + [spice] + html5proxy_host = 0.0.0.0 + html5proxy_port = 6082 -- One or more ``nova-xvpvncproxy`` services. Supports the special Java client - discussed here. For simple deployments, this service typically runs on the - same machine as ``nova-api`` because it acts as a proxy between the public - network and the private compute host network. +The :program:`nova-compute` service requires the following options to configure +SPICE console support. -- One or more compute hosts. 
These compute hosts must have correctly configured - options, as follows. +- :oslo.config:option:`spice.enabled` +- :oslo.config:option:`spice.agent_enabled` +- :oslo.config:option:`spice.html5proxy_base_url` +- :oslo.config:option:`spice.server_listen` +- :oslo.config:option:`spice.server_proxyclient_address` -nova-novncproxy (noVNC) -~~~~~~~~~~~~~~~~~~~~~~~ +For example, to configure this via a ``nova.conf`` file: -You must install the noVNC package, which contains the ``nova-novncproxy`` -service. As root, run the following command: +.. code-block:: ini -.. code-block:: console + [spice] + agent_enabled = False + enabled = True + html5proxy_base_url = http://IP_ADDRESS:6082/spice_auto.html + server_listen = 127.0.0.1 + server_proxyclient_address = 127.0.0.1 - # apt-get install nova-novncproxy +Replace ``IP_ADDRESS`` with the IP address from which the proxy is accessible +by the outside world. For example, this may be the management interface IP +address of the controller or the VIP. -.. note:: - It has `been reported`_ that versions of noVNC older than 0.6 do not work - with the ``nova-novncproxy`` service. +Serial +------ - If using non-US key mappings, then you need at least noVNC 1.0.0 for `a fix - `_. +Serial consoles provide an alternative to graphical consoles like VNC or SPICE. +They work a little differently to graphical consoles so an example is +beneficial. The example below uses these nodes: -.. _been reported: https://bugs.launchpad.net/nova/+bug/1752896 +* controller node with IP ``192.168.50.100`` +* compute node 1 with IP ``192.168.50.104`` +* compute node 2 with IP ``192.168.50.105`` -The service starts automatically on installation. +Here's the general flow of actions: -To restart the service, run: +.. figure:: figures/serial-console-flow.svg + :width: 100% + :alt: The serial console flow -.. code-block:: console +1. The user requests a serial console connection string for an instance + from the REST API. +2. The :program:`nova-api` service asks the :program:`nova-compute` service, + which manages that instance, to fulfill that request. +3. That connection string gets used by the user to connect to the + :program:`nova-serialproxy` service. +4. The :program:`nova-serialproxy` service then proxies the console interaction + to the port of the compute node where the instance is running. That port + gets forwarded by the hypervisor (or ironic conductor, for ironic) to the + guest. - # service nova-novncproxy restart +Configuration +~~~~~~~~~~~~~ -The configuration option parameter should point to your ``nova.conf`` file, -which includes the message queue server address and credentials. +To enable the serial console service, you must configure both the +:program:`nova-serialproxy` service and the :program:`nova-compute` service. +Most options are defined in the :oslo.config:group:`serial_console` group. -By default, ``nova-novncproxy`` binds on ``0.0.0.0:6080``. +The :program:`nova-serialproxy` service accepts the following options. 
-To connect the service to your Compute deployment, add the following -configuration options to your ``nova.conf`` file: +- :oslo.config:option:`daemon` +- :oslo.config:option:`ssl_only` +- :oslo.config:option:`source_is_ipv6` +- :oslo.config:option:`cert` +- :oslo.config:option:`key` +- :oslo.config:option:`web` +- :oslo.config:option:`console.ssl_ciphers` +- :oslo.config:option:`console.ssl_minimum_version` +- :oslo.config:option:`serial_console.serialproxy_host` +- :oslo.config:option:`serial_console.serialproxy_port` -- ``server_listen=0.0.0.0`` +For example, to configure this via a ``nova-serialproxy.conf`` file: - Specifies the address on which the VNC service should bind. Make sure it is - assigned one of the compute node interfaces. This address is the one used by - your domain file. +.. code-block:: ini - .. code-block:: console + [serial_console] + serialproxy_host = 0.0.0.0 + serialproxy_port = 6083 - +The :program:`nova-compute` service requires the following options to configure +serial console support. - .. note:: +- :oslo.config:option:`serial_console.enabled` +- :oslo.config:option:`serial_console.base_url` +- :oslo.config:option:`serial_console.proxyclient_address` +- :oslo.config:option:`serial_console.port_range` - To use live migration, use the 0.0.0.0 address. +For example, to configure this via a ``nova.conf`` file: -- ``server_proxyclient_address=127.0.0.1`` +.. code-block:: ini - The address of the compute host that Compute instructs proxies to use when - connecting to instance ``vncservers``. + [serial_console] + enabled = True + base_url = ws://IP_ADDRESS:6083/ + proxyclient_address = 127.0.0.1 + port_range = 10000:20000 + +Replace ``IP_ADDRESS`` with the IP address from which the proxy is accessible +by the outside world. For example, this may be the management interface IP +address of the controller or the VIP. + +There are some things to keep in mind when configuring these options: + +* :oslo.config:option:`serial_console.serialproxy_host` is the address the + :program:`nova-serialproxy` service listens to for incoming connections. +* :oslo.config:option:`serial_console.serialproxy_port` must be the same value + as the port in the URI of :oslo.config:option:`serial_console.base_url`. +* The URL defined in :oslo.config:option:`serial_console.base_url` will form + part of the response the user will get when asking for a serial console + connection string. This means it needs to be an URL the user can connect to. +* :oslo.config:option:`serial_console.proxyclient_address` will be used by the + :program:`nova-serialproxy` service to determine where to connect to for + proxying the console interaction. -Frequently asked questions about VNC access to virtual machines -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -- **Q: What is the difference between ``nova-xvpvncproxy`` and - ``nova-novncproxy``?** +RDP +--- - A: ``nova-xvpvncproxy``, which ships with OpenStack Compute, is a proxy that - supports a simple Java client. nova-novncproxy uses noVNC to provide VNC - support through a web browser. +RDP is a graphical console primarily used with Hyper-V. Nova does not provide a +console proxy service for RDP - instead, an external proxy service, such as the +:program:`wsgate` application provided by `FreeRDP-WebConnect`__, should be +used. 
+ +__ https://github.com/FreeRDP/FreeRDP-WebConnect + +Configuration +~~~~~~~~~~~~~ + +To enable the RDP console service, you must configure both a console proxy +service like :program:`wsgate` and the :program:`nova-compute` service. All +options for the latter service are defined in the :oslo.config:group:`rdp` +group. + +Information on configuring an RDP console proxy service, such as +:program:`wsgate`, is not provided here. However, more information can be found +at `cloudbase.it`__. + +The :program:`nova-compute` service requires the following options to configure +RDP console support. + +- :oslo.config:option:`rdp.enabled` +- :oslo.config:option:`rdp.html5_proxy_base_url` + +For example, to configure this via a ``nova.conf`` file: + +.. code-block:: ini + + [rdp] + enabled = True + html5_proxy_base_url = https://IP_ADDRESS:6083/ + +Replace ``IP_ADDRESS`` with the IP address from which the proxy is accessible +by the outside world. For example, this may be the management interface IP +address of the controller or the VIP. + +__ https://cloudbase.it/freerdp-html5-proxy-windows/ -- **Q: I want VNC support in the OpenStack dashboard. What services do I - need?** - A: You need ``nova-novncproxy``, ``nova-consoleauth``, and correctly - configured compute hosts. +MKS +--- -- **Q: When I use ``nova get-vnc-console`` or click on the VNC tab of the - OpenStack dashboard, it hangs. Why?** +MKS is the protocol used for accessing the console of a virtual machine running +on VMware vSphere. It is very similar to VNC. Due to the architecture of the +VMware vSphere hypervisor, it is not necessary to run a console proxy service. - A: Make sure you are running ``nova-consoleauth`` (in addition to - ``nova-novncproxy``). The proxies rely on ``nova-consoleauth`` to validate - tokens, and waits for a reply from them until a timeout is reached. +Configuration +~~~~~~~~~~~~~ + +To enable the MKS console service, only the :program:`nova-compute` service +must be configured. All options are defined in the :oslo.config:group:`mks` +group. + +The :program:`nova-compute` service requires the following options to configure +MKS console support. + +- :oslo.config:option:`mks.enabled` +- :oslo.config:option:`mks.mksproxy_base_url` + +For example, to configure this via a ``nova.conf`` file: + +.. code-block:: ini + + [mks] + enabled = True + mksproxy_base_url = https://127.0.0.1:6090/ + + +.. _about-nova-consoleauth: + +About ``nova-consoleauth`` +-------------------------- + +The now-removed :program:`nova-consoleauth` service was previously used to +provide a shared service to manage token authentication that the client proxies +outlined below could leverage. Token authentication was moved to the database in +18.0.0 (Rocky) and the service was removed in 20.0.0 (Train). + + +Frequently Asked Questions +-------------------------- + +- **Q: I want VNC support in the OpenStack dashboard. What services do I + need?** + + A: You need ``nova-novncproxy`` and correctly configured compute hosts. - **Q: My VNC proxy worked fine during my all-in-one test, but now it doesn't work on multi host. Why?** @@ -421,13 +561,12 @@ Frequently asked questions about VNC access to virtual machines Your ``nova-compute`` configuration file must set the following values: - .. code-block:: console + .. 
code-block:: ini [vnc] # These flags help construct a connection data structure server_proxyclient_address=192.168.1.2 novncproxy_base_url=http://172.24.1.1:6080/vnc_auto.html - xvpvncproxy_base_url=http://172.24.1.1:6081/console # This is the address where the underlying vncserver (not the proxy) # will listen for connections. @@ -435,11 +574,11 @@ Frequently asked questions about VNC access to virtual machines .. note:: - ``novncproxy_base_url`` and ``xvpvncproxy_base_url`` use a public IP; this - is the URL that is ultimately returned to clients, which generally do not - have access to your private network. Your PROXYSERVER must be able to - reach ``server_proxyclient_address``, because that is the address over - which the VNC connection is proxied. + ``novncproxy_base_url`` uses a public IP; this is the URL that is + ultimately returned to clients, which generally do not have access to your + private network. Your PROXYSERVER must be able to reach + ``server_proxyclient_address``, because that is the address over which the + VNC connection is proxied. - **Q: My noVNC does not work with recent versions of web browsers. Why?** @@ -456,7 +595,7 @@ Frequently asked questions about VNC access to virtual machines Modify the ``width`` and ``height`` options, as follows: - .. code-block:: console + .. code-block:: ini @@ -467,50 +606,9 @@ Frequently asked questions about VNC access to virtual machines console connections, make sure that the value of ``novncproxy_base_url`` is set explicitly where the ``nova-novncproxy`` service is running. -Serial Console --------------- - -The *serial console* feature [1]_ in nova is an alternative for graphical -consoles like *VNC*, *SPICE*, *RDP*. The example below uses these nodes: - -* controller node with IP ``192.168.50.100`` -* compute node 1 with IP ``192.168.50.104`` -* compute node 2 with IP ``192.168.50.105`` - -Here's the general flow of actions: - -.. figure:: figures/serial-console-flow.svg - :width: 100% - :alt: The serial console flow - -1. The user requests a serial console connection string for an instance - from the REST API. -2. The `nova-api` service asks the `nova-compute` service, which manages - that instance, to fulfill that request. -3. That connection string gets used by the user to connect to the - `nova-serialproxy` service. -4. The `nova-serialproxy` service then proxies the console interaction - to the port of the compute node where the instance is running. That - port gets forwarded by the hypervisor into the KVM guest. - -The config options for those nodes, which are in the section -``[serial_console]`` of your ``nova.conf``, are not intuitive at first. -Keep these things in mind: - -* The ``serialproxy_host`` is the address the `nova-serialproxy` service - listens to for incoming connections (see step 3). -* The ``serialproxy_port`` value must be the very same as in the URI - of ``base_url``. -* The ``base_url`` on the compute node will be part of the response the user - will get when asking for a serial console connection string (see step 1 - from above). This means it needs to be an URL the user can connect to. -* The ``proxyclient_address`` on the compute node will be used by the - `nova-serialproxy` service to determine where to connect to for - proxying the console interaction. References ---------- -.. [1] https://specs.openstack.org/openstack/nova-specs/specs/juno/implemented/serial-ports.html -.. [2] https://qemu.weilnetz.de/doc/qemu-doc.html#vnc_005fsec_005fcertificate_005fverify -.. 
[3] https://tools.ietf.org/html/rfc3280#section-4.2.1.10
+.. [1] https://qemu.weilnetz.de/doc/qemu-doc.html#vnc_005fsec_005fcertificate_005fverify
+.. [2] https://tools.ietf.org/html/rfc3280#section-4.2.1.10
diff --git a/doc/source/admin/resource-limits.rst b/doc/source/admin/resource-limits.rst
new file mode 100644
index 00000000000..c74ad31c17b
--- /dev/null
+++ b/doc/source/admin/resource-limits.rst
@@ -0,0 +1,312 @@
+===============
+Resource Limits
+===============
+
+Nova supports configuring limits on individual resources including CPU, memory,
+disk and network. These limits can be used to enforce basic Quality-of-Service
+(QoS) policies on such resources.
+
+.. note::
+
+   Hypervisor-enforced resource limits are distinct from API-enforced user and
+   project quotas. For information on the latter, refer to :doc:`quotas`.
+
+.. warning::
+
+   This feature is poorly tested and poorly maintained. It may no longer work
+   as expected. Where possible, consider using the QoS policies provided by
+   other services, such as
+   :cinder-doc:`Cinder ` and
+   :neutron-doc:`Neutron `.
+
+
+Configuring resource limits
+---------------------------
+
+Resource quota enforcement support is specific to the virt driver in use on
+compute hosts.
+
+libvirt
+~~~~~~~
+
+The libvirt driver supports CPU, disk and VIF limits. Unfortunately all of
+these work quite differently, as discussed below.
+
+CPU limits
+^^^^^^^^^^
+
+Libvirt enforces CPU limits in terms of *shares* and *quotas*, configured
+via :nova:extra-spec:`quota:cpu_shares` and :nova:extra-spec:`quota:cpu_period`
+/ :nova:extra-spec:`quota:cpu_quota`, respectively. Both are implemented using
+the `cgroups v1 cpu controller`__.
+
+CPU shares are a proportional, weighted share of total CPU resources relative
+to other instances. Shares do not limit CPU usage if CPUs are not busy. There
+is no unit and the value is purely relative to other instances, so an instance
+configured with a value of 2048 will get twice as much CPU time as a VM
+configured with the value 1024. For example, to configure a CPU share of 1024
+for a flavor:
+
+.. code-block:: console
+
+   $ openstack flavor set $FLAVOR --property quota:cpu_shares=1024
+
+CPU quotas require both a period and a quota. The CPU period specifies the
+enforcement interval in microseconds, while the CPU quota specifies the maximum
+allowed bandwidth in microseconds that each vCPU of the instance can
+consume. The CPU period must be in the range 1000 (1 ms) to 1,000,000 (1 s) or
+0 (disabled). The CPU quota must be in the range 1000 (1 ms) to 2^64 or 0
+(disabled). Where the CPU quota exceeds the CPU period, the guest
+vCPU process is able to consume multiple pCPUs worth of bandwidth. For example,
+to limit each guest vCPU to 1 pCPU worth of runtime per period:
+
+.. code-block:: console
+
+   $ openstack flavor set $FLAVOR \
+     --property quota:cpu_period=1000 \
+     --property quota:cpu_quota=1000
+
+To limit each guest vCPU to 2 pCPUs worth of runtime per period:
+
+.. code-block:: console
+
+   $ openstack flavor set $FLAVOR \
+     --property quota:cpu_period=1000 \
+     --property quota:cpu_quota=2000
+
+Finally, to limit each guest vCPU to 0.5 pCPUs worth of runtime per period:
+
+.. code-block:: console
+
+   $ openstack flavor set $FLAVOR \
+     --property quota:cpu_period=1000 \
+     --property quota:cpu_quota=500
+
+.. note::
+
+   Smaller periods will ensure a consistent latency response at the expense of
+   burst capacity.
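+
+Shares and quotas can also be combined on a single flavor. As an illustrative
+sketch (reusing the ``$FLAVOR`` placeholder from above), the following gives a
+flavor twice the weight of one configured with 1024 shares while also capping
+each of its vCPUs at 0.5 pCPUs worth of runtime per period:
+
+.. code-block:: console
+
+   $ openstack flavor set $FLAVOR \
+     --property quota:cpu_shares=2048 \
+     --property quota:cpu_period=100000 \
+     --property quota:cpu_quota=50000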
+
+CPU shares and CPU quotas can work hand-in-hand. For example, if two instances
+were configured with :nova:extra-spec:`quota:cpu_shares`\ =1024 and
+:nova:extra-spec:`quota:cpu_period`\ =100000 (100 ms) for both, then
+configuring both with a :nova:extra-spec:`quota:cpu_quota`\ =75000 (75 ms) will
+result in them sharing a host CPU equally, with both getting exactly 50 ms of
+CPU time. If instead only one instance gets
+:nova:extra-spec:`quota:cpu_quota`\ =75000 (75 ms) while the other gets
+:nova:extra-spec:`quota:cpu_quota`\ =25000 (25 ms), then the first will get 3/4
+of the time per period.
+
+.. __: https://man7.org/linux/man-pages/man7/cgroups.7.html
+
+Memory Limits
+^^^^^^^^^^^^^
+
+The libvirt driver does not support memory limits.
+
+Disk I/O Limits
+^^^^^^^^^^^^^^^
+
+Libvirt enforces disk limits through maximum disk read, write and total bytes
+per second, using the :nova:extra-spec:`quota:disk_read_bytes_sec`,
+:nova:extra-spec:`quota:disk_write_bytes_sec` and
+:nova:extra-spec:`quota:disk_total_bytes_sec` extra specs, respectively. It can
+also enforce disk limits through maximum disk read, write and total I/O
+operations per second, using the :nova:extra-spec:`quota:disk_read_iops_sec`,
+:nova:extra-spec:`quota:disk_write_iops_sec` and
+:nova:extra-spec:`quota:disk_total_iops_sec` extra specs, respectively. For
+example, to set a maximum disk write of 10 MB/sec for a flavor:
+
+.. code-block:: console
+
+   $ openstack flavor set $FLAVOR \
+     --property quota:disk_write_bytes_sec=10485760
+
+Network bandwidth limits
+^^^^^^^^^^^^^^^^^^^^^^^^
+
+.. warning::
+
+   These limits are enforced via libvirt and will only work where the network
+   is connected to the instance using a tap interface. They will not work for
+   things like :doc:`SR-IOV VFs `.
+   :neutron-doc:`Neutron's QoS policies ` should be
+   preferred wherever possible.
+
+Libvirt enforces network bandwidth limits through inbound and outbound average,
+using the :nova:extra-spec:`quota:vif_inbound_average` and
+:nova:extra-spec:`quota:vif_outbound_average` extra specs, respectively.
+In addition, optional *peak* values, which specify the maximum rate at which
+a bridge can send data (kB/s), and *burst* values, which specify the number
+of bytes that can be burst at peak speed (kilobytes), can be specified for both
+inbound and outbound traffic, using the
+:nova:extra-spec:`quota:vif_inbound_peak` /
+:nova:extra-spec:`quota:vif_outbound_peak` and
+:nova:extra-spec:`quota:vif_inbound_burst` /
+:nova:extra-spec:`quota:vif_outbound_burst` extra specs, respectively.
+
+For example, to configure **outbound** traffic to an average of 262 Mbit/s
+(32768 kB/s), a peak of 524 Mbit/s, and burst of 65536 kilobytes:
+
+.. code-block:: console
+
+   $ openstack flavor set $FLAVOR \
+     --property quota:vif_outbound_average=32768 \
+     --property quota:vif_outbound_peak=65536 \
+     --property quota:vif_outbound_burst=65536
+
+.. note::
+
+   The speed limit values in the above example are specified in
+   kilobytes/second, while the burst value is in kilobytes.
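+
+Inbound traffic is configured the same way. As a sketch using the same values,
+to apply the equivalent limits to **inbound** traffic:
+
+.. code-block:: console
+
+   $ openstack flavor set $FLAVOR \
+     --property quota:vif_inbound_average=32768 \
+     --property quota:vif_inbound_peak=65536 \
+     --property quota:vif_inbound_burst=65536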
+
+VMWare
+~~~~~~
+
+In contrast to libvirt, the VMWare virt driver enforces resource limits using
+consistent terminology, specifically through relative allocation levels, hard
+upper limits and minimum reservations configured via, for example, the
+:nova:extra-spec:`quota:cpu_shares_level` /
+:nova:extra-spec:`quota:cpu_shares_share`, :nova:extra-spec:`quota:cpu_limit`,
+and :nova:extra-spec:`quota:cpu_reservation` extra specs, respectively.
+
+Allocation levels can be specified using one of ``high``, ``normal``, ``low``,
+or ``custom``. When ``custom`` is specified, the number of shares must be
+specified using e.g. :nova:extra-spec:`quota:cpu_shares_share`. There is no
+unit and the values are relative to other instances on the host. The upper
+limits and reservations, by comparison, are measured in resource-specific
+units, such as MHz for CPUs, and ensure that the instance never uses more
+than, or receives less than, the specified amount of the resource.
+
+CPU limits
+^^^^^^^^^^
+
+CPU limits are configured via the :nova:extra-spec:`quota:cpu_shares_level` /
+:nova:extra-spec:`quota:cpu_shares_share`, :nova:extra-spec:`quota:cpu_limit`,
+and :nova:extra-spec:`quota:cpu_reservation` extra specs.
+
+For example, to configure a CPU allocation level of ``custom`` with 1024
+shares:
+
+.. code-block:: console
+
+   $ openstack flavor set $FLAVOR \
+     --property quota:cpu_shares_level=custom \
+     --property quota:cpu_shares_share=1024
+
+To configure a minimum CPU allocation of 1024 MHz and a maximum of 2048 MHz:
+
+.. code-block:: console
+
+   $ openstack flavor set $FLAVOR \
+     --property quota:cpu_reservation=1024 \
+     --property quota:cpu_limit=2048
+
+Memory limits
+^^^^^^^^^^^^^
+
+Memory limits are configured via the
+:nova:extra-spec:`quota:memory_shares_level` /
+:nova:extra-spec:`quota:memory_shares_share`,
+:nova:extra-spec:`quota:memory_limit`, and
+:nova:extra-spec:`quota:memory_reservation` extra specs.
+
+For example, to configure a memory allocation level of ``custom`` with 1024
+shares:
+
+.. code-block:: console
+
+   $ openstack flavor set $FLAVOR \
+     --property quota:memory_shares_level=custom \
+     --property quota:memory_shares_share=1024
+
+To configure a minimum memory allocation of 1024 MB and a maximum of 2048 MB:
+
+.. code-block:: console
+
+   $ openstack flavor set $FLAVOR \
+     --property quota:memory_reservation=1024 \
+     --property quota:memory_limit=2048
+
+Disk I/O limits
+^^^^^^^^^^^^^^^
+
+Disk I/O limits are configured via the
+:nova:extra-spec:`quota:disk_io_shares_level` /
+:nova:extra-spec:`quota:disk_io_shares_share`,
+:nova:extra-spec:`quota:disk_io_limit`, and
+:nova:extra-spec:`quota:disk_io_reservation` extra specs.
+
+For example, to configure a disk I/O allocation level of ``custom`` with 1024
+shares:
+
+.. code-block:: console
+
+   $ openstack flavor set $FLAVOR \
+     --property quota:disk_io_shares_level=custom \
+     --property quota:disk_io_shares_share=1024
+
+To configure a minimum disk I/O allocation of 1024 MB and a maximum of 2048 MB:
+
+.. code-block:: console
+
+   $ openstack flavor set $FLAVOR \
+     --property quota:disk_io_reservation=1024 \
+     --property quota:disk_io_limit=2048
+
+Network bandwidth limits
+^^^^^^^^^^^^^^^^^^^^^^^^
+
+Network bandwidth limits are configured via the
+:nova:extra-spec:`quota:vif_shares_level` /
+:nova:extra-spec:`quota:vif_shares_share`,
+:nova:extra-spec:`quota:vif_limit`, and
+:nova:extra-spec:`quota:vif_reservation` extra specs.
+
+For example, to configure a network bandwidth allocation level of ``custom``
+with 1024 shares:
+
+.. code-block:: console
+
+   $ openstack flavor set $FLAVOR \
+     --property quota:vif_shares_level=custom \
+     --property quota:vif_shares_share=1024
+
+To configure a minimum bandwidth allocation of 1024 Mbits/sec and a maximum of
+2048 Mbits/sec:
+
+.. code-block:: console
+
+   $ openstack flavor set $FLAVOR \
+     --property quota:vif_reservation=1024 \
+     --property quota:vif_limit=2048
+
+Hyper-V
+~~~~~~~
+
+CPU limits
+^^^^^^^^^^
+
+The Hyper-V driver does not support CPU limits.
+
+Memory limits
+^^^^^^^^^^^^^
+
+The Hyper-V driver does not support memory limits.
+ +Disk I/O limits +^^^^^^^^^^^^^^^ + +Hyper-V enforces disk limits through maximum total bytes and total I/O +operations per second, using the :nova:extra-spec:`quota:disk_total_bytes_sec` +and :nova:extra-spec:`quota:disk_total_iops_sec` extra specs, respectively. For +example, to set a maximum disk read/write of 10 MB/sec for a flavor: + +.. code-block:: console + + $ openstack flavor set $FLAVOR \ + --property quota:disk_total_bytes_sec=10485760 + +Network bandwidth limits +^^^^^^^^^^^^^^^^^^^^^^^^ + +The Hyper-V driver does not support network bandwidth limits. diff --git a/doc/source/admin/root-wrap-reference.rst b/doc/source/admin/root-wrap-reference.rst index b25b8200e8c..1a94d616c67 100644 --- a/doc/source/admin/root-wrap-reference.rst +++ b/doc/source/admin/root-wrap-reference.rst @@ -70,7 +70,7 @@ and is a Kilo release feature. Including this workaround in your configuration file safeguards your environment from issues that can impair root wrapper performance. Tool changes that have impacted `Python Build Reasonableness (PBR) -`__ for example, are a known +`__ for example, are a known issue that affects root wrapper performance. To set up this workaround, configure the ``disable_rootwrap`` option in the diff --git a/doc/source/admin/scheduling.rst b/doc/source/admin/scheduling.rst new file mode 100644 index 00000000000..0b93792ac7a --- /dev/null +++ b/doc/source/admin/scheduling.rst @@ -0,0 +1,1416 @@ +================== +Compute schedulers +================== + +Compute uses the ``nova-scheduler`` service to determine how to dispatch +compute requests. For example, the ``nova-scheduler`` service determines on +which host or node a VM should launch. You can configure the scheduler through +a variety of options. + +In the default configuration, this scheduler considers hosts that meet all the +following criteria: + +* Are in the requested :term:`Availability Zone` (``AvailabilityZoneFilter``). + +* Can service the request meaning the nova-compute service handling the target + node is available and not disabled (``ComputeFilter``). + +* Satisfy the extra specs associated with the instance type + (``ComputeCapabilitiesFilter``). + +* Satisfy any architecture, hypervisor type, or virtual machine mode properties + specified on the instance's image properties (``ImagePropertiesFilter``). + +* Are on a different host than other instances of a group (if requested) + (``ServerGroupAntiAffinityFilter``). + +* Are in a set of group hosts (if requested) (``ServerGroupAffinityFilter``). + +The scheduler chooses a new host when an instance is migrated, resized, +evacuated or unshelved after being shelve offloaded. + +When evacuating instances from a host, the scheduler service honors the target +host defined by the administrator on the :command:`nova evacuate` command. If +a target is not defined by the administrator, the scheduler determines the +target host. For information about instance evacuation, see +:ref:`Evacuate instances `. + + +.. _compute-scheduler-filters: + +Prefilters +---------- + +As of the Rocky release, the scheduling process includes a prefilter step to +increase the efficiency of subsequent stages. These *prefilters* are largely +optional and serve to augment the request that is sent to placement to reduce +the set of candidate compute hosts based on attributes that placement is able +to answer for us ahead of time. In addition to the prefilters listed here, also +see :ref:`tenant-isolation-with-placement` and +:ref:`availability-zones-with-placement`. 
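+
+Each prefilter is toggled by an option in the ``[scheduler]`` group of
+``nova.conf``. As a minimal sketch, enabling the image-type prefilter described
+in the next section looks like:
+
+.. code-block:: ini
+
+   [scheduler]
+   query_placement_for_image_type_support = True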
+
+Compute Image Type Support
+~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+.. versionadded:: 20.0.0 (Train)
+
+Starting in the Train release, there is a prefilter available for
+excluding compute nodes that do not support the ``disk_format`` of the
+image used in a boot request. This behavior is enabled by setting
+:oslo.config:option:`scheduler.query_placement_for_image_type_support` to
+``True``. For example, the libvirt driver, when using ceph as an ephemeral
+backend, does not support ``qcow2`` images (without an expensive conversion
+step). In this case (and especially if you have a mix of ceph and
+non-ceph backed computes), enabling this feature will ensure that the
+scheduler does not send requests to boot a ``qcow2`` image to computes
+backed by ceph.
+
+Compute Disabled Status Support
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+.. versionadded:: 20.0.0 (Train)
+
+Starting in the Train release, there is a mandatory `pre-filter
+`_
+which will exclude disabled compute nodes, similar to (but not fully
+replacing) the `ComputeFilter`_. Compute node resource providers with the
+``COMPUTE_STATUS_DISABLED`` trait will be excluded as scheduling candidates.
+The trait is managed by the ``nova-compute`` service and should mirror the
+``disabled`` status on the related compute service record in the
+`os-services`_ API. For example, if a compute service's status is ``disabled``,
+the related compute node resource provider(s) for that service should have the
+``COMPUTE_STATUS_DISABLED`` trait. When the service status is ``enabled``, the
+``COMPUTE_STATUS_DISABLED`` trait shall be removed.
+
+If the compute service is down when the status is changed, the trait will be
+synchronized by the compute service when it is restarted. Similarly, if an
+error occurs when trying to add or remove the trait on a given resource
+provider, the trait will be synchronized when the ``update_available_resource``
+periodic task runs - which is controlled by the
+:oslo.config:option:`update_resources_interval` configuration option.
+
+.. _os-services: https://docs.openstack.org/api-ref/compute/#compute-services-os-services
+
+Isolate Aggregates
+~~~~~~~~~~~~~~~~~~
+
+.. versionadded:: 20.0.0 (Train)
+
+Starting in the Train release, there is an optional placement pre-request
+filter, :doc:`/reference/isolate-aggregates`. When enabled, the traits required
+in the server's flavor and image must be at least those required in an
+aggregate's metadata in order for the server to be eligible to boot on hosts in
+that aggregate.
+
+
+The Filter Scheduler
+--------------------
+
+.. versionchanged:: 23.0.0 (Wallaby)
+
+   Support for custom scheduler drivers was removed. Only the filter scheduler
+   is now supported by nova.
+
+Nova's scheduler, known as the *filter scheduler*, supports filtering and
+weighting to make informed decisions on where a new instance should be created.
+
+When the scheduler receives a request for a resource, it first applies filters
+to determine which hosts are eligible for consideration when dispatching a
+resource. Filters are binary: either a host is accepted by the filter, or it is
+rejected. Hosts that are accepted by the filter are then processed by a
+different algorithm to decide which hosts to use for that request, as described
+in the :ref:`weights` section.
+
+**Filtering**
+
+.. figure:: /_static/images/filtering-workflow-1.png
+
+The :oslo.config:option:`filter_scheduler.available_filters` config option
+provides the Compute service with the list of the filters that are available
+for use by the scheduler.
The default setting specifies all of the filters that
+are included with the Compute service. This configuration option can be
+specified multiple times. For example, if you implemented your own custom
+filter in Python called ``myfilter.MyFilter`` and you wanted to use both the
+built-in filters and your custom filter, your :file:`nova.conf` file would
+contain:
+
+.. code-block:: ini
+
+   [filter_scheduler]
+   available_filters = nova.scheduler.filters.all_filters
+   available_filters = myfilter.MyFilter
+
+The :oslo.config:option:`filter_scheduler.enabled_filters` configuration option
+in ``nova.conf`` defines the list of filters that are applied by the
+``nova-scheduler`` service.
+
+
+Filters
+-------
+
+The following sections describe the available compute filters.
+
+Filters are configured using the following config options:
+
+- :oslo.config:option:`filter_scheduler.available_filters` - Defines filter
+  classes made available to the scheduler. This setting can be used multiple
+  times.
+- :oslo.config:option:`filter_scheduler.enabled_filters` - Of the available
+  filters, defines those that the scheduler uses by default.
+
+Each filter selects hosts in a different way and has different costs. The order
+of :oslo.config:option:`filter_scheduler.enabled_filters` affects scheduling
+performance. The general suggestion is to filter out invalid hosts as soon as
+possible to avoid unnecessary costs. You can sort the
+:oslo.config:option:`filter_scheduler.enabled_filters`
+items by their costs in reverse order. For example, ``ComputeFilter`` is best
+placed before any resource-calculating filters like ``NUMATopologyFilter``.
+
+In medium/large environments, placing ``AvailabilityZoneFilter`` before any
+capability or resource-calculating filters can be useful.
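+
+For example, a sketch of an ordering that follows this advice, using only
+filters described in the sections below:
+
+.. code-block:: ini
+
+   [filter_scheduler]
+   enabled_filters = AvailabilityZoneFilter, ComputeFilter, ComputeCapabilitiesFilter, ImagePropertiesFilter, NUMATopologyFilter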
+
+.. _AggregateImagePropertiesIsolation:
+
+``AggregateImagePropertiesIsolation``
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+.. versionchanged:: 12.0.0 (Liberty)
+
+   Prior to 12.0.0 Liberty, it was possible to specify and use arbitrary
+   metadata with this filter. Starting in Liberty, nova only parses
+   :glance-doc:`standard metadata `. If
+   you wish to use arbitrary metadata, consider using the
+   :ref:`AggregateInstanceExtraSpecsFilter` filter instead.
+
+Matches properties defined in an image's metadata against those of aggregates
+to determine host matches:
+
+* If a host belongs to an aggregate and the aggregate defines one or more
+  metadata that matches an image's properties, that host is a candidate to boot
+  the image's instance.
+
+* If a host does not belong to any aggregate, it can boot instances from all
+  images.
+
+For example, the following aggregate ``myWinAgg`` has the Windows operating
+system as metadata (named 'windows'):
+
+.. code-block:: console
+
+   $ openstack aggregate show myWinAgg
+   +-------------------+----------------------------+
+   | Field             | Value                      |
+   +-------------------+----------------------------+
+   | availability_zone | zone1                      |
+   | created_at        | 2017-01-01T15:36:44.000000 |
+   | deleted           | False                      |
+   | deleted_at        | None                       |
+   | hosts             | ['sf-devel']               |
+   | id                | 1                          |
+   | name              | myWinAgg                   |
+   | properties        | os_distro='windows'        |
+   | updated_at        | None                       |
+   +-------------------+----------------------------+
+
+In this example, because the following Win-2012 image has the ``windows``
+property, it boots on the ``sf-devel`` host (all other filters being equal):
+
+.. code-block:: console
+
+   $ openstack image show Win-2012
+   +------------------+------------------------------------------------------+
+   | Field            | Value                                                |
+   +------------------+------------------------------------------------------+
+   | checksum         | ee1eca47dc88f4879d8a229cc70a07c6                     |
+   | container_format | bare                                                 |
+   | created_at       | 2016-12-13T09:30:30Z                                 |
+   | disk_format      | qcow2                                                |
+   | ...                                                                     |
+   | name             | Win-2012                                             |
+   | ...                                                                     |
+   | properties       | os_distro='windows'                                  |
+   | ...                                                                     |
+
+You can configure the ``AggregateImagePropertiesIsolation`` filter by using the
+following options in the ``nova.conf`` file:
+
+- :oslo.config:option:`filter_scheduler.aggregate_image_properties_isolation_namespace`
+- :oslo.config:option:`filter_scheduler.aggregate_image_properties_isolation_separator`
+
+.. note::
+
+   This filter has limitations as described in `bug 1677217
+   <https://bugs.launchpad.net/nova/+bug/1677217>`_,
+   which are addressed by the placement :doc:`/reference/isolate-aggregates`
+   request filter.
+
+Refer to :doc:`/admin/aggregates` for more information.
+
+
+.. _AggregateInstanceExtraSpecsFilter:
+
+``AggregateInstanceExtraSpecsFilter``
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Matches properties defined in extra specs for an instance type against
+admin-defined properties on a host aggregate. Works with specifications that
+are scoped with ``aggregate_instance_extra_specs``. Multiple values can be
+given, as a comma-separated list. For backward compatibility, also works with
+non-scoped specifications; this action is highly discouraged because it
+conflicts with the :ref:`ComputeCapabilitiesFilter` filter when you enable both
+filters.
+
+Refer to :doc:`/admin/aggregates` for more information.
+
+
+.. _AggregateIoOpsFilter:
+
+``AggregateIoOpsFilter``
+~~~~~~~~~~~~~~~~~~~~~~~~
+
+Filters hosts by disk allocation with a per-aggregate ``max_io_ops_per_host``
+value. If the per-aggregate value is not found, the value falls back to the
+global setting defined by the
+:oslo.config:option:`filter_scheduler.max_io_ops_per_host` config option.
+If the host is in more than one aggregate and more than one value is found, the
+minimum value will be used.
+
+Refer to :doc:`/admin/aggregates` and :ref:`IoOpsFilter` for more information.
+
+
+.. _AggregateMultiTenancyIsolation:
+
+``AggregateMultiTenancyIsolation``
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Ensures hosts in tenant-isolated host aggregates will only be available to a
+specified set of tenants. If a host is in an aggregate that has the
+``filter_tenant_id`` metadata key, the host can build instances from only that
+tenant or comma-separated list of tenants. A host can be in different
+aggregates. If a host does not belong to an aggregate with the metadata key,
+the host can build instances from all tenants. This does not restrict the
+tenant from creating servers on hosts outside the tenant-isolated aggregate.
+
+For example, consider there are two available hosts for scheduling, ``HostA``
+and ``HostB``. ``HostB`` is in an aggregate isolated to tenant ``X``. A server
+create request from tenant ``X`` will result in either ``HostA`` *or* ``HostB``
+as candidates during scheduling. A server create request from another tenant
+``Y`` will result in only ``HostA`` being a scheduling candidate since
+``HostA`` is not part of the tenant-isolated aggregate.
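+
+For example, a sketch of isolating an aggregate to a single tenant via the
+``filter_tenant_id`` metadata key (the aggregate name and ``$TENANT_ID`` are
+placeholders):
+
+.. code-block:: console
+
+   $ openstack aggregate create tenant-x-agg
+   $ openstack aggregate set --property filter_tenant_id=$TENANT_ID tenant-x-agg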
+
+.. note::
+
+   There is a `known limitation
+   `_ with the number of tenants
+   that can be isolated per aggregate using this filter. This limitation does
+   not exist, however, for the :ref:`tenant-isolation-with-placement`
+   filtering capability added in the 18.0.0 Rocky release.
+
+
+.. _AggregateNumInstancesFilter:
+
+``AggregateNumInstancesFilter``
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Filters hosts in an aggregate by the number of instances with a per-aggregate
+``max_instances_per_host`` value. If the per-aggregate value is not found, the
+value falls back to the global setting defined by the
+:oslo.config:option:`filter_scheduler.max_instances_per_host` config option.
+If the host is in more than one aggregate and thus more than one value is
+found, the minimum value will be used.
+
+Refer to :doc:`/admin/aggregates` and :ref:`NumInstancesFilter` for more
+information.
+
+
+.. _AggregateTypeAffinityFilter:
+
+``AggregateTypeAffinityFilter``
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Filters hosts in an aggregate if the name of the instance's flavor matches that
+of the ``instance_type`` key set in the aggregate's metadata or if the
+``instance_type`` key is not set.
+
+The value of the ``instance_type`` metadata entry is a string that may contain
+either a single ``instance_type`` name or a comma-separated list of
+``instance_type`` names, such as ``m1.nano`` or ``m1.nano,m1.small``.
+
+.. note::
+
+   Instance types are a historical name for flavors.
+
+Refer to :doc:`/admin/aggregates` for more information.
+
+
+``AllHostsFilter``
+~~~~~~~~~~~~~~~~~~
+
+This is a no-op filter. It does not eliminate any of the available hosts.
+
+
+.. _AvailabilityZoneFilter:
+
+``AvailabilityZoneFilter``
+~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Filters hosts by availability zone. It passes hosts matching the availability
+zone specified in the instance properties. Use a comma to specify multiple
+zones. The filter will then ensure it matches any zone specified.
+
+You must enable this filter for the scheduler to respect availability zones in
+requests.
+
+Refer to :doc:`/admin/availability-zones` for more information.
+
+.. _ComputeCapabilitiesFilter:
+
+``ComputeCapabilitiesFilter``
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Filters hosts by matching properties defined in flavor extra specs against
+compute capabilities. If an extra specs key contains a colon (``:``), anything
+before the colon is treated as a namespace and anything after the colon is
+treated as the key to be matched. If a namespace is present and is not
+``capabilities``, the filter ignores the namespace.
+For example, ``capabilities:cpu_info:features`` is a valid scope format.
+For backward compatibility, the filter also treats the
+extra specs key as the key to be matched if no namespace is present; this
+action is highly discouraged because it conflicts with the
+:ref:`AggregateInstanceExtraSpecsFilter` filter when you enable both filters.
+
+The extra specifications can have an operator at the beginning of the value
+string of a key/value pair. If there is no operator specified, then a
+default operator of ``s==`` is used. Valid operators are:
+
+* ``=`` (equal to or greater than as a number; same as vcpus case)
+* ``==`` (equal to as a number)
+* ``!=`` (not equal to as a number)
+* ``>=`` (greater than or equal to as a number)
+* ``<=`` (less than or equal to as a number)
+* ``s==`` (equal to as a string)
+* ``s!=`` (not equal to as a string)
+* ``s>=`` (greater than or equal to as a string)
+* ``s>`` (greater than as a string)
+* ``s<=`` (less than or equal to as a string)
+* ``s<`` (less than as a string)
+* ``<in>`` (substring)
+* ``<all-in>`` (all elements contained in collection)
+* ``<or>`` (find one of these)
+
+Examples are: ``>= 5``, ``s== 2.1.0``, ``<in> gcc``, ``<all-in> aes mmx``, and
+``<or> fpu <or> gpu``
+
+Some of the attributes that can be used as keys, along with example values,
+include:
+
+* ``free_ram_mb`` (compared with a number, values like ``>= 4096``)
+* ``free_disk_mb`` (compared with a number, values like ``>= 10240``)
+* ``host`` (compared with a string, values like ``<in> compute``, ``s== compute_01``)
+* ``hypervisor_type`` (compared with a string, values like ``s== QEMU``, ``s== powervm``)
+* ``hypervisor_version`` (compared with a number, values like ``>= 1005003``, ``== 2000000``)
+* ``num_instances`` (compared with a number, values like ``<= 10``)
+* ``num_io_ops`` (compared with a number, values like ``<= 5``)
+* ``vcpus_total`` (compared with a number, values like ``= 48``, ``>=24``)
+* ``vcpus_used`` (compared with a number, values like ``= 0``, ``<= 10``)
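+
+For example, a sketch (the flavor name is a placeholder) that pins a flavor to
+QEMU hosts by matching the ``hypervisor_type`` attribute through the
+``capabilities`` namespace:
+
+.. code-block:: console
+
+   $ openstack flavor set $FLAVOR \
+     --property capabilities:hypervisor_type='s== QEMU'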
+
+Some virt drivers support reporting CPU traits to the Placement service. With
+that feature available, you should consider using traits in flavors instead of
+``ComputeCapabilitiesFilter`` because traits provide consistent naming for CPU
+features in some virt drivers and querying traits is efficient. For more
+details, refer to :doc:`/user/support-matrix`,
+:ref:`Required traits `,
+:ref:`Forbidden traits ` and
+`Report CPU features to the Placement service `_.
+
+Also refer to `Compute capabilities as traits`_.
+
+
+.. _ComputeFilter:
+
+``ComputeFilter``
+~~~~~~~~~~~~~~~~~
+
+Passes all hosts that are operational and enabled.
+
+In general, you should always enable this filter.
+
+
+``DifferentHostFilter``
+~~~~~~~~~~~~~~~~~~~~~~~
+
+Schedules the instance on a different host from a set of instances. To take
+advantage of this filter, the requester must pass a scheduler hint, using
+``different_host`` as the key and a list of instance UUIDs as the value. This
+filter is the opposite of the ``SameHostFilter``.
+
+For example, when using the :command:`openstack server create` command, use the
+``--hint`` flag:
+
+.. code-block:: console
+
+   $ openstack server create \
+     --image cedef40a-ed67-4d10-800e-17455edce175 --flavor 1 \
+     --hint different_host=a0cf03a5-d921-4877-bb5c-86d26cf818e1 \
+     --hint different_host=8c19174f-4220-44f0-824a-cd1eeef10287 \
+     server-1
+
+With the API, use the ``os:scheduler_hints`` key. For example:
+
+.. code-block:: json
+
+   {
+       "server": {
+           "name": "server-1",
+           "imageRef": "cedef40a-ed67-4d10-800e-17455edce175",
+           "flavorRef": "1"
+       },
+       "os:scheduler_hints": {
+           "different_host": [
+               "a0cf03a5-d921-4877-bb5c-86d26cf818e1",
+               "8c19174f-4220-44f0-824a-cd1eeef10287"
+           ]
+       }
+   }
+
+
+.. _ImagePropertiesFilter:
+
+``ImagePropertiesFilter``
+~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Filters hosts based on properties defined on the instance's image. It passes
+hosts that can support the specified image properties contained in the
+instance. Properties include the architecture, hypervisor type, hypervisor
+version, and virtual machine mode.
+ +For example, an instance might require a host that runs an ARM-based processor, +and QEMU as the hypervisor. You can decorate an image with these properties by +using: + +.. code-block:: console + + $ openstack image set --architecture arm --property img_hv_type=qemu \ + img-uuid + +The image properties that the filter checks for are: + +``hw_architecture`` + Describes the machine architecture required by the image. Examples are + ``i686``, ``x86_64``, ``arm``, and ``ppc64``. + + .. versionchanged:: 12.0.0 (Liberty) + + This was previously called ``architecture``. + +``img_hv_type`` + Describes the hypervisor required by the image. Examples are ``qemu`` + and ``hyperv``. + + .. note:: + + ``qemu`` is used for both QEMU and KVM hypervisor types. + + .. versionchanged:: 12.0.0 (Liberty) + + This was previously called ``hypervisor_type``. + +``img_hv_requested_version`` + Describes the hypervisor version required by the image. The property is + supported for HyperV hypervisor type only. It can be used to enable support for + multiple hypervisor versions, and to prevent instances with newer HyperV tools + from being provisioned on an older version of a hypervisor. If available, the + property value is compared to the hypervisor version of the compute host. + + To filter the hosts by the hypervisor version, add the + ``img_hv_requested_version`` property on the image as metadata and pass an + operator and a required hypervisor version as its value: + + .. code-block:: console + + $ openstack image set --property hypervisor_type=hyperv --property \ + hypervisor_version_requires=">=6000" img-uuid + + .. versionchanged:: 12.0.0 (Liberty) + + This was previously called ``hypervisor_version_requires``. + +``hw_vm_mode`` + describes the hypervisor application binary interface (ABI) required by the + image. Examples are ``xen`` for Xen 3.0 paravirtual ABI, ``hvm`` for native + ABI, and ``exe`` for container virt executable ABI. + + .. versionchanged:: 12.0.0 (Liberty) + + This was previously called ``vm_mode``. + + +``IsolatedHostsFilter`` +~~~~~~~~~~~~~~~~~~~~~~~ + +Allows the admin to define a special (isolated) set of images and a special +(isolated) set of hosts, such that the isolated images can only run on the +isolated hosts, and the isolated hosts can only run isolated images. The flag +``restrict_isolated_hosts_to_isolated_images`` can be used to force isolated +hosts to only run isolated images. + +The logic within the filter depends on the +``restrict_isolated_hosts_to_isolated_images`` config option, which defaults +to True. When True, a volume-backed instance will not be put on an isolated +host. When False, a volume-backed instance can go on any host, isolated or +not. + +The admin must specify the isolated set of images and hosts using the +:oslo.config:option:`filter_scheduler.isolated_hosts` and +:oslo.config:option:`filter_scheduler.isolated_images` config options. +For example: + +.. code-block:: ini + + [filter_scheduler] + isolated_hosts = server1, server2 + isolated_images = 342b492c-128f-4a42-8d3a-c5088cf27d13, ebd267a6-ca86-4d6c-9a0e-bd132d6b7d09 + +You can also specify that isolated host only be used for specific isolated +images using the +:oslo.config:option:`filter_scheduler.restrict_isolated_hosts_to_isolated_images` +config option. + + +.. _IoOpsFilter: + +``IoOpsFilter`` +~~~~~~~~~~~~~~~ + +Filters hosts by concurrent I/O operations on it. Hosts with too many +concurrent I/O operations will be filtered out. 
The +:oslo.config:option:`filter_scheduler.max_io_ops_per_host` option specifies the +maximum number of I/O intensive instances allowed to run on a host. +A host will be ignored by the scheduler if more than +:oslo.config:option:`filter_scheduler.max_io_ops_per_host` instances in build, +resize, snapshot, migrate, rescue or unshelve task states are running on it. + + +``JsonFilter`` +~~~~~~~~~~~~~~~ + +.. warning:: + + This filter is not enabled by default and not comprehensively + tested, and thus could fail to work as expected in non-obvious ways. + Furthermore, the filter variables are based on attributes of the + `HostState`_ class which could change from release to release so usage + of this filter is generally not recommended. Consider using other filters + such as the :ref:`ImagePropertiesFilter` or + :ref:`traits-based scheduling `. + +Allows a user to construct a custom filter by passing a +scheduler hint in JSON format. The following operators are supported: + +* ``=`` +* ``<`` +* ``>`` +* ``in`` +* ``<=`` +* ``>=`` +* ``not`` +* ``or`` +* ``and`` + +Unlike most other filters that rely on information provided via scheduler +hints, this filter filters on attributes in the `HostState`_ class such as the +following variables: + +* ``$free_ram_mb`` +* ``$free_disk_mb`` +* ``$hypervisor_hostname`` +* ``$total_usable_ram_mb`` +* ``$vcpus_total`` +* ``$vcpus_used`` + +Using the :command:`openstack server create` command, use the ``--hint`` flag: + +.. code-block:: console + + $ openstack server create --image 827d564a-e636-4fc4-a376-d36f7ebe1747 \ + --flavor 1 --hint query='[">=","$free_ram_mb",1024]' server1 + +With the API, use the ``os:scheduler_hints`` key: + +.. code-block:: json + + { + "server": { + "name": "server-1", + "imageRef": "cedef40a-ed67-4d10-800e-17455edce175", + "flavorRef": "1" + }, + "os:scheduler_hints": { + "query": "[\">=\",\"$free_ram_mb\",1024]" + } + } + +.. _HostState: https://opendev.org/openstack/nova/src/branch/master/nova/scheduler/host_manager.py + + +``MetricsFilter`` +~~~~~~~~~~~~~~~~~ + +Use in collaboration with the ``MetricsWeigher`` weigher. Filters hosts that +do not report the metrics specified in +:oslo.config:option:`metrics.weight_setting`, thus ensuring the metrics +weigher will not fail due to these hosts. + + +.. _NUMATopologyFilter: + +``NUMATopologyFilter`` +~~~~~~~~~~~~~~~~~~~~~~ + +Filters hosts based on the NUMA topology that was specified for the instance +through the use of flavor ``extra_specs`` in combination with the image +properties, as described in detail in :doc:`/admin/cpu-topologies`. The filter +will try to match the exact NUMA cells of the instance to those of the host. It +will consider the standard over-subscription limits for each host NUMA cell, +and provide limits to the compute host accordingly. + +This filter is essential if using instances with features that rely on NUMA, +such as instance NUMA topologies or CPU pinning. + +.. note:: + + If instance has no topology defined, it will be considered for any host. If + instance has a topology defined, it will be considered only for NUMA capable + hosts. + + +.. _NumInstancesFilter: + +``NumInstancesFilter`` +~~~~~~~~~~~~~~~~~~~~~~ + +Filters hosts based on the number of instances running on them. Hosts that have +more instances running than specified by the +:oslo.config:option:`filter_scheduler.max_instances_per_host` config option are +filtered out. + + +.. 
_PciPassthroughFilter: + +``PciPassthroughFilter`` +~~~~~~~~~~~~~~~~~~~~~~~~ + +The filter schedules instances on a host if the host has devices that meet the +device requests in the ``extra_specs`` attribute for the flavor. + +This filter is essential if using instances with PCI device requests or where +SR-IOV-based networking is in use on hosts. + + +``SameHostFilter`` +~~~~~~~~~~~~~~~~~~ + +Schedules an instance on the same host as all other instances in a set of +instances. To take advantage of this filter, the requester must pass a +scheduler hint, using ``same_host`` as the key and a list of instance UUIDs as +the value. This filter is the opposite of the ``DifferentHostFilter``. + +For example, when using the :command:`openstack server create` command, use the +``--hint`` flag: + +.. code-block:: console + + $ openstack server create \ + --image cedef40a-ed67-4d10-800e-17455edce175 --flavor 1 \ + --hint same_host=a0cf03a5-d921-4877-bb5c-86d26cf818e1 \ + --hint same_host=8c19174f-4220-44f0-824a-cd1eeef10287 \ + server-1 + +With the API, use the ``os:scheduler_hints`` key: + +.. code-block:: json + + { + "server": { + "name": "server-1", + "imageRef": "cedef40a-ed67-4d10-800e-17455edce175", + "flavorRef": "1" + }, + "os:scheduler_hints": { + "same_host": [ + "a0cf03a5-d921-4877-bb5c-86d26cf818e1", + "8c19174f-4220-44f0-824a-cd1eeef10287" + ] + } + } + + +.. _ServerGroupAffinityFilter: + +``ServerGroupAffinityFilter`` +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +Restricts instances belonging to a server group to the same host(s). To take +advantage of this filter, the requester must create a server group with an +``affinity`` policy, and pass a scheduler hint, using ``group`` as the key and +the server group UUID as the value. + +For example, when using the :command:`openstack server create` command, use the +``--hint`` flag: + +.. code-block:: console + + $ openstack server group create --policy affinity group-1 + $ openstack server create --image IMAGE_ID --flavor 1 \ + --hint group=SERVER_GROUP_UUID server-1 + + +.. _ServerGroupAntiAffinityFilter: + +``ServerGroupAntiAffinityFilter`` +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +Restricts instances belonging to a server group to separate hosts. +To take advantage of this filter, the requester must create a +server group with an ``anti-affinity`` policy, and pass a scheduler hint, using +``group`` as the key and the server group UUID as the value. + +For example, when using the :command:`openstack server create` command, use the +``--hint`` flag: + +.. code-block:: console + + $ openstack server group create --policy anti-affinity group-1 + $ openstack server create --image IMAGE_ID --flavor 1 \ + --hint group=SERVER_GROUP_UUID server-1 + + +``SimpleCIDRAffinityFilter`` +~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +.. todo:: + + Does this filter still work with neutron? + +Schedules the instance based on host IP subnet range. To take advantage of +this filter, the requester must specify a range of valid IP address in CIDR +format, by passing two scheduler hints: + +``build_near_host_ip`` + The first IP address in the subnet (for example, ``192.168.1.1``) + +``cidr`` + The CIDR that corresponds to the subnet (for example, ``/24``) + +When using the :command:`openstack server create` command, use the ``--hint`` +flag. For example, to specify the IP subnet ``192.168.1.1/24``: + +.. 
code-block:: console
+
+   $ openstack server create \
+     --image cedef40a-ed67-4d10-800e-17455edce175 --flavor 1 \
+     --hint build_near_host_ip=192.168.1.1 --hint cidr=/24 \
+     server-1
+
+With the API, use the ``os:scheduler_hints`` key:
+
+.. code-block:: json
+
+   {
+       "server": {
+           "name": "server-1",
+           "imageRef": "cedef40a-ed67-4d10-800e-17455edce175",
+           "flavorRef": "1"
+       },
+       "os:scheduler_hints": {
+           "build_near_host_ip": "192.168.1.1",
+           "cidr": "/24"
+       }
+   }
+
+
+.. _weights:
+
+Weights
+-------
+
+.. figure:: /_static/images/nova-weighting-hosts.png
+
+When resourcing instances, the filter scheduler filters and weights each host
+in the list of acceptable hosts. Each time the scheduler selects a host, it
+virtually consumes resources on it and subsequent selections are adjusted
+accordingly. This process is useful when the customer asks for a large number
+of instances, because a weight is computed for each requested instance.
+
+In order to prioritize one weigher against another, all the weighers have to
+define a multiplier that will be applied before computing the weight for a
+node. All the weights are normalized beforehand so that the multiplier can be
+applied easily. Therefore, the final weight for the object will be::
+
+    weight = w1_multiplier * norm(w1) + w2_multiplier * norm(w2) + ...
+
+Hosts are weighted based on the following config options:
+
+- :oslo.config:option:`filter_scheduler.host_subset_size`
+- :oslo.config:option:`filter_scheduler.weight_classes`
+
+
+``RAMWeigher``
+~~~~~~~~~~~~~~
+
+Compute weight based on available RAM on the compute node.
+Sort with the largest weight winning. If the multiplier,
+:oslo.config:option:`filter_scheduler.ram_weight_multiplier`, is negative, the
+host with the least RAM available will win (useful for stacking hosts, instead
+of spreading).
+
+Starting with the Stein release, if a per-aggregate value with the key
+``ram_weight_multiplier`` is found, this
+value is chosen as the RAM weight multiplier. Otherwise, it will fall
+back to the :oslo.config:option:`filter_scheduler.ram_weight_multiplier`.
+If more than one value is found for a host in aggregate metadata, the minimum
+value will be used.
+
+
+``CPUWeigher``
+~~~~~~~~~~~~~~
+
+Compute weight based on available vCPUs on the compute node.
+Sort with the largest weight winning. If the multiplier,
+:oslo.config:option:`filter_scheduler.cpu_weight_multiplier`, is negative, the
+host with the least CPUs available will win (useful for stacking hosts, instead
+of spreading).
+
+Starting with the Stein release, if a per-aggregate value with the key
+``cpu_weight_multiplier`` is found, this
+value is chosen as the CPU weight multiplier. Otherwise, it will fall
+back to the :oslo.config:option:`filter_scheduler.cpu_weight_multiplier`. If
+more than one value is found for a host in aggregate metadata, the minimum
+value will be used.
+
+
+``DiskWeigher``
+~~~~~~~~~~~~~~~
+
+Hosts are weighted and sorted by free disk space, with the
+largest weight winning. If the multiplier is negative, the host with the least
+disk space available will win (useful for stacking hosts, instead of
+spreading).
+
+Starting with the Stein release, if a per-aggregate value with the key
+``disk_weight_multiplier`` is found, this
+value is chosen as the disk weight multiplier. Otherwise, it will fall
+back to the :oslo.config:option:`filter_scheduler.disk_weight_multiplier`. If
+more than one value is found for a host in aggregate metadata, the minimum
+value will be used.
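+
+For example, a sketch of a ``nova.conf`` fragment that stacks instances onto
+the most-used hosts by inverting the RAM and disk weighers described above:
+
+.. code-block:: ini
+
+   [filter_scheduler]
+   ram_weight_multiplier = -1.0
+   disk_weight_multiplier = -1.0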
+
+
+``MetricsWeigher``
+~~~~~~~~~~~~~~~~~~
+
+This weigher can compute the weight based on various metrics of the compute
+node host. The metrics to be weighed and their weighting ratios
+are specified using the :oslo.config:option:`metrics.weight_setting` config
+option. For example:
+
+.. code-block:: ini
+
+   [metrics]
+   weight_setting = name1=1.0, name2=-1.0
+
+You can specify which metrics are required, as well as the weight to apply to
+hosts where a metric is unavailable, using the
+:oslo.config:option:`metrics.required` and
+:oslo.config:option:`metrics.weight_of_unavailable` config options,
+respectively.
+
+Starting with the Stein release, if a per-aggregate value with the key
+``metrics_weight_multiplier`` is found, this value is chosen as the
+metrics weight multiplier. Otherwise, it will fall back to the
+:oslo.config:option:`metrics.weight_multiplier`. If more than
+one value is found for a host in aggregate metadata, the minimum value will
+be used.
+
+
+``IoOpsWeigher``
+~~~~~~~~~~~~~~~~
+
+The weigher can compute the weight based on the compute node
+host's workload. By default, lightly loaded compute hosts are preferred. If
+the multiplier is positive, the weigher instead prefers heavily loaded compute
+hosts, the opposite of the default behavior.
+
+Starting with the Stein release, if a per-aggregate value with the key
+``io_ops_weight_multiplier`` is found, this
+value is chosen as the I/O ops weight multiplier. Otherwise, it will fall
+back to the :oslo.config:option:`filter_scheduler.io_ops_weight_multiplier`.
+If more than one value is found for a host in aggregate metadata, the minimum
+value will be used.
+
+``PCIWeigher``
+~~~~~~~~~~~~~~
+
+Compute a weighting based on the number of PCI devices on the
+host and the number of PCI devices requested by the instance. For example,
+given three hosts - one with a single PCI device, one with many PCI devices,
+and one with no PCI devices - nova should prioritise these differently based
+on the demands of the instance. If the instance requests a single PCI device,
+then the first of the hosts should be preferred. Similarly, if the instance
+requests multiple PCI devices, then the second of these hosts would be
+preferred. Finally, if the instance does not request a PCI device, then the
+last of these hosts should be preferred.
+
+For this to be of any value, at least one of the :ref:`PciPassthroughFilter` or
+:ref:`NUMATopologyFilter` filters must be enabled.
+
+Starting with the Stein release, if a per-aggregate value with the key
+``pci_weight_multiplier`` is found, this
+value is chosen as the PCI weight multiplier. Otherwise, it will fall
+back to the :oslo.config:option:`filter_scheduler.pci_weight_multiplier`.
+If more than one value is found for a host in aggregate metadata, the
+minimum value will be used.
+
+.. important::
+
+   Only positive values are allowed for the multiplier of this weigher as a
+   negative value would force non-PCI instances away from non-PCI hosts, thus
+   causing future scheduling issues.
+
+``ServerGroupSoftAffinityWeigher``
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+The weigher computes the weight based on the number of instances from the same
+server group already running on a host. The largest weight defines the
+preferred host for the new instance. Only a positive multiplier value is
+allowed for the calculation.
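+
+For example, a sketch of requesting the soft-affinity policy at boot time,
+mirroring the strict-affinity examples above (image and flavor identifiers are
+placeholders):
+
+.. code-block:: console
+
+   $ openstack server group create --policy soft-affinity group-1
+   $ openstack server create --image IMAGE_ID --flavor 1 \
+     --hint group=SERVER_GROUP_UUID server-1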
+
+Starting with the Stein release, if a per-aggregate value with the key
+``soft_affinity_weight_multiplier`` is
+found, this value is chosen as the soft affinity weight multiplier.
+Otherwise, it will fall back to the
+:oslo.config:option:`filter_scheduler.soft_affinity_weight_multiplier`.
+If more than one value is found for a host in aggregate metadata, the
+minimum value will be used.
+
+``ServerGroupSoftAntiAffinityWeigher``
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+The weigher computes the weight, as a negative value, based on the number of
+instances from the same server group already running on a host. The largest
+weight defines the preferred host for the new instance. Only a positive
+multiplier value is allowed for the calculation.
+
+Starting with the Stein release, if a per-aggregate value with the key
+``soft_anti_affinity_weight_multiplier`` is found, this value is chosen
+as the soft anti-affinity weight multiplier. Otherwise, it will fall back to
+the
+:oslo.config:option:`filter_scheduler.soft_anti_affinity_weight_multiplier`.
+If more than one value is found for a host in aggregate metadata, the minimum
+value will be used.
+
+``BuildFailureWeigher``
+~~~~~~~~~~~~~~~~~~~~~~~
+
+Weighs hosts by the number of recent failed boot attempts.
+It considers the build failure counter and can negatively weigh hosts with
+recent failures. This avoids taking computes fully out of rotation.
+
+Starting with the Stein release, if a per-aggregate value with the key
+``build_failure_weight_multiplier`` is found, this value is chosen as the
+build failure weight multiplier. Otherwise, it will fall back to the
+:oslo.config:option:`filter_scheduler.build_failure_weight_multiplier`. If
+more than one value is found for a host in aggregate metadata, the minimum
+value will be used.
+
+.. _cross-cell-weigher:
+
+``CrossCellWeigher``
+~~~~~~~~~~~~~~~~~~~~
+
+.. versionadded:: 21.0.0 (Ussuri)
+
+Weighs hosts based on which cell they are in. "Local" cells are preferred when
+moving an instance. Use the configuration option
+:oslo.config:option:`filter_scheduler.cross_cell_move_weight_multiplier` to
+control the weight. If a per-aggregate value with the key
+``cross_cell_move_weight_multiplier`` is found, this value is chosen as the
+cross-cell move weight multiplier. Otherwise, it will fall back to the
+:oslo.config:option:`filter_scheduler.cross_cell_move_weight_multiplier`. If
+more than one value is found for a host in aggregate metadata, the minimum
+value will be used.
+
+
+Utilization-aware scheduling
+----------------------------
+
+.. warning::
+
+   This feature is poorly tested and may not work as expected. It may be
+   removed in a future release. Use at your own risk.
+
+It is possible to schedule instances using advanced scheduling decisions. These
+decisions are made based on enhanced usage statistics encompassing data like
+memory cache utilization, memory bandwidth utilization, or network bandwidth
+utilization. This is disabled by default. The administrator can configure how
+the metrics are weighted in the configuration file by using the
+:oslo.config:option:`metrics.weight_setting` config option. For example, to
+configure ``metric1`` with ``ratio1`` and ``metric2`` with ``ratio2``:
+
+.. code-block:: ini
+
+   [metrics]
+   weight_setting = "metric1=ratio1, metric2=ratio2"
+
+
+Allocation ratios
+-----------------
+
+Allocation ratios allow for the overcommit of host resources.
+The following configuration options exist to control allocation ratios +per compute node to support this overcommit of resources: + +* :oslo.config:option:`cpu_allocation_ratio` allows overriding the ``VCPU`` + inventory allocation ratio for a compute node +* :oslo.config:option:`ram_allocation_ratio` allows overriding the ``MEMORY_MB`` + inventory allocation ratio for a compute node +* :oslo.config:option:`disk_allocation_ratio` allows overriding the ``DISK_GB`` + inventory allocation ratio for a compute node + +Prior to the 19.0.0 Stein release, if left unset, the ``cpu_allocation_ratio`` +defaults to 16.0, the ``ram_allocation_ratio`` defaults to 1.5, and the +``disk_allocation_ratio`` defaults to 1.0. + +Starting with the 19.0.0 Stein release, the following configuration options +control the initial allocation ratio values for a compute node: + +* :oslo.config:option:`initial_cpu_allocation_ratio` the initial VCPU + inventory allocation ratio for a new compute node record, defaults to 16.0 +* :oslo.config:option:`initial_ram_allocation_ratio` the initial MEMORY_MB + inventory allocation ratio for a new compute node record, defaults to 1.5 +* :oslo.config:option:`initial_disk_allocation_ratio` the initial DISK_GB + inventory allocation ratio for a new compute node record, defaults to 1.0 + +Scheduling considerations +~~~~~~~~~~~~~~~~~~~~~~~~~ + +The allocation ratio configuration is used both during reporting of compute +node `resource provider inventory`_ to the placement service and during +scheduling. + +.. _resource provider inventory: https://docs.openstack.org/api-ref/placement/?expanded=#resource-provider-inventories + +Usage scenarios +~~~~~~~~~~~~~~~ + +Since allocation ratios can be set via nova configuration, host aggregate +metadata and the placement API, it can be confusing to know which should be +used. This really depends on your scenario. A few common scenarios are detailed +here. + +1. When the deployer wants to **always** set an override value for a resource + on a compute node, the deployer should ensure that the + :oslo.config:option:`DEFAULT.cpu_allocation_ratio`, + :oslo.config:option:`DEFAULT.ram_allocation_ratio` and + :oslo.config:option:`DEFAULT.disk_allocation_ratio` configuration options + are set to a non-None value. + This will make the ``nova-compute`` service overwrite any externally-set + allocation ratio values set via the placement REST API. + +2. When the deployer wants to set an **initial** value for a compute node + allocation ratio but wants to allow an admin to adjust this afterwards + without making any configuration file changes, the deployer should set the + :oslo.config:option:`DEFAULT.initial_cpu_allocation_ratio`, + :oslo.config:option:`DEFAULT.initial_ram_allocation_ratio` and + :oslo.config:option:`DEFAULT.initial_disk_allocation_ratio` configuration + options and then manage the allocation ratios using the placement REST API + (or `osc-placement`_ command line interface). + For example: + + .. code-block:: console + + $ openstack resource provider inventory set \ + --resource VCPU:allocation_ratio=1.0 \ + --amend 815a5634-86fb-4e1e-8824-8a631fee3e06 + +3. 
+
+3. When the deployer wants to **always** use the placement API to set
+   allocation ratios, then the deployer should ensure that the
+   :oslo.config:option:`DEFAULT.cpu_allocation_ratio`,
+   :oslo.config:option:`DEFAULT.ram_allocation_ratio` and
+   :oslo.config:option:`DEFAULT.disk_allocation_ratio` configuration options
+   are set to ``None`` and then manage the allocation ratios using the
+   placement REST API (or `osc-placement`_ command line interface).
+
+   This scenario is the workaround for
+   `bug 1804125 <https://bugs.launchpad.net/nova/+bug/1804125>`_.
+
+.. versionchanged:: 19.0.0 (Stein)
+
+   The :oslo.config:option:`DEFAULT.initial_cpu_allocation_ratio`,
+   :oslo.config:option:`DEFAULT.initial_ram_allocation_ratio` and
+   :oslo.config:option:`DEFAULT.initial_disk_allocation_ratio` configuration
+   options were introduced in Stein. Prior to this release, setting any of
+   :oslo.config:option:`DEFAULT.cpu_allocation_ratio`,
+   :oslo.config:option:`DEFAULT.ram_allocation_ratio` or
+   :oslo.config:option:`DEFAULT.disk_allocation_ratio` to a non-null value
+   would ensure the user-configured value was always overridden.
+
+.. _osc-placement: https://docs.openstack.org/osc-placement/latest/index.html
+
+.. _hypervisor-specific-considerations:
+
+Hypervisor-specific considerations
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Nova provides three configuration options that can be used to set aside some
+number of resources that will not be consumed by an instance, whether these
+resources are overcommitted or not:
+
+- :oslo.config:option:`reserved_host_cpus`
+- :oslo.config:option:`reserved_host_memory_mb`
+- :oslo.config:option:`reserved_host_disk_mb`
+
+Some virt drivers may benefit from the use of these options to account for
+hypervisor-specific overhead.
+
+Hyper-V
+  Hyper-V creates a VM memory file on the local disk when an instance starts.
+  The size of this file corresponds to the amount of RAM allocated to the
+  instance.
+
+  You should configure the
+  :oslo.config:option:`reserved_host_disk_mb` config option to
+  account for this overhead, based on the amount of memory available
+  to instances.
+
+
+Cells considerations
+--------------------
+
+By default cells are enabled for scheduling new instances, but they can be
+disabled (new instances will not be scheduled to the cell). This may be
+useful while performing maintenance on a cell or when dealing with failures
+and other interventions. Note that creating pre-disabled cells and enabling
+or disabling existing cells should be followed by a restart or SIGHUP of the
+nova-scheduler service for the changes to take effect.
+
+Command-line interface
+~~~~~~~~~~~~~~~~~~~~~~
+
+The :command:`nova-manage` command-line client supports the cell-disable
+related commands. To enable or disable a cell, use
+:command:`nova-manage cell_v2 update_cell` and to create pre-disabled cells,
+use :command:`nova-manage cell_v2 create_cell`. See the
+:ref:`man-page-cells-v2` man page for details on command usage.
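+
+For example, disabling an existing cell could look like the following sketch
+(``<cell_uuid>`` is a placeholder, and the scheduler still needs to be
+restarted or sent a SIGHUP afterwards):
+
+.. code-block:: console
+
+    $ nova-manage cell_v2 update_cell --cell_uuid <cell_uuid> --disable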
+
+
+.. _compute-capabilities-as-traits:
+
+Compute capabilities as traits
+------------------------------
+
+.. versionadded:: 19.0.0 (Stein)
+
+The ``nova-compute`` service will report certain ``COMPUTE_*`` traits based on
+its compute driver capabilities to the placement service. The traits will be
+associated with the resource provider for that compute service. These traits
+can be used during scheduling by configuring flavors with
+:ref:`Required traits ` or
+:ref:`Forbidden traits `. For example, if you
+have a host aggregate with a set of compute nodes that support multi-attach
+volumes, you can restrict a flavor to that aggregate by adding the
+``trait:COMPUTE_VOLUME_MULTI_ATTACH=required`` extra spec to the flavor and
+then restrict the flavor to the aggregate
+:ref:`as normal `.
+
+Here is an example of a libvirt compute node resource provider that is
+exposing some CPU features as traits, driver capabilities as traits, and a
+custom trait denoted by the ``CUSTOM_`` prefix:
+
+.. code-block:: console
+
+    $ openstack --os-placement-api-version 1.6 resource provider trait list \
+    > d9b3dbc4-50e2-42dd-be98-522f6edaab3f --sort-column name
+    +---------------------------------------+
+    | name                                  |
+    +---------------------------------------+
+    | COMPUTE_DEVICE_TAGGING                |
+    | COMPUTE_NET_ATTACH_INTERFACE          |
+    | COMPUTE_NET_ATTACH_INTERFACE_WITH_TAG |
+    | COMPUTE_TRUSTED_CERTS                 |
+    | COMPUTE_VOLUME_ATTACH_WITH_TAG        |
+    | COMPUTE_VOLUME_EXTEND                 |
+    | COMPUTE_VOLUME_MULTI_ATTACH           |
+    | CUSTOM_IMAGE_TYPE_RBD                 |
+    | HW_CPU_X86_MMX                        |
+    | HW_CPU_X86_SSE                        |
+    | HW_CPU_X86_SSE2                       |
+    | HW_CPU_X86_SVM                        |
+    +---------------------------------------+
+
+**Rules**
+
+There are some rules associated with capability-defined traits.
+
+1. The compute service "owns" these traits and will add/remove them when the
+   ``nova-compute`` service starts and when the ``update_available_resource``
+   periodic task runs, with run intervals controlled by the config option
+   :oslo.config:option:`update_resources_interval`.
+
+2. The compute service will not remove any custom traits set on the resource
+   provider externally, such as the ``CUSTOM_IMAGE_TYPE_RBD`` trait in the
+   example above.
+
+3. If compute-owned traits are removed from the resource provider externally,
+   for example by running ``openstack resource provider trait delete ``,
+   the compute service will add its traits again on restart or SIGHUP.
+
+4. If a compute trait is set on the resource provider externally which is not
+   supported by the driver, for example by adding the ``COMPUTE_VOLUME_EXTEND``
+   trait when the driver does not support that capability, the compute service
+   will automatically remove the unsupported trait on restart or SIGHUP.
+
+5. Compute capability traits are standard traits defined in the `os-traits`_
+   library.
+
+.. _os-traits: https://opendev.org/openstack/os-traits/src/branch/master/os_traits/compute
+
+:ref:`Further information on capabilities and traits
+` can be found in the
+:doc:`Technical Reference Deep Dives section `.
+
+
+.. _custom-scheduler-filters:
+
+Writing Your Own Filter
+-----------------------
+
+To create **your own filter**, you must inherit from |BaseHostFilter| and
+implement one method: ``host_passes``. This method should return ``True`` if a
+host passes the filter and ``False`` otherwise. It takes two parameters:
+
+* the ``HostState`` object, which provides access to attributes of the host
+* the ``RequestSpec`` object, which describes the user request, including the
+  flavor, the image and the scheduler hints
+
+For further details about each of those objects and their corresponding
+attributes, refer to the codebase (at least by looking at the other filters
+code) or ask for help in the ``#openstack-nova`` IRC channel.
+
+In addition, if your custom filter uses non-standard extra specs, you must
+register validators for these extra specs. Examples of validators can be found
+in the ``nova.api.validation.extra_specs`` module. These should be registered
+via the ``nova.api.extra_spec_validators`` `entrypoint`__.
+
+The module containing your custom filter(s) must be packaged and available in
+the same environment(s) that the nova controllers, or specifically the
+:program:`nova-scheduler` and :program:`nova-api` services, are available in.
+As an example, consider the following sample package, which is the `minimal
+structure`__ for a standard, setuptools-based Python package:
+
+.. code-block:: none
+
+    acmefilter/
+        acmefilter/
+            __init__.py
+            validators.py
+        setup.py
+
+Where ``__init__.py`` contains:
+
+.. code-block:: python
+
+    from oslo_log import log as logging
+    from nova.scheduler import filters
+
+    LOG = logging.getLogger(__name__)
+
+    class AcmeFilter(filters.BaseHostFilter):
+
+        def host_passes(self, host_state, spec_obj):
+            extra_spec = spec_obj.flavor.extra_specs.get('acme:foo')
+            LOG.info("Extra spec value was '%s'", extra_spec)
+
+            # do meaningful stuff here...
+
+            return True
+
+``validators.py`` contains:
+
+.. code-block:: python
+
+    from nova.api.validation.extra_specs import base
+
+    def register():
+        validators = [
+            base.ExtraSpecValidator(
+                name='acme:foo',
+                description='My custom extra spec.',
+                value={
+                    'type': str,
+                    'enum': [
+                        'bar',
+                        'baz',
+                    ],
+                },
+            ),
+        ]
+
+        return validators
+
+``setup.py`` contains:
+
+.. code-block:: python
+
+    from setuptools import setup
+
+    setup(
+        name='acmefilter',
+        version='0.1',
+        description='My custom filter',
+        packages=[
+            'acmefilter'
+        ],
+        entry_points={
+            'nova.api.extra_spec_validators': [
+                'acme = acmefilter.validators',
+            ],
+        },
+    )
+
+To enable this, you would set the following in :file:`nova.conf`:
+
+.. code-block:: ini
+
+    [filter_scheduler]
+    available_filters = nova.scheduler.filters.all_filters
+    available_filters = acmefilter.AcmeFilter
+    enabled_filters = ComputeFilter,AcmeFilter
+
+.. note::
+
+    You **must** add custom filters to the list of available filters using the
+    :oslo.config:option:`filter_scheduler.available_filters` config option in
+    addition to enabling them via the
+    :oslo.config:option:`filter_scheduler.enabled_filters` config option. The
+    default ``nova.scheduler.filters.all_filters`` value for the former only
+    includes the filters shipped with nova.
+
+With these settings, all of the standard nova filters and the custom
+``AcmeFilter`` filter are available to the scheduler, but just the
+``ComputeFilter`` and ``AcmeFilter`` will be used on each request.
+
+__ https://packaging.python.org/specifications/entry-points/
+__ https://python-packaging.readthedocs.io/en/latest/minimal.html
+
+Writing your own weigher
+------------------------
+
+To create your own weigher, you must inherit from |BaseHostWeigher|.
+A weigher can implement both the ``weight_multiplier`` and ``_weigh_object``
+methods or just implement the ``weigh_objects`` method. The ``weigh_objects``
+method is overridden only if you need access to all objects in order to
+calculate weights; it should just return a list of weights rather than modify
+the weight of each object directly, since final weights are normalized and
+computed by ``weights.BaseWeightHandler``.
+
+
+.. |BaseHostFilter| replace:: :class:`BaseHostFilter `
+.. |BaseHostWeigher| replace:: :class:`BaseHostWeigher `
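+
+As a short illustration, a minimal weigher that spreads instances by
+preferring hosts with more free RAM might look like the following sketch.
+``AcmeWeigher`` is a made-up name, and the weigher must also be enabled via
+the :oslo.config:option:`filter_scheduler.weight_classes` config option:
+
+.. code-block:: python
+
+    from nova.scheduler import weights
+
+    class AcmeWeigher(weights.BaseHostWeigher):
+
+        def weight_multiplier(self, host_state):
+            # A positive multiplier means higher weights win.
+            return 1.0
+
+        def _weigh_object(self, host_state, weight_properties):
+            # Prefer hosts with more free RAM; raw values are normalized
+            # by the weight handler before multipliers are applied.
+            return host_state.free_ram_mb
diff --git a/doc/source/admin/secure-boot.rst b/doc/source/admin/secure-boot.rst
new file mode 100644
index 00000000000..3e2ccb084b0
--- /dev/null
+++ b/doc/source/admin/secure-boot.rst
@@ -0,0 +1,136 @@
+===========
+Secure Boot
+===========
+
+.. versionadded:: 14.0.0 (Newton)
+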
+.. versionchanged:: 23.0.0 (Wallaby)
+
+   Added support for Secure Boot to the libvirt driver.
+
+Nova supports configuring `UEFI Secure Boot`__ for guests. Secure Boot aims to
+ensure no unsigned kernel code runs on a machine.
+
+.. __: https://en.wikipedia.org/wiki/Secure_boot
+
+
+Enabling Secure Boot
+--------------------
+
+Currently the configuration of UEFI guest bootloaders is only supported when
+using the libvirt compute driver with a :oslo.config:option:`libvirt.virt_type`
+of ``kvm`` or ``qemu``, or when using the Hyper-V compute driver with certain
+machine types. In both cases, it requires that the guests also be configured
+with a :doc:`UEFI bootloader `.
+
+With these requirements satisfied, you can verify UEFI Secure Boot support by
+inspecting the traits on the compute node's resource provider:
+
+.. code:: bash
+
+   $ COMPUTE_UUID=$(openstack resource provider list --name $HOST -f value -c uuid)
+   $ openstack resource provider trait list $COMPUTE_UUID | grep COMPUTE_SECURITY_UEFI_SECURE_BOOT
+   | COMPUTE_SECURITY_UEFI_SECURE_BOOT |
+
+
+Configuring a flavor or image
+-----------------------------
+
+Configuring UEFI Secure Boot for guests varies depending on the compute driver
+in use. In all cases, a :doc:`UEFI guest bootloader ` must be configured
+for the guest, but each compute driver also imposes additional requirements of
+its own.
+
+.. rubric:: Libvirt
+
+As the name would suggest, UEFI Secure Boot requires that a UEFI bootloader be
+configured for guests. When this is done, UEFI Secure Boot support can be
+configured using the :nova:extra-spec:`os:secure_boot` extra spec or equivalent
+image metadata property. For example, to configure an image that meets both of
+these requirements:
+
+.. code-block:: bash
+
+   $ openstack image set \
+       --property hw_firmware_type=uefi \
+       --property os_secure_boot=required \
+       $IMAGE
+
+.. note::
+
+   On x86_64 hosts, enabling secure boot also requires configuring use of the
+   Q35 machine type. This can be configured on a per-guest basis using the
+   ``hw_machine_type`` image metadata property or automatically for all guests
+   created on a host using the :oslo.config:option:`libvirt.hw_machine_type`
+   config option.
+
+It is also possible to explicitly request that secure boot be disabled. This is
+the default behavior, so this request is typically useful when an admin wishes
+to explicitly prevent a user requesting secure boot by uploading their own
+image with relevant image properties. For example, to disable secure boot via
+the flavor:
+
+.. code-block:: bash
+
+   $ openstack flavor set --property os:secure_boot=disabled $FLAVOR
+
+Finally, it is possible to request that secure boot be enabled if the host
+supports it. This is only possible via the image metadata property. When this
+is requested, secure boot will only be enabled if the host supports this
+feature and the other constraints, namely that a UEFI guest bootloader is
+configured, are met. For example:
+
+.. code-block:: bash
+
+   $ openstack image set --property os_secure_boot=optional $IMAGE
+
+.. note::
+
+   If both the image metadata property and flavor extra spec are provided,
+   they must match. If they do not, an error will be raised.
+
+.. rubric:: Hyper-V
+
+Like libvirt, configuring a guest for UEFI Secure Boot support also requires
+that it be configured with a UEFI bootloader. As noted in :doc:`uefi`, it is
+not possible to do this explicitly in Hyper-V. Rather, you should configure the
+guest to use the *Generation 2* machine type. In addition to this, the Hyper-V
+compute driver also requires that the OS type be configured.
+
+When both of these constraints are met, you can configure UEFI Secure Boot
+support using the :nova:extra-spec:`os:secure_boot` extra spec or equivalent
+image metadata property. For example, to configure an image that meets all the
+above requirements:
+
+.. code-block:: bash
+
+   $ openstack image set \
+       --property hw_machine_type=hyperv-gen2 \
+       --property os_type=windows \
+       --property os_secure_boot=required \
+       $IMAGE
+
+As with the libvirt driver, it is also possible to request that secure boot be
+disabled. This is the default behavior, so this is typically useful when an
+admin wishes to explicitly prevent a user requesting secure boot. For example,
+to disable secure boot via the flavor:
+
+.. code-block:: bash
+
+   $ openstack flavor set --property os:secure_boot=disabled $FLAVOR
+
+However, unlike the libvirt driver, the Hyper-V driver does not respect the
+``optional`` value for the image metadata property. If this is configured, it
+will be silently ignored.
+
+
+References
+----------
+
+* `Allow Secure Boot (SB) for QEMU- and KVM-based guests (spec)`__
+* `Securing Secure Boot with System Management Mode`__
+* `Generation 2 virtual machine security settings for Hyper-V`__
+
+.. __: https://specs.openstack.org/openstack/nova-specs/specs/wallaby/approved/allow-secure-boot-for-qemu-kvm-guests.html
+.. __: http://events17.linuxfoundation.org/sites/events/files/slides/kvmforum15-smm.pdf
+.. __: https://docs.microsoft.com/en-us/windows-server/virtualization/hyper-v/learn-more/generation-2-virtual-machine-security-settings-for-hyper-v
diff --git a/doc/source/admin/secure-live-migration-with-qemu-native-tls.rst b/doc/source/admin/secure-live-migration-with-qemu-native-tls.rst
new file mode 100644
index 00000000000..0e6206d0b1f
--- /dev/null
+++ b/doc/source/admin/secure-live-migration-with-qemu-native-tls.rst
@@ -0,0 +1,198 @@
+==========================================
+Secure live migration with QEMU-native TLS
+==========================================
+
+Context
+~~~~~~~
+
+The encryption offered by nova's
+:oslo.config:option:`libvirt.live_migration_tunnelled` does not secure
+all the different migration streams of a nova instance, namely: guest
+RAM, device state, and disks (via NBD) when using non-shared storage.
+Further, the "tunnelling via libvirtd" has inherent limitations: (a) it
+cannot handle live migration of disks in a non-shared storage setup
+(a.k.a. "block migration"); and (b) it has a huge performance overhead
+and latency, because it burns more CPU and memory bandwidth due to an
+increased number of data copies on both source and destination hosts.
+
+To solve this existing limitation, QEMU and libvirt have gained (refer
+:ref:`below ` for version details) support for "native
+TLS", i.e. TLS built into QEMU. This will secure all data transports,
+including disks that are not on shared storage, without incurring the
+limitations of the "tunnelled via libvirtd" transport.
+
+To take advantage of the "native TLS" support in QEMU and libvirt, nova
+has introduced a new configuration attribute,
+:oslo.config:option:`libvirt.live_migration_with_native_tls`.
+
+
+.. _`Prerequisites`:
+
+Prerequisites
+~~~~~~~~~~~~~
+
+(1) Version requirement: This feature needs at least libvirt 4.4.0 and
+    QEMU 2.11.
+
+(2) A pre-configured TLS environment—i.e. CA, server, and client
+    certificates, their file permissions, et al—must be "correctly"
+    configured (typically by an installer tool) on all relevant compute
+    nodes. To simplify your PKI (Public Key Infrastructure) setup, use
+    deployment tools that take care of handling all the certificate
+    lifecycle management. For example, refer to the "`TLS everywhere
+    `__"
+    guide from the TripleO project.
+
+(3) Password-less SSH setup for all relevant compute nodes.
+
+(4) On all relevant compute nodes, ensure the TLS-related config
+    attributes in ``/etc/libvirt/qemu.conf`` are in place::
+
+        default_tls_x509_cert_dir = "/etc/pki/qemu"
+        default_tls_x509_verify = 1
+
+    If it is not already configured, modify ``/etc/sysconfig/libvirtd``
+    on both (ComputeNode1 & ComputeNode2) to listen for TCP/IP
+    connections::
+
+        LIBVIRTD_ARGS="--listen"
+
+    Then, restart the libvirt daemon (also on both nodes)::
+
+        $ systemctl restart libvirtd
+
+    Refer to the "`Related information`_" section for a note about the
+    other TLS-related configuration attributes in
+    ``/etc/libvirt/qemu.conf``.
+
+
+Validating your TLS environment on compute nodes
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Assuming you have two compute hosts (``ComputeNode1`` and
+``ComputeNode2``), run the :command:`virt-pki-validate` tool (it comes
+with the ``libvirt-client`` package on your Linux distribution) on both
+nodes to ensure all the necessary PKI files are configured::
+
+    [ComputeNode1]$ virt-pki-validate
+    Found /usr/bin/certtool
+    Found CA certificate /etc/pki/CA/cacert.pem for TLS Migration Test
+    Found client certificate /etc/pki/libvirt/clientcert.pem for ComputeNode1
+    Found client private key /etc/pki/libvirt/private/clientkey.pem
+    Found server certificate /etc/pki/libvirt/servercert.pem for ComputeNode1
+    Found server private key /etc/pki/libvirt/private/serverkey.pem
+    Make sure /etc/sysconfig/libvirtd is setup to listen to
+    TCP/IP connections and restart the libvirtd service
+
+    [ComputeNode2]$ virt-pki-validate
+    Found /usr/bin/certtool
+    Found CA certificate /etc/pki/CA/cacert.pem for TLS Migration Test
+    Found client certificate /etc/pki/libvirt/clientcert.pem for ComputeNode2
+    Found client private key /etc/pki/libvirt/private/clientkey.pem
+    Found server certificate /etc/pki/libvirt/servercert.pem for ComputeNode2
+    Found server private key /etc/pki/libvirt/private/serverkey.pem
+    Make sure /etc/sysconfig/libvirtd is setup to listen to
+    TCP/IP connections and restart the libvirtd service
+
+
+Other TLS environment related checks on compute nodes
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+**IMPORTANT**: Ensure that the permissions of the certificate files and
+keys in the ``/etc/pki/qemu/*`` directory on both source *and*
+destination compute nodes are ``0640`` with ``root:qemu`` as the
+user/group. For example, on a Fedora-based system::
+
+    $ ls -lasrtZ /etc/pki/qemu
+    total 32
+    0 drwxr-xr-x. 10 root root system_u:object_r:cert_t:s0     110 Dec 10 10:39 ..
+    4 -rw-r-----.  1 root qemu unconfined_u:object_r:cert_t:s0 1464 Dec 10 11:08 ca-cert.pem
+    4 -rw-r-----.  1 root qemu unconfined_u:object_r:cert_t:s0 1558 Dec 10 11:08 server-cert.pem
+    4 -rw-r-----.  1 root qemu unconfined_u:object_r:cert_t:s0 1619 Dec 10 11:09 client-cert.pem
+    8 -rw-r-----.  1 root qemu unconfined_u:object_r:cert_t:s0 8180 Dec 10 11:09 client-key.pem
+    8 -rw-r-----.  1 root qemu unconfined_u:object_r:cert_t:s0 8177 Dec 11 05:35 server-key.pem
+    0 drwxr-xr-x.  2 root root unconfined_u:object_r:cert_t:s0  146 Dec 11 06:01 .
+
+
+Performing the migration
+~~~~~~~~~~~~~~~~~~~~~~~~
+
+(1) On all relevant compute nodes, enable the
+    :oslo.config:option:`libvirt.live_migration_with_native_tls`
+    configuration attribute and set the
+    :oslo.config:option:`libvirt.live_migration_scheme`
+    configuration attribute to ``tls``::
+
+        [libvirt]
+        live_migration_with_native_tls = true
+        live_migration_scheme = tls
+
+    .. note::
+       Setting both
+       :oslo.config:option:`libvirt.live_migration_with_native_tls` and
+       :oslo.config:option:`libvirt.live_migration_tunnelled` at the
+       same time is invalid (and disallowed).
+
+    .. note::
+       Not setting
+       :oslo.config:option:`libvirt.live_migration_scheme` to ``tls``
+       will result in libvirt using the unencrypted TCP connection
+       without displaying any error or a warning in the logs.
+
+    And restart the ``nova-compute`` service::
+
+        $ systemctl restart openstack-nova-compute
+
+(2) Now that all TLS-related configuration is in place, migrate guests
+    (with or without shared storage) from ``ComputeNode1`` to
+    ``ComputeNode2``. Refer to the :doc:`live-migration-usage` document
+    for details about live migration.
+
+
+.. _`Related information`:
+
+Related information
+~~~~~~~~~~~~~~~~~~~
+
+- If you have the relevant libvirt and QEMU versions (mentioned in the
+  "`Prerequisites`_" section earlier), then using the
+  :oslo.config:option:`libvirt.live_migration_with_native_tls` option is
+  strongly recommended over the more limited
+  :oslo.config:option:`libvirt.live_migration_tunnelled` option, which
+  is intended to be deprecated in the future.
+
+
+- There are in total *nine* TLS-related config options in
+  ``/etc/libvirt/qemu.conf``::
+
+      default_tls_x509_cert_dir
+      default_tls_x509_verify
+      nbd_tls
+      nbd_tls_x509_cert_dir
+      migrate_tls_x509_cert_dir
+
+      vnc_tls_x509_cert_dir
+      spice_tls_x509_cert_dir
+      vxhs_tls_x509_cert_dir
+      chardev_tls_x509_cert_dir
+
+  If you set both the ``default_tls_x509_cert_dir`` and
+  ``default_tls_x509_verify`` parameters for all certificates, there is
+  no need to specify any of the other ``*_tls*`` config options.
+
+  The intention (of libvirt) is that you can just use the
+  ``default_tls_x509_*`` config attributes so that you don't need to set
+  any other ``*_tls*`` parameters, *unless* you need different
+  certificates for some services. The rationale for that is that some
+  services (e.g. migration / NBD) are only exposed to internal
+  infrastructure, while some services (VNC, SPICE) might be exposed
+  publicly, so they might need different certificates. For OpenStack
+  this does not matter, though; we will stick with the defaults.
+
+- If they are not already open, ensure you open up these TCP ports on
+  your firewall: ``16514`` (on which the authenticated and encrypted
+  TCP/IP socket will be listening) and ``49152-49215`` (for regular
+  migration) on all relevant compute nodes. (Otherwise you get
+  ``error: internal error: unable to execute QEMU command
+  'drive-mirror': Failed to connect socket: No route to host``).
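+
+  For example, on hosts using ``firewalld``, the ports could be opened
+  as follows (a sketch only; adapt it to the firewall tooling used by
+  your distribution)::
+
+      $ sudo firewall-cmd --permanent --add-port=16514/tcp
+      $ sudo firewall-cmd --permanent --add-port=49152-49215/tcp
+      $ sudo firewall-cmd --reload
diff --git a/doc/source/admin/security-groups.rst b/doc/source/admin/security-groups.rst
index 9a55ef2800e..4419111fe75 100644
--- a/doc/source/admin/security-groups.rst
+++ b/doc/source/admin/security-groups.rst
@@ -12,31 +12,17 @@ that has no other defined security group. Unless you change the default, this
 security group denies all incoming traffic and allows only outgoing traffic
 to your instance.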
-You can use the ``allow_same_net_traffic`` option in the -``/etc/nova/nova.conf`` file to globally control whether the rules apply to -hosts which share a network. There are two possible values: - -``True`` (default) - Hosts on the same subnet are not filtered and are allowed to pass all types - of traffic between them. On a flat network, this allows all instances from - all projects unfiltered communication. With VLAN networking, this allows - access between instances within the same project. You can also simulate this - setting by configuring the default security group to allow all traffic from - the subnet. - -``False`` - Security groups are enforced for all connections. - -Additionally, the number of maximum rules per security group is controlled by -the ``security_group_rules`` and the number of allowed security groups per -project is controlled by the ``security_groups`` quota (see -:ref:`manage-quotas`). +Security groups (and their quota) are managed by :neutron-doc:`Neutron, the +networking service `. -List and view current security groups -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +Working with security groups +~~~~~~~~~~~~~~~~~~~~~~~~~~~~ From the command-line you can get a list of security groups for the project, -using the :command:`openstack` and :command:`nova` commands: +using the :command:`openstack` commands. + +List and view current security groups +------------------------------------- #. Ensure your system variables are set for the user and project for which you are checking security group rules. For example: @@ -83,7 +69,7 @@ using the :command:`openstack` and :command:`nova` commands: allowed from all IPs. Create a security group -~~~~~~~~~~~~~~~~~~~~~~~ +----------------------- When adding a new security group, you should pick a descriptive but brief name. This name shows up in brief descriptions of the instances that use it where the @@ -203,7 +189,7 @@ or "secgrp1". +--------------------------------------+-------------+-----------+-----------------+-----------------------+ Delete a security group -~~~~~~~~~~~~~~~~~~~~~~~ +----------------------- #. Ensure your system variables are set for the user and project for which you are deleting a security group. @@ -221,7 +207,7 @@ Delete a security group $ openstack security group delete global_http Create security group rules for a cluster of instances -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +------------------------------------------------------ Source Groups are a special, dynamic way of defining the CIDR of allowed sources. The user specifies a Source Group (Security Group name), and all the diff --git a/doc/source/admin/security.rst b/doc/source/admin/security.rst index 515e91bed1d..5743023e739 100644 --- a/doc/source/admin/security.rst +++ b/doc/source/admin/security.rst @@ -38,3 +38,22 @@ encryption in the ``metadata_agent.ini`` file. .. code-block:: ini nova_client_priv_key = PATH_TO_KEY + + +Securing live migration streams with QEMU-native TLS +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +It is strongly recommended to secure all the different live migration +streams of a nova instance—i.e. guest RAM, device state, and disks (via +NBD) when using non-shared storage. For further details on how to set +this up, refer to the +:doc:`secure-live-migration-with-qemu-native-tls` document. 
+ + +Mitigation for MDS (Microarchitectural Data Sampling) security flaws +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +It is strongly recommended to patch all compute nodes and nova instances +against the processor-related security flaws, such as MDS (and other +previous vulnerabilities). For details on applying mitigation for the +MDS flaws, refer to :ref:`mitigation-for-Intel-MDS-security-flaws`. diff --git a/doc/source/admin/services.rst b/doc/source/admin/services.rst index 74ee25273f1..a3f59e1fdfb 100644 --- a/doc/source/admin/services.rst +++ b/doc/source/admin/services.rst @@ -10,22 +10,13 @@ enable the ``nova-compute`` service. .. code-block:: console $ openstack compute service list - +----+--------------+------------+----------+---------+-------+--------------+ - | ID | Binary | Host | Zone | Status | State | Updated At | - +----+--------------+------------+----------+---------+-------+--------------+ - | 4 | nova- | controller | internal | enabled | up | 2016-12-20T0 | - | | consoleauth | | | | | 0:44:48.0000 | - | | | | | | | 00 | - | 5 | nova- | controller | internal | enabled | up | 2016-12-20T0 | - | | scheduler | | | | | 0:44:48.0000 | - | | | | | | | 00 | - | 6 | nova- | controller | internal | enabled | up | 2016-12-20T0 | - | | conductor | | | | | 0:44:54.0000 | - | | | | | | | 00 | - | 9 | nova-compute | compute | nova | enabled | up | 2016-10-21T0 | - | | | | | | | 2:35:03.0000 | - | | | | | | | 00 | - +----+--------------+------------+----------+---------+-------+--------------+ + +----+----------------+------------+----------+---------+-------+----------------------------+ + | ID | Binary | Host | Zone | Status | State | Updated At | + +----+----------------+------------+----------+---------+-------+----------------------------+ + | 4 | nova-scheduler | controller | internal | enabled | up | 2016-12-20T00:44:48.000000 | + | 5 | nova-conductor | controller | internal | enabled | up | 2016-12-20T00:44:54.000000 | + | 8 | nova-compute | compute | nova | enabled | up | 2016-10-21T02:35:03.000000 | + +----+----------------+------------+----------+---------+-------+----------------------------+ #. Disable a nova service: @@ -43,22 +34,13 @@ enable the ``nova-compute`` service. .. 
code-block:: console $ openstack compute service list - +----+--------------+------------+----------+---------+-------+--------------+ - | ID | Binary | Host | Zone | Status | State | Updated At | - +----+--------------+------------+----------+---------+-------+--------------+ - | 4 | nova- | controller | internal | enabled | up | 2016-12-20T0 | - | | consoleauth | | | | | 0:44:48.0000 | - | | | | | | | 00 | - | 5 | nova- | controller | internal | enabled | up | 2016-12-20T0 | - | | scheduler | | | | | 0:44:48.0000 | - | | | | | | | 00 | - | 6 | nova- | controller | internal | enabled | up | 2016-12-20T0 | - | | conductor | | | | | 0:44:54.0000 | - | | | | | | | 00 | - | 9 | nova-compute | compute | nova | disabled| up | 2016-10-21T0 | - | | | | | | | 2:35:03.0000 | - | | | | | | | 00 | - +----+--------------+------------+----------+---------+-------+--------------+ + +----+----------------+------------+----------+---------+-------+----------------------------+ + | ID | Binary | Host | Zone | Status | State | Updated At | + +----+----------------+------------+----------+---------+-------+----------------------------+ + | 5 | nova-scheduler | controller | internal | enabled | up | 2016-12-20T00:44:48.000000 | + | 6 | nova-conductor | controller | internal | enabled | up | 2016-12-20T00:44:54.000000 | + | 9 | nova-compute | compute | nova | disabled| up | 2016-10-21T02:35:03.000000 | + +----+----------------+------------+----------+---------+-------+----------------------------+ #. Enable the service: diff --git a/doc/source/admin/sev.rst b/doc/source/admin/sev.rst new file mode 100644 index 00000000000..62588070afe --- /dev/null +++ b/doc/source/admin/sev.rst @@ -0,0 +1,279 @@ +.. _amd-sev: + +AMD SEV (Secure Encrypted Virtualization) +========================================= + +.. versionadded:: 20.0.0 (Train) + +`Secure Encrypted Virtualization (SEV)`__ is a technology from AMD which +enables the memory for a VM to be encrypted with a key unique to the VM. +SEV is particularly applicable to cloud computing since it can reduce the +amount of trust VMs need to place in the hypervisor and administrator of +their host system. + +.. __: https://developer.amd.com/sev/ + + +.. _deploying-sev-capable-infrastructure: + +Enabling SEV +------------ + +First the operator will need to ensure the following prerequisites are met: + +- Currently SEV is only supported when using the libvirt compute driver with a + :oslo.config:option:`libvirt.virt_type` of ``kvm`` or ``qemu``. + +- At least one of the Nova compute hosts must be AMD hardware capable + of supporting SEV. It is entirely possible for the compute plane to + be a mix of hardware which can and cannot support SEV, although as + per the section on `Permanent limitations`_ below, the maximum + number of simultaneously running guests with SEV will be limited by + the quantity and quality of SEV-capable hardware available. + +In order for users to be able to use SEV, the operator will need to +perform the following steps: + +- Ensure that sufficient memory is reserved on the SEV compute hosts + for host-level services to function correctly at all times. This is + particularly important when hosting SEV-enabled guests, since they + pin pages in RAM, preventing any memory overcommit which may be in + normal operation on other compute hosts. + + It is `recommended`__ to achieve this by configuring an ``rlimit`` at + the ``/machine.slice`` top-level ``cgroup`` on the host, with all VMs + placed inside that. 
(For extreme detail, see `this discussion on the + spec`__.) + + __ http://specs.openstack.org/openstack/nova-specs/specs/train/approved/amd-sev-libvirt-support.html#memory-reservation-solutions + __ https://review.opendev.org/#/c/641994/2/specs/train/approved/amd-sev-libvirt-support.rst@167 + + An alternative approach is to configure the + :oslo.config:option:`reserved_host_memory_mb` option in the + ``[DEFAULT]`` section of :file:`nova.conf`, based on the expected + maximum number of SEV guests simultaneously running on the host, and + the details provided in `an earlier version of the AMD SEV spec`__ + regarding memory region sizes, which cover how to calculate it + correctly. + + __ https://specs.openstack.org/openstack/nova-specs/specs/stein/approved/amd-sev-libvirt-support.html#proposed-change + + See `the Memory Locking and Accounting section of the AMD SEV spec`__ + and `previous discussion for further details`__. + + __ http://specs.openstack.org/openstack/nova-specs/specs/train/approved/amd-sev-libvirt-support.html#memory-locking-and-accounting + __ https://review.opendev.org/#/c/641994/2/specs/train/approved/amd-sev-libvirt-support.rst@167 + +- A cloud administrator will need to define one or more SEV-enabled + flavors :ref:`as described below `, unless it + is sufficient for users to define SEV-enabled images. + +Additionally the cloud operator should consider the following optional +steps: + +.. _num_memory_encrypted_guests: + +- Configure the :oslo.config:option:`libvirt.num_memory_encrypted_guests` + option in :file:`nova.conf` to represent the number of guests an SEV + compute node can host concurrently with memory encrypted at the + hardware level. For example: + + .. code-block:: ini + + [libvirt] + num_memory_encrypted_guests = 15 + + This option exists because on AMD SEV-capable hardware, the memory + controller has a fixed number of slots for holding encryption keys, + one per guest. For example, at the time of writing, earlier + generations of hardware only have 15 slots, thereby limiting the + number of SEV guests which can be run concurrently to 15. Nova + needs to track how many slots are available and used in order to + avoid attempting to exceed that limit in the hardware. + + At the time of writing (September 2019), work is in progress to + allow QEMU and libvirt to expose the number of slots available on + SEV hardware; however until this is finished and released, it will + not be possible for Nova to programmatically detect the correct + value. + + So this configuration option serves as a stop-gap, allowing the + cloud operator the option of providing this value manually. It may + later be demoted to a fallback value for cases where the limit + cannot be detected programmatically, or even removed altogether when + Nova's minimum QEMU version guarantees that it can always be + detected. + + .. note:: + + When deciding whether to use the default of ``None`` or manually + impose a limit, operators should carefully weigh the benefits + vs. the risk. The benefits of using the default are a) immediate + convenience since nothing needs to be done now, and b) convenience + later when upgrading compute hosts to future versions of Nova, + since again nothing will need to be done for the correct limit to + be automatically imposed. 
However the risk is that until + auto-detection is implemented, users may be able to attempt to + launch guests with encrypted memory on hosts which have already + reached the maximum number of guests simultaneously running with + encrypted memory. This risk may be mitigated by other limitations + which operators can impose, for example if the smallest RAM + footprint of any flavor imposes a maximum number of simultaneously + running guests which is less than or equal to the SEV limit. + +- Configure :oslo.config:option:`ram_allocation_ratio` on all SEV-capable + compute hosts to ``1.0``. Use of SEV requires locking guest memory, meaning + it is not possible to overcommit host memory. + + Alternatively, you can explicitly configure small pages for instances using + the :nova:extra-spec:`hw:mem_page_size` flavor extra spec and equivalent + image metadata property. For more information, see :doc:`huge-pages`. + +- Configure :oslo.config:option:`libvirt.hw_machine_type` on all + SEV-capable compute hosts to include ``x86_64=q35``, so that all + x86_64 images use the ``q35`` machine type by default. (Currently + Nova defaults to the ``pc`` machine type for the ``x86_64`` + architecture, although `it is expected that this will change in the + future`__.) + + Changing the default from ``pc`` to ``q35`` makes the creation and + configuration of images by users more convenient by removing the + need for the ``hw_machine_type`` property to be set to ``q35`` on + every image for which SEV booting is desired. + + .. caution:: + + Consider carefully whether to set this option. It is + particularly important since a limitation of the implementation + prevents the user from receiving an error message with a helpful + explanation if they try to boot an SEV guest when neither this + configuration option nor the image property are set to select + a ``q35`` machine type. + + On the other hand, setting it to ``q35`` may have other + undesirable side-effects on other images which were expecting to + be booted with ``pc``, so it is suggested to set it on a single + compute node or aggregate, and perform careful testing of typical + images before rolling out the setting to all SEV-capable compute + hosts. + + __ https://bugs.launchpad.net/nova/+bug/1780138 + + +.. _extra-specs-memory-encryption: + +Configuring a flavor or image +----------------------------- + +Once an operator has covered the above steps, users can launch SEV +instances either by requesting a flavor for which the operator set the +:nova:extra-spec:`hw:mem_encryption` extra spec to ``True``, or by using an +image with the ``hw_mem_encryption`` property set to ``True``. For example, to +enable SEV for a flavor: + +.. code-block:: console + + $ openstack flavor set FLAVOR-NAME \ + --property hw:mem_encryption=true + +These do not inherently cause a preference for SEV-capable hardware, +but for now SEV is the only way of fulfilling the requirement for +memory encryption. However in the future, support for other +hardware-level guest memory encryption technology such as Intel MKTME +may be added. If a guest specifically needs to be booted using SEV +rather than any other memory encryption technology, it is possible to +ensure this by setting the :nova:extra-spec:`trait{group}:HW_CPU_X86_AMD_SEV` +extra spec or equivalent image metadata property to ``required``. + +In all cases, SEV instances can only be booted from images which have +the ``hw_firmware_type`` property set to ``uefi``, and only when the +machine type is set to ``q35``. 
This can be set per image by setting +the image property ``hw_machine_type=q35``, or per compute node by +the operator via :oslo.config:option:`libvirt.hw_machine_type` as +explained above. + + +Limitations +----------- + +Impermanent limitations +~~~~~~~~~~~~~~~~~~~~~~~ + +The following limitations may be removed in the future as the +hardware, firmware, and various layers of software receive new +features: + +- SEV-encrypted VMs cannot yet be live-migrated or suspended, + therefore they will need to be fully shut down before migrating off + an SEV host, e.g. if maintenance is required on the host. + +- SEV-encrypted VMs cannot contain directly accessible host devices + (PCI passthrough). So for example mdev vGPU support will not + currently work. However technologies based on `vhost-user`__ should + work fine. + + __ https://wiki.qemu.org/Features/VirtioVhostUser + +- The boot disk of SEV-encrypted VMs can only be ``virtio``. + (``virtio-blk`` is typically the default for libvirt disks on x86, + but can also be explicitly set e.g. via the image property + ``hw_disk_bus=virtio``). Valid alternatives for the disk + include using ``hw_disk_bus=scsi`` with + ``hw_scsi_model=virtio-scsi`` , or ``hw_disk_bus=sata``. + +- QEMU and libvirt cannot yet expose the number of slots available for + encrypted guests in the memory controller on SEV hardware. Until + this is implemented, it is not possible for Nova to programmatically + detect the correct value. As a short-term workaround, operators can + optionally manually specify the upper limit of SEV guests for each + compute host, via the new + :oslo.config:option:`libvirt.num_memory_encrypted_guests` + configuration option :ref:`described above + `. + +Permanent limitations +~~~~~~~~~~~~~~~~~~~~~ + +The following limitations are expected long-term: + +- The number of SEV guests allowed to run concurrently will always be + limited. `On the first generation of EPYC machines it will be + limited to 15 guests`__; however this limit becomes much higher with + the second generation (Rome). + + __ https://www.redhat.com/archives/libvir-list/2019-January/msg00652.html + +- The operating system running in an encrypted virtual machine must + contain SEV support. + +Non-limitations +~~~~~~~~~~~~~~~ + +For the sake of eliminating any doubt, the following actions are *not* +expected to be limited when SEV encryption is used: + +- Cold migration or shelve, since they power off the VM before the + operation at which point there is no encrypted memory (although this + could change since there is work underway to add support for `PMEM + `_) + +- Snapshot, since it only snapshots the disk + +- ``nova evacuate`` (despite the name, more akin to resurrection than + evacuation), since this is only initiated when the VM is no longer + running + +- Attaching any volumes, as long as they do not require attaching via + an IDE bus + +- Use of spice / VNC / serial / RDP consoles + +- :doc:`VM guest virtual NUMA ` + + +References +---------- + +- `libvirt driver launching AMD SEV-encrypted instances (spec)`__ + +.. __: http://specs.openstack.org/openstack/nova-specs/specs/train/approved/amd-sev-libvirt-support.html diff --git a/doc/source/admin/ssh-configuration.rst b/doc/source/admin/ssh-configuration.rst index f7e054fdc69..5adff142924 100644 --- a/doc/source/admin/ssh-configuration.rst +++ b/doc/source/admin/ssh-configuration.rst @@ -6,7 +6,7 @@ Configure SSH between compute nodes .. 
todo:: - Consider merging this into a larger "live-migration" document or to the + Consider merging this into a larger "migration" document or to the installation guide If you are resizing or migrating an instance between hypervisors, you might @@ -14,6 +14,12 @@ encounter an SSH (Permission denied) error. Ensure that each node is configured with SSH key authentication so that the Compute service can use SSH to move disks to other nodes. +.. note:: + + It is not necessary that all the compute nodes share the same key pair. + However for the ease of the configuration, this document only utilizes a + single key pair for communication between compute nodes. + To share a key pair between compute nodes, complete the following steps: #. On the first node, obtain a key pair (public key and private key). Use the @@ -28,14 +34,15 @@ To share a key pair between compute nodes, complete the following steps: # usermod -s /bin/bash nova - Switch to the nova account. + Ensure you can switch to the nova account: .. code-block:: console - # su nova + # su - nova #. As root, create the folder that is needed by SSH and place the private key - that you obtained in step 1 into this folder: + that you obtained in step 1 into this folder, and add the pub key to the + authorized_keys file: .. code-block:: console @@ -43,29 +50,20 @@ To share a key pair between compute nodes, complete the following steps: cp /var/lib/nova/.ssh/id_rsa echo 'StrictHostKeyChecking no' >> /var/lib/nova/.ssh/config chmod 600 /var/lib/nova/.ssh/id_rsa /var/lib/nova/.ssh/authorized_keys + echo >> /var/lib/nova/.ssh/authorized_keys -#. Repeat steps 2-4 on each node. - - .. note:: - - The nodes must share the same key pair, so do not generate a new key pair - for any subsequent nodes. - -#. From the first node, where you created the SSH key, run: +#. Copy the whole folder created in step 4 to the rest of the nodes: .. code-block:: console - ssh-copy-id -i nova@remote-host - - This command installs your public key in a remote machine's - ``authorized_keys`` folder. + # scp -r /var/lib/nova/.ssh remote-host:/var/lib/nova/ #. Ensure that the nova user can now log in to each node without using a password: .. code-block:: console - # su nova + # su - nova $ ssh *computeNodeAddress* $ exit diff --git a/doc/source/admin/support-compute.rst b/doc/source/admin/support-compute.rst index 04b8eed98d4..8522e51d795 100644 --- a/doc/source/admin/support-compute.rst +++ b/doc/source/admin/support-compute.rst @@ -9,8 +9,18 @@ a compute node to the instances that run on that node. Another common problem is trying to run 32-bit images on a 64-bit compute node. This section shows you how to troubleshoot Compute. +.. todo:: Move the sections below into sub-pages for readability. + +.. toctree:: + :maxdepth: 1 + + troubleshooting/orphaned-allocations.rst + troubleshooting/rebuild-placement-db.rst + troubleshooting/affinity-policy-violated.rst + + Compute service logging -~~~~~~~~~~~~~~~~~~~~~~~ +----------------------- Compute stores a log file for each service in ``/var/log/nova``. For example, ``nova-compute.log`` is the log for the ``nova-compute`` service. You can set @@ -31,8 +41,9 @@ settings. In ``nova.conf``, include the ``logfile`` option to enable logging. Alternatively you can set ``use_syslog = 1`` so that the nova daemon logs to syslog. + Guru Meditation reports -~~~~~~~~~~~~~~~~~~~~~~~ +----------------------- A Guru Meditation report is sent by the Compute service upon receipt of the ``SIGUSR2`` signal (``SIGUSR1`` before Mitaka). 
This report is a @@ -66,10 +77,11 @@ The report has the following sections: For more information, see :doc:`/reference/gmr`. + .. _compute-common-errors-and-fixes: Common errors and fixes for Compute -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +----------------------------------- The `ask.openstack.org `_ site offers a place to ask and answer questions, and you can also mark questions as frequently asked @@ -77,16 +89,17 @@ questions. This section describes some errors people have posted previously. Bugs are constantly being fixed, so online resources are a great way to get the most up-to-date errors and fixes. + Credential errors, 401, and 403 forbidden errors -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +------------------------------------------------ Problem -------- +~~~~~~~ Missing credentials cause a ``403 forbidden`` error. Solution --------- +~~~~~~~~ To resolve this issue, use one of these methods: @@ -107,11 +120,41 @@ services. When your CA information is available, create your ZIP file. Also, check your HTTP proxy settings to see whether they cause problems with ``novarc`` creation. + +Live migration permission issues +-------------------------------- + +Problem +~~~~~~~ + +When live migrating an instance, you may see errors like the below: + +.. code-block:: shell + + libvirtError: operation failed: Failed to connect to remote libvirt URI + qemu+ssh://stack@cld6b16/system: Cannot recv data: Host key verification + failed.: Connection reset by peer + +Solution +~~~~~~~~ + +Ensure you have completed all the steps outlined in +:doc:`/admin/ssh-configuration`. In particular, it's important to note +that the ``libvirt`` process runs as ``root`` even though it may be connecting +to a different user (``stack`` in the above example). You can ensure everything +is correctly configured by attempting to connect to the remote host via the +``root`` user. Using the above example once again: + +.. code-block:: shell + + $ su - -c 'ssh stack@cld6b16' + + Instance errors -~~~~~~~~~~~~~~~ +--------------- Problem -------- +~~~~~~~ Sometimes a particular instance shows ``pending`` or you cannot SSH to it. Sometimes the image itself is the problem. For example, when you use flat @@ -119,7 +162,7 @@ manager networking, you do not have a DHCP server and certain images do not support interface injection; you cannot connect to them. Solution --------- +~~~~~~~~ To fix instance errors use an image that does support this method, such as Ubuntu, which obtains an IP address correctly with FlatManager network @@ -153,11 +196,12 @@ if this command returns an error: # virsh create libvirt.xml + Empty log output for Linux instances -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +------------------------------------ Problem -------- +~~~~~~~ You can view the log output of running instances from either the :guilabel:`Log` tab of the dashboard or the output of :command:`nova @@ -169,7 +213,7 @@ instance via a serial console while the instance itself is not configured to send output to the console. Solution --------- +~~~~~~~~ To rectify this, append the following parameters to kernel arguments specified in the instance's boot loader: @@ -181,16 +225,17 @@ in the instance's boot loader: Upon rebooting, the instance will be configured to send output to the Compute service. + Reset the state of an instance -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +------------------------------ Problem -------- +~~~~~~~ Instances can remain in an intermediate state, such as ``deleting``. 
Solution --------- +~~~~~~~~ You can use the :command:`nova reset-state` command to manually reset the state of an instance to an error state. You can then delete the instance. For @@ -208,17 +253,18 @@ active state instead of an error state. For example: $ nova reset-state --active c6bbbf26-b40a-47e7-8d5c-eb17bf65c485 + Injection problems -~~~~~~~~~~~~~~~~~~ +------------------ Problem -------- +~~~~~~~ Instances may boot slowly, or do not boot. File injection can cause this problem. Solution --------- +~~~~~~~~ To disable injection in libvirt, set the following in ``nova.conf``: @@ -229,45 +275,22 @@ To disable injection in libvirt, set the following in ``nova.conf``: .. note:: - If you have not enabled the configuration drive and you want to make - user-specified files available from the metadata server for to improve - performance and avoid boot failure if injection fails, you must disable - injection. + If you have not enabled the config drive and you want to make user-specified + files available from the metadata server for to improve performance and + avoid boot failure if injection fails, you must disable injection. -Disable live snapshotting -~~~~~~~~~~~~~~~~~~~~~~~~~ - -Problem -------- - -Administrators using libvirt version ``1.2.2`` may experience problems with -live snapshot creation. Occasionally, libvirt version ``1.2.2`` fails to create -live snapshots under the load of creating concurrent snapshot. - -Solution --------- - -To effectively disable the libvirt live snapshotting, until the problem is -resolved, configure the ``disable_libvirt_livesnapshot`` option. You can turn -off the live snapshotting mechanism by setting up its value to ``True`` in the -``[workarounds]`` section of the ``nova.conf`` file: - -.. code-block:: ini - - [workarounds] - disable_libvirt_livesnapshot = True Cannot find suitable emulator for x86_64 -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +---------------------------------------- Problem -------- +~~~~~~~ When you attempt to create a VM, the error shows the VM is in the ``BUILD`` then ``ERROR`` state. Solution --------- +~~~~~~~~ On the KVM host, run :command:`cat /proc/cpuinfo`. Make sure the ``vmx`` or ``svm`` flags are set. @@ -276,16 +299,17 @@ Follow the instructions in the :ref:`enable-kvm` section in the Nova Configuration Reference to enable hardware virtualization support in your BIOS. + Failed to attach volume after detaching -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +--------------------------------------- Problem -------- +~~~~~~~ Failed to attach a volume after detaching the same volume. Solution --------- +~~~~~~~~ You must change the device name on the :command:`nova-attach` command. The VM might not clean up after a :command:`nova-detach` command runs. This example @@ -311,11 +335,12 @@ You might also have this problem after attaching and detaching the same volume from the same VM with the same mount point multiple times. In this case, restart the KVM host. + Failed to attach volume, systool is not installed -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +------------------------------------------------- Problem -------- +~~~~~~~ This warning and error occurs if you do not have the required ``sysfsutils`` package installed on the compute node: @@ -332,7 +357,7 @@ package installed on the compute node: Failed to attach volume 13d5c633-903a-4764-a5a0-3336945b1db1 at /dev/vdk. Solution --------- +~~~~~~~~ Install the ``sysfsutils`` package on the compute node. 
For example: @@ -340,11 +365,12 @@ Install the ``sysfsutils`` package on the compute node. For example: # apt-get install sysfsutils + Failed to connect volume in FC SAN -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +---------------------------------- Problem -------- +~~~~~~~ The compute node failed to connect to a volume in a Fibre Channel (FC) SAN configuration. The WWN may not be zoned correctly in your FC SAN that links the @@ -364,16 +390,17 @@ compute host to the storage array: operation.(HTTP 500)(Request-ID: req-71e5132b-21aa-46ee-b3cc-19b5b4ab2f00) Solution --------- +~~~~~~~~ The network administrator must configure the FC SAN fabric by correctly zoning the WWN (port names) from your compute node HBAs. + Multipath call failed exit -~~~~~~~~~~~~~~~~~~~~~~~~~~ +-------------------------- Problem -------- +~~~~~~~ Multipath call failed exit. This warning occurs in the Compute log if you do not have the optional ``multipath-tools`` package installed on the compute @@ -389,7 +416,7 @@ your message are unique to your system. Multipath call failed exit (96) Solution --------- +~~~~~~~~ Install the ``multipath-tools`` package on the compute node. For example: @@ -397,11 +424,12 @@ Install the ``multipath-tools`` package on the compute node. For example: # apt-get install multipath-tools + Failed to Attach Volume, Missing sg_scan -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +---------------------------------------- Problem -------- +~~~~~~~ Failed to attach volume to an instance, ``sg_scan`` file not found. This error occurs when the sg3-utils package is not installed on the compute node. The @@ -416,7 +444,7 @@ IDs in your message are unique to your system: Stdout: '/usr/local/bin/nova-rootwrap: Executable not found: /usr/bin/sg_scan' Solution --------- +~~~~~~~~ Install the ``sg3-utils`` package on the compute node. For example: @@ -424,11 +452,12 @@ Install the ``sg3-utils`` package on the compute node. For example: # apt-get install sg3-utils + Requested microversions are ignored -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +----------------------------------- Problem -------- +~~~~~~~ When making a request with a microversion beyond 2.1, for example: @@ -443,9 +472,73 @@ thought it is allowed with the `2.15 microversion`_. .. _2.15 microversion: https://docs.openstack.org/nova/latest/reference/api-microversion-history.html#id13 Solution --------- +~~~~~~~~ Ensure the ``compute`` endpoint in the identity service catalog is pointing at ``/v2.1`` instead of ``/v2``. The former route supports microversions, while the latter route is considered the legacy v2.0 compatibility-mode route which renders all requests as if they were made on the legacy v2.0 API. + + +.. _user_token_timeout: + +User token times out during long-running operations +--------------------------------------------------- + +Problem +~~~~~~~ + +Long-running operations such as live migration or snapshot can sometimes +overrun the expiry of the user token. In such cases, post operations such +as cleaning up after a live migration can fail when the nova-compute service +needs to cleanup resources in other services, such as in the block-storage +(cinder) or networking (neutron) services. + +For example: + +.. 
code-block:: console + + 2018-12-17 13:47:29.591 16987 WARNING nova.virt.libvirt.migration [req-7bc758de-b2e4-461b-a971-f79be6cd4703 313d1247d7b845da9c731eec53e50a26 2f693c782fa748c2baece8db95b4ba5b - default default] [instance: ead8ecc3-f473-4672-a67b-c44534c6042d] Live migration not completed after 2400 sec + 2018-12-17 13:47:30.097 16987 WARNING nova.virt.libvirt.driver [req-7bc758de-b2e4-461b-a971-f79be6cd4703 313d1247d7b845da9c731eec53e50a26 2f693c782fa748c2baece8db95b4ba5b - default default] [instance: ead8ecc3-f473-4672-a67b-c44534c6042d] Migration operation was cancelled + 2018-12-17 13:47:30.299 16987 ERROR nova.virt.libvirt.driver [req-7bc758de-b2e4-461b-a971-f79be6cd4703 313d1247d7b845da9c731eec53e50a26 2f693c782fa748c2baece8db95b4ba5b - default default] [instance: ead8ecc3-f473-4672-a67b-c44534c6042d] Live Migration failure: operation aborted: migration job: canceled by client: libvirtError: operation aborted: migration job: canceled by client + 2018-12-17 13:47:30.685 16987 INFO nova.compute.manager [req-7bc758de-b2e4-461b-a971-f79be6cd4703 313d1247d7b845da9c731eec53e50a26 2f693c782fa748c2baece8db95b4ba5b - default default] [instance: ead8ecc3-f473-4672-a67b-c44534c6042d] Swapping old allocation on 3e32d595-bd1f-4136-a7f4-c6703d2fbe18 held by migration 17bec61d-544d-47e0-a1c1-37f9d7385286 for instance + 2018-12-17 13:47:32.450 16987 ERROR nova.volume.cinder [req-7bc758de-b2e4-461b-a971-f79be6cd4703 313d1247d7b845da9c731eec53e50a26 2f693c782fa748c2baece8db95b4ba5b - default default] Delete attachment failed for attachment 58997d5b-24f0-4073-819e-97916fb1ee19. Error: The request you have made requires authentication. (HTTP 401) Code: 401: Unauthorized: The request you have made requires authentication. (HTTP 401) + +Solution +~~~~~~~~ + +Configure nova to use service user tokens to supplement the regular user token +used to initiate the operation. The identity service (keystone) will then +authenticate a request using the service user token if the user token has +already expired. + +To use this feature, create a service user in the identity service, similar to +how you would create the ``nova`` service user. + +Then configure the :oslo.config:group:`service_user` section of the nova +configuration file, for example: + +.. code-block:: ini + + [service_user] + send_service_user_token = True + auth_type = password + project_domain_name = Default + project_name = service + user_domain_name = Default + password = secretservice + username = nova + auth_url = https://104.130.216.102/identity + ... + +And configure the other identity options as necessary for the service user, +much like you would configure nova to work with the image service (glance) +or networking service. + +.. note:: + + Please note that the role of the :oslo.config:group:`service_user` you + configure needs to be a superset of + :oslo.config:option:`keystone_authtoken.service_token_roles` (The option + :oslo.config:option:`keystone_authtoken.service_token_roles` is configured + in cinder, glance and neutron). A minimal consuming-side sketch is + included further below. diff --git a/doc/source/admin/system-admin.rst b/doc/source/admin/system-admin.rst deleted file mode 100644 index 9dee99296dd..00000000000 --- a/doc/source/admin/system-admin.rst +++ /dev/null @@ -1,89 +0,0 @@ -.. _compute-trusted-pools.rst: - -===================== -System administration -===================== - -..
toctree:: - :maxdepth: 2 - - manage-users.rst - manage-volumes.rst - flavors.rst - default-ports.rst - admin-password-injection.rst - manage-the-cloud.rst - manage-logs.rst - root-wrap-reference.rst - configuring-migrations.rst - live-migration-usage.rst - remote-console-access.rst - service-groups.rst - node-down.rst - adv-config.rst - -To effectively administer compute, you must understand how the different -installed nodes interact with each other. Compute can be installed in many -different ways using multiple servers, but generally multiple compute nodes -control the virtual servers and a cloud controller node contains the remaining -Compute services. - -The Compute cloud works using a series of daemon processes named ``nova-*`` -that exist persistently on the host machine. These binaries can all run on the -same machine or be spread out on multiple boxes in a large deployment. The -responsibilities of services and drivers are: - -**Services** - -``nova-api`` - Receives XML requests and sends them to the rest of the system. A WSGI app - routes and authenticates requests. Supports the OpenStack Compute APIs. A - ``nova.conf`` configuration file is created when Compute is installed. - -``nova-compute`` - Manages virtual machines. Loads a Service object, and exposes the public - methods on ComputeManager through a Remote Procedure Call (RPC). - -``nova-conductor`` - Provides database-access support for compute nodes (thereby reducing security - risks). - -``nova-consoleauth`` - Manages console authentication. - - .. deprecated:: 18.0.0 - - ``nova-consoleauth`` is deprecated since 18.0.0 (Rocky) and will be removed - in an upcoming release. - -``nova-objectstore`` - A simple file-based storage system for images that replicates most of the S3 - API. It can be replaced with OpenStack Image service and either a simple - image manager or OpenStack Object Storage as the virtual machine image - storage facility. It must exist on the same node as ``nova-compute``. - -``nova-network`` - Manages floating and fixed IPs, DHCP, bridging and VLANs. Loads a Service - object which exposes the public methods on one of the subclasses of - NetworkManager. Different networking strategies are available by changing the - ``network_manager`` configuration option to ``FlatManager``, - ``FlatDHCPManager``, or ``VLANManager`` (defaults to ``VLANManager`` if - nothing is specified). - - .. deprecated:: 14.0.0 - - ``nova-network`` was deprecated in the OpenStack Newton release. - -``nova-scheduler`` - Dispatches requests for new virtual machines to the correct node. - -``nova-novncproxy`` - Provides a VNC proxy for browsers, allowing VNC consoles to access virtual - machines. - -.. note:: - - Some services have drivers that change how the service implements its core - functionality. For example, the ``nova-compute`` service supports drivers - that let you choose which hypervisor type it can use. ``nova-network`` and - ``nova-scheduler`` also have drivers. 
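As a companion to the ``[service_user]`` example above, the following is a
minimal sketch of the consuming side (cinder is shown here; the values are
illustrative, and the two options are the standard keystonemiddleware
``[keystone_authtoken]`` options, with ``service`` being the middleware's
default role):

.. code-block:: ini

   [keystone_authtoken]
   # Roles that a valid service token must carry; 'service' is the
   # keystonemiddleware default.
   service_token_roles = service
   # Reject requests whose service token lacks one of the roles above.
   service_token_roles_required = True

With this in place, an expired user token that arrives together with a valid
service token carrying one of the listed roles is still accepted, which is
what allows the post-operation cleanup described above to succeed.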
diff --git a/doc/source/admin/troubleshooting/affinity-policy-violated.rst b/doc/source/admin/troubleshooting/affinity-policy-violated.rst new file mode 100644 index 00000000000..a7a563491e2 --- /dev/null +++ b/doc/source/admin/troubleshooting/affinity-policy-violated.rst @@ -0,0 +1,78 @@ +Affinity policy violated with parallel requests +=============================================== + +Problem +------- + +Parallel server create requests for affinity or anti-affinity land on the same +host and servers go to the ``ACTIVE`` state even though the affinity or +anti-affinity policy was violated. + +Solution +-------- + +There are two ways to avoid anti-/affinity policy violations among multiple +server create requests. + +Create multiple servers as a single request +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +Use the `multi-create API`_ with the ``min_count`` parameter set or the +`multi-create CLI`_ with the ``--min`` option set to the desired number of +servers. + +This works because when the batch of requests is visible to ``nova-scheduler`` +at the same time as a group, it will be able to choose compute hosts that +satisfy the anti-/affinity constraint and will send them to the same hosts or +different hosts accordingly. + +.. _multi-create API: https://docs.openstack.org/api-ref/compute/#create-multiple-servers +.. _multi-create CLI: https://docs.openstack.org/python-openstackclient/latest/cli/command-objects/server.html#server-create + +Adjust Nova configuration settings +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +When requests are made separately and the scheduler cannot consider the batch +of requests at the same time as a group, anti-/affinity races are handled by +what is called the "late affinity check" in ``nova-compute``. Once a server +lands on a compute host, if the request involves a server group, +``nova-compute`` contacts the API database (via ``nova-conductor``) to retrieve +the server group and then it checks whether the affinity policy has been +violated. If the policy has been violated, ``nova-compute`` initiates a +reschedule of the server create request. Note that this means the deployment +must have :oslo.config:option:`scheduler.max_attempts` set greater than ``1`` +(default is ``3``) to handle races. + +An ideal configuration for multiple cells will minimize `upcalls`_ from the +cells to the API database. This is how devstack, for example, is configured in +the CI gate. The cell conductors do not set +:oslo.config:option:`api_database.connection` and ``nova-compute`` sets +:oslo.config:option:`workarounds.disable_group_policy_check_upcall` to +``True``. + +However, if a deployment needs to handle racing affinity requests, it needs to +configure cell conductors to have access to the API database, for example: + +.. code-block:: ini + + [api_database] + connection = mysql+pymysql://root:a@127.0.0.1/nova_api?charset=utf8 + +The deployment also needs to configure ``nova-compute`` services not to disable +the group policy check upcall by either not setting (use the default) +:oslo.config:option:`workarounds.disable_group_policy_check_upcall` or setting +it to ``False``, for example: + +.. code-block:: ini + + [workarounds] + disable_group_policy_check_upcall = False + +With these settings, anti-/affinity policy should not be violated even when +parallel server create requests are racing. + +Future work is needed to add anti-/affinity support to the placement service in +order to eliminate the need for the late affinity check in ``nova-compute``. + +.. 
_upcalls: https://docs.openstack.org/nova/latest/user/cellsv2-layout.html#operations-requiring-upcalls + diff --git a/doc/source/admin/troubleshooting/orphaned-allocations.rst b/doc/source/admin/troubleshooting/orphaned-allocations.rst new file mode 100644 index 00000000000..ca49aa4aab4 --- /dev/null +++ b/doc/source/admin/troubleshooting/orphaned-allocations.rst @@ -0,0 +1,201 @@ +Orphaned resource allocations +============================= + +Problem +------- + +There are orphaned resource allocations in the placement service which can +cause resource providers to: + +* Appear to the scheduler to be more utilized than they really are +* Prevent deletion of compute services + +One scenario in which this can happen is when a compute service host is having +problems, so the administrator forces it down and evacuates servers from it. +Note that in this case "evacuates" refers to the server ``evacuate`` action, +not live migrating all servers from the running compute service. Assume the +compute host is down and fenced. + +In this case, the servers have allocations tracked in placement against both +the down source compute node and their current destination compute host. For +example, here is a server *vm1* which has been evacuated from node *devstack1* +to node *devstack2*: + +.. code-block:: console + + $ openstack --os-compute-api-version 2.53 compute service list --service nova-compute + +--------------------------------------+--------------+-----------+------+---------+-------+----------------------------+ + | ID | Binary | Host | Zone | Status | State | Updated At | + +--------------------------------------+--------------+-----------+------+---------+-------+----------------------------+ + | e3c18c2d-9488-4863-b728-f3f292ec5da8 | nova-compute | devstack1 | nova | enabled | down | 2019-10-25T20:13:51.000000 | + | 50a20add-cc49-46bd-af96-9bb4e9247398 | nova-compute | devstack2 | nova | enabled | up | 2019-10-25T20:13:52.000000 | + | b92afb2e-cd00-4074-803e-fff9aa379c2f | nova-compute | devstack3 | nova | enabled | up | 2019-10-25T20:13:53.000000 | + +--------------------------------------+--------------+-----------+------+---------+-------+----------------------------+ + $ vm1=$(openstack server show vm1 -f value -c id) + $ openstack server show $vm1 -f value -c OS-EXT-SRV-ATTR:host + devstack2 + +The server now has allocations against both *devstack1* and *devstack2* +resource providers in the placement service: + +..
code-block:: console + + $ devstack1=$(openstack resource provider list --name devstack1 -f value -c uuid) + $ devstack2=$(openstack resource provider list --name devstack2 -f value -c uuid) + $ openstack resource provider show --allocations $devstack1 + +-------------+-----------------------------------------------------------------------------------------------------------+ + | Field | Value | + +-------------+-----------------------------------------------------------------------------------------------------------+ + | uuid | 9546fce4-9fb5-4b35-b277-72ff125ad787 | + | name | devstack1 | + | generation | 6 | + | allocations | {u'a1e6e0b2-9028-4166-b79b-c177ff70fbb7': {u'resources': {u'VCPU': 1, u'MEMORY_MB': 512, u'DISK_GB': 1}}} | + +-------------+-----------------------------------------------------------------------------------------------------------+ + $ openstack resource provider show --allocations $devstack2 + +-------------+-----------------------------------------------------------------------------------------------------------+ + | Field | Value | + +-------------+-----------------------------------------------------------------------------------------------------------+ + | uuid | 52d0182d-d466-4210-8f0d-29466bb54feb | + | name | devstack2 | + | generation | 3 | + | allocations | {u'a1e6e0b2-9028-4166-b79b-c177ff70fbb7': {u'resources': {u'VCPU': 1, u'MEMORY_MB': 512, u'DISK_GB': 1}}} | + +-------------+-----------------------------------------------------------------------------------------------------------+ + $ openstack --os-placement-api-version 1.12 resource provider allocation show $vm1 + +--------------------------------------+------------+------------------------------------------------+----------------------------------+----------------------------------+ + | resource_provider | generation | resources | project_id | user_id | + +--------------------------------------+------------+------------------------------------------------+----------------------------------+----------------------------------+ + | 9546fce4-9fb5-4b35-b277-72ff125ad787 | 6 | {u'VCPU': 1, u'MEMORY_MB': 512, u'DISK_GB': 1} | 2f3bffc5db2b47deb40808a4ed2d7c7a | 2206168427c54d92ae2b2572bb0da9af | + | 52d0182d-d466-4210-8f0d-29466bb54feb | 3 | {u'VCPU': 1, u'MEMORY_MB': 512, u'DISK_GB': 1} | 2f3bffc5db2b47deb40808a4ed2d7c7a | 2206168427c54d92ae2b2572bb0da9af | + +--------------------------------------+------------+------------------------------------------------+----------------------------------+----------------------------------+ + +One way to find all servers that were evacuated from *devstack1* is: + +.. 
code-block:: console + + $ nova migration-list --source-compute devstack1 --migration-type evacuation + +----+--------------------------------------+-------------+-----------+----------------+--------------+-------------+--------+--------------------------------------+------------+------------+----------------------------+----------------------------+------------+ + | Id | UUID | Source Node | Dest Node | Source Compute | Dest Compute | Dest Host | Status | Instance UUID | Old Flavor | New Flavor | Created At | Updated At | Type | + +----+--------------------------------------+-------------+-----------+----------------+--------------+-------------+--------+--------------------------------------+------------+------------+----------------------------+----------------------------+------------+ + | 1 | 8a823ba3-e2e9-4f17-bac5-88ceea496b99 | devstack1 | devstack2 | devstack1 | devstack2 | 192.168.0.1 | done | a1e6e0b2-9028-4166-b79b-c177ff70fbb7 | None | None | 2019-10-25T17:46:35.000000 | 2019-10-25T17:46:37.000000 | evacuation | + +----+--------------------------------------+-------------+-----------+----------------+--------------+-------------+--------+--------------------------------------+------------+------------+----------------------------+----------------------------+------------+ + +Trying to delete the resource provider for *devstack1* will fail while there +are allocations against it: + +.. code-block:: console + + $ openstack resource provider delete $devstack1 + Unable to delete resource provider 9546fce4-9fb5-4b35-b277-72ff125ad787: Resource provider has allocations. (HTTP 409) + +Solution +-------- + +Using the example resources above, remove the allocation for server *vm1* from +the *devstack1* resource provider. If you have `osc-placement +`_ 1.8.0 or newer, you can use the +:command:`openstack resource provider allocation unset` command to remove the +allocations for consumer *vm1* from resource provider *devstack1*: + +.. code-block:: console + + $ openstack --os-placement-api-version 1.12 resource provider allocation \ + unset --provider $devstack1 $vm1 + +--------------------------------------+------------+------------------------------------------------+----------------------------------+----------------------------------+ + | resource_provider | generation | resources | project_id | user_id | + +--------------------------------------+------------+------------------------------------------------+----------------------------------+----------------------------------+ + | 52d0182d-d466-4210-8f0d-29466bb54feb | 4 | {u'VCPU': 1, u'MEMORY_MB': 512, u'DISK_GB': 1} | 2f3bffc5db2b47deb40808a4ed2d7c7a | 2206168427c54d92ae2b2572bb0da9af | + +--------------------------------------+------------+------------------------------------------------+----------------------------------+----------------------------------+ + +If you have *osc-placement* 1.7.x or older, the ``unset`` command is not +available and you must instead overwrite the allocations. Note that we do not +use :command:`openstack resource provider allocation delete` here because that +will remove the allocations for the server from all resource providers, +including *devstack2* where it is now running; instead, we use +:command:`openstack resource provider allocation set` to overwrite the +allocations and only retain the *devstack2* provider allocations. If you do +remove all allocations for a given server, you can heal them later. See `Using +heal_allocations`_ for details. + +.. 
code-block:: console + + $ openstack --os-placement-api-version 1.12 resource provider allocation set $vm1 \ + --project-id 2f3bffc5db2b47deb40808a4ed2d7c7a \ + --user-id 2206168427c54d92ae2b2572bb0da9af \ + --allocation rp=52d0182d-d466-4210-8f0d-29466bb54feb,VCPU=1 \ + --allocation rp=52d0182d-d466-4210-8f0d-29466bb54feb,MEMORY_MB=512 \ + --allocation rp=52d0182d-d466-4210-8f0d-29466bb54feb,DISK_GB=1 + +--------------------------------------+------------+------------------------------------------------+----------------------------------+----------------------------------+ + | resource_provider | generation | resources | project_id | user_id | + +--------------------------------------+------------+------------------------------------------------+----------------------------------+----------------------------------+ + | 52d0182d-d466-4210-8f0d-29466bb54feb | 4 | {u'VCPU': 1, u'MEMORY_MB': 512, u'DISK_GB': 1} | 2f3bffc5db2b47deb40808a4ed2d7c7a | 2206168427c54d92ae2b2572bb0da9af | + +--------------------------------------+------------+------------------------------------------------+----------------------------------+----------------------------------+ + +Once the *devstack1* resource provider allocations have been removed using +either of the approaches above, the *devstack1* resource provider can be +deleted: + +.. code-block:: console + + $ openstack resource provider delete $devstack1 + +And the related compute service if desired: + +.. code-block:: console + + $ openstack --os-compute-api-version 2.53 compute service delete e3c18c2d-9488-4863-b728-f3f292ec5da8 + +For more details on the resource provider commands used in this guide, refer +to the `osc-placement plugin documentation`_. + +.. _osc-placement plugin documentation: https://docs.openstack.org/osc-placement/latest/ + +Using heal_allocations +~~~~~~~~~~~~~~~~~~~~~~ + +If you have a particularly troubling allocation consumer and just want to +delete its allocations from all providers, you can use the +:command:`openstack resource provider allocation delete` command and then +heal the allocations for the consumer using the +:ref:`heal_allocations command `. For example: + +.. code-block:: console + + $ openstack resource provider allocation delete $vm1 + $ nova-manage placement heal_allocations --verbose --instance $vm1 + Looking for instances in cell: 04879596-d893-401c-b2a6-3d3aa096089d(cell1) + Found 1 candidate instances. + Successfully created allocations for instance a1e6e0b2-9028-4166-b79b-c177ff70fbb7. + Processed 1 instances. + $ openstack resource provider allocation show $vm1 + +--------------------------------------+------------+------------------------------------------------+ + | resource_provider | generation | resources | + +--------------------------------------+------------+------------------------------------------------+ + | 52d0182d-d466-4210-8f0d-29466bb54feb | 5 | {u'VCPU': 1, u'MEMORY_MB': 512, u'DISK_GB': 1} | + +--------------------------------------+------------+------------------------------------------------+ + +Note that deleting allocations and then relying on ``heal_allocations`` may not +always be the best solution since healing allocations does not account for some +things: + +* `Migration-based allocations`_ would be lost if manually deleted during a + resize. These are allocations tracked by the migration resource record + on the source compute service during a migration. +* Healing allocations only partially support nested allocations. 
Nested + allocations due to Neutron ports having QoS policies have been supported since + the 20.0.0 (Train) release. But nested allocations due to vGPU or Cyborg device + profile requests in the flavor are not supported. Also, if you are using + provider.yaml files on compute hosts to define additional resources and those + resources are defined on child resource providers, then instances using such + resources are not supported. + +If you do use the ``heal_allocations`` command to clean up allocations for a +specific trouble instance, it is recommended to take note of what the +allocations were before you remove them in case you need to reset them manually +later. Use the :command:`openstack resource provider allocation show` command +to get allocations for a consumer before deleting them, e.g.: + +.. code-block:: console + + $ openstack --os-placement-api-version 1.12 resource provider allocation show $vm1 + +.. _Migration-based allocations: https://specs.openstack.org/openstack/nova-specs/specs/queens/implemented/migration-allocations.html diff --git a/doc/source/admin/troubleshooting/rebuild-placement-db.rst b/doc/source/admin/troubleshooting/rebuild-placement-db.rst new file mode 100644 index 00000000000..cf877fe9aa4 --- /dev/null +++ b/doc/source/admin/troubleshooting/rebuild-placement-db.rst @@ -0,0 +1,56 @@ +Rebuild placement DB +==================== + +Problem +------- + +You have somehow changed a nova cell database and the ``compute_nodes`` table +entries are now reporting different uuids to the placement service, but +placement already has ``resource_providers`` table entries with the same +names as those computes, so the resource providers in placement and the +compute nodes in the nova database are not synchronized. This can happen, for +example, as a result of restoring the nova cell database from a backup in +which the compute hosts have not changed but are using different uuids. + +Nova reports compute node inventory to placement using the +``hypervisor_hostname`` and uuid of the ``compute_nodes`` table records, which +map to entries in the placement ``resource_providers`` table with a unique +constraint on the name (the hostname in this case) and uuid. Trying to create +a new resource provider with a new uuid but the same name as an existing +provider results in a 409 error from placement, such as in `bug 1817833`_. + +.. _bug 1817833: https://bugs.launchpad.net/nova/+bug/1817833 + +Solution +-------- + +.. warning:: This is likely a last resort when *all* computes and resource + providers are not synchronized and it is simpler to just rebuild + the placement database from the current state of nova. This may, + however, not work when using placement for more advanced features + such as :neutron-doc:`ports with minimum bandwidth guarantees ` + or `accelerators `_. + Obviously testing first in a pre-production environment is ideal. + +These are the steps at a high level: + +#. Make a backup of the existing placement database in case these steps fail + and you need to start over. + +#. Recreate the placement database and run the schema migrations to + initialize the placement database. + +#. Either restart the ``nova-compute`` services or wait for their + :oslo.config:option:`update_resources_interval` to elapse, so that they + report resource providers and their inventory to placement. + +#. Run the :ref:`nova-manage placement heal_allocations ` + command to report allocations to placement for the existing instances in + nova. + +#.
Run the :ref:`nova-manage placement sync_aggregates ` + command to synchronize nova host aggregates to placement resource provider + aggregates. + +Once complete, test your deployment as usual, e.g. running Tempest integration +and/or Rally tests, creating, migrating and deleting a server, etc. diff --git a/doc/source/admin/uefi.rst b/doc/source/admin/uefi.rst new file mode 100644 index 00000000000..8c10f205066 --- /dev/null +++ b/doc/source/admin/uefi.rst @@ -0,0 +1,69 @@ +==== +UEFI +==== + +.. versionadded:: 17.0.0 (Queens) + +Nova supports configuring a `UEFI bootloader`__ for guests. This brings about +important advantages over legacy BIOS bootloaders and allows for features such +as :doc:`secure-boot`. + +.. __: https://en.wikipedia.org/wiki/Unified_Extensible_Firmware_Interface + + +Enabling UEFI +------------- + +Currently the configuration of UEFI guest bootloaders is only supported when +using the libvirt compute driver with a :oslo.config:option:`libvirt.virt_type` +of ``kvm`` or ``qemu`` or when using the Hyper-V compute driver with certain +machine types. When using the libvirt compute driver with AArch64-based guests, +UEFI is automatically enabled as AArch64 does not support BIOS. + +.. todo:: + + Update this once compute drivers start reporting a trait indicating UEFI + bootloader support. + + +Configuring a flavor or image +----------------------------- + +Configuring a UEFI bootloader varies depending on the compute driver in use. + +.. rubric:: Libvirt + +UEFI support is enabled by default on AArch64-based guests. For other guest +architectures, you can request UEFI support with libvirt by setting the +``hw_firmware_type`` image property to ``uefi``. For example: + +.. code-block:: bash + + $ openstack image set --property hw_firmware_type=uefi $IMAGE + +.. rubric:: Hyper-V + +It is not possible to explicitly request UEFI support with Hyper-V. Rather, it +is enabled implicitly when using `Generation 2`__ guests. You can request a +Generation 2 guest by setting the ``hw_machine_type`` image metadata property +to ``hyperv-gen2``. For example: + +.. code-block:: bash + + $ openstack image set --property hw_machine_type=hyperv-gen2 $IMAGE + +.. __: https://docs.microsoft.com/en-us/windows-server/virtualization/hyper-v/plan/should-i-create-a-generation-1-or-2-virtual-machine-in-hyper-v + + +References +---------- + +* `Hyper-V UEFI Secure Boot (spec)`__ +* `Open Virtual Machine Firmware (OVMF) Status Report`__ +* `Anatomy of a boot, a QEMU perspective`__ +* `Should I create a generation 1 or 2 virtual machine in Hyper-V?`__ + +.. __: https://specs.openstack.org/openstack/nova-specs/specs/ocata/implemented/hyper-v-uefi-secureboot.html +.. __: http://www.linux-kvm.org/downloads/lersek/ovmf-whitepaper-c770f8c.txt +.. __: https://www.qemu.org/2020/07/03/anatomy-of-a-boot/ +.. __: https://docs.microsoft.com/en-us/windows-server/virtualization/hyper-v/plan/should-i-create-a-generation-1-or-2-virtual-machine-in-hyper-v diff --git a/doc/source/admin/upgrades.rst b/doc/source/admin/upgrades.rst new file mode 100644 index 00000000000..75ac5a4ca56 --- /dev/null +++ b/doc/source/admin/upgrades.rst @@ -0,0 +1,341 @@ +======== +Upgrades +======== + +Nova aims to provide upgrades with minimal downtime. + +Firstly, the data plane. There should be no VM downtime when you upgrade +Nova. Nova has had this since the early days. + +Secondly, we want no downtime during upgrades of the Nova control plane. +This document is trying to describe how we can achieve that. 
+ +Once we have introduced the key concepts relating to upgrade, we will +introduce the process needed for a no-downtime upgrade of nova. + + +.. _minimal_downtime_upgrade: + +Minimal Downtime Upgrade Process +-------------------------------- + +Plan your upgrade +~~~~~~~~~~~~~~~~~ + +* Read and ensure you understand the release notes for the next release. + +* You should ensure all required steps from the previous upgrade have been + completed, such as data migrations. + +* Make a backup of your database. Nova does not support downgrading of the + database. Hence, in case of upgrade failure, restoring the database from a + backup is the only choice. + +* During the upgrade, be aware that there will be additional load on + nova-conductor. You may find you need to add extra nova-conductor workers + to deal with the additional upgrade-related load. + +Rolling upgrade process +~~~~~~~~~~~~~~~~~~~~~~~ + +To reduce downtime, the compute services can be upgraded in a rolling fashion. +This means upgrading a few services at a time. This results in a condition where +both old (N) and new (N+1) nova-compute services co-exist for a certain time +period. Note that there is no upgrade of the hypervisor here; this is just +upgrading the nova services. If reduced downtime is not a concern (or lower +complexity is desired), all services may be taken down and restarted at the +same time. + +.. important:: + + Nova does not currently support the coexistence of N and N+2 or greater + :program:`nova-compute` or :program:`nova-conductor` services in the same + deployment. The ``nova-conductor`` service will fail to start when a + ``nova-compute`` service that is older than the previous release (N-2 or + greater) is detected. Similarly, in a :doc:`deployment with multiple cells + `, neither the super conductor service nor any + per-cell conductor service will start if any other conductor service in the + deployment is older than the previous release. + +#. Before maintenance window: + + * Start the process with the controller node. Install the code for the next + version of Nova, either in a venv or a separate control plane node, + including all the python dependencies. + + * Using the newly installed nova code, run the DB sync. First run + ``nova-manage api_db sync``, then ``nova-manage db sync``. ``nova-manage + db sync`` should be run for all cell databases, including ``cell0``. If + necessary, the ``--config-file`` argument can be used to point to the + correct ``nova.conf`` file for the given cell. + + These schema change operations should have minimal or no effect on + performance, and should not cause any operations to fail. + + * At this point, new columns and tables may exist in the database. These + DB schema changes are done in a way that both the N and N+1 release can + perform operations against the same schema. + +#. During maintenance window: + + * Several nova services rely on the external placement service being at the + latest level. Therefore, you must upgrade placement before any nova + services. See the + :placement-doc:`placement upgrade notes ` for + more details on upgrading the placement service. + + * For maximum safety (no failed API operations), gracefully shut down all + the services (i.e. SIG_TERM) except nova-compute. + + * Before restarting services with new code, perform the release-specific + readiness check with ``nova-status upgrade check``. See the + :ref:`nova-status upgrade check ` for more details + on the status check.
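+
+     As a rough illustration, a passing check looks like this (the exact
+     set of checks and the output format vary by release; this output is
+     abbreviated):
+
+     .. code-block:: console
+
+        $ nova-status upgrade check
+        +-------------------------------+
+        | Upgrade Check Results         |
+        +-------------------------------+
+        | Check: Cells v2               |
+        | Result: Success               |
+        | Details: None                 |
+        +-------------------------------+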
+ + * Start all services on the new code, with + ``[upgrade_levels]compute=auto`` in nova.conf. It is safest to + start nova-conductor first and nova-api last. Note that you may + use a static alias name instead of ``auto``, such as + ``[upgrade_levels]compute=``. Also note that this step is + only required if compute services are not upgraded in lock-step + with the control services. + + * If desired, gracefully shut down nova-compute (i.e. SIG_TERM) + services in small batches, then start the new version of the code + with: ``[upgrade_levels]compute=auto``. If this batch-based approach + is used, only a few compute nodes will have any delayed API + actions at a time, which helps ensure there is enough capacity online + to service any boot requests that happen during this time. + +#. After maintenance window: + + * Once all services are running the new code, double-check in the DB that + there are no old orphaned service records, using ``nova service-list``. + + * Now that all services are upgraded, we need to send the SIG_HUP signal, so all + the services clear any cached service version data. When a new service + starts, it automatically detects which version of the compute RPC protocol + to use, and it can decide if it is safe to do any online data migrations. + Note, if you used a static value for the upgrade_level, such as + ``[upgrade_levels]compute=``, you must update nova.conf to remove + that configuration value and do a full service restart. + + * Now that all the services are upgraded and signaled, the system is able to + use the latest version of the RPC protocol and can access all of the + features in the new release. + + * Once all the services are running the latest version of the code, and all + the services are aware they all have been upgraded, it is safe to + transform the data in the database into its new format. While some of this + work happens on demand when the system reads a database row that needs + updating, we must get all the data transformed into the current version + before the next upgrade. Additionally, some data may not be transformed + automatically, so performing the data migration is necessary to avoid + performance degradation due to compatibility routines. + + * This process can put significant extra write load on the + database. Complete all online data migrations using: + ``nova-manage db online_data_migrations --max-count ``. Note + that you can use the ``--max-count`` argument to reduce the load this + operation will place on the database, which allows you to run a + small chunk of the migrations until all of the work is done. The chunk size + you should use depends on your infrastructure and how much additional load + you can impose on the database. To reduce load, perform smaller batches + with delays between chunks. To reduce time to completion, run larger batches. + Each time it is run, the command will show a summary of completed and remaining + records. If using the ``--max-count`` option, the command should be rerun + while it returns exit status 1 (which indicates that some migrations took + effect, and more work may remain to be done), even if some migrations + produce errors. If all possible migrations have completed and some are + still producing errors, exit status 2 will be returned. In this case, the + cause of the errors should be investigated and resolved. Migrations should be + considered successfully completed only when the command returns exit status 0.
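+
+     For example, a completed run might look like this (the migration name
+     and counts are illustrative only):
+
+     .. code-block:: console
+
+        $ nova-manage db online_data_migrations --max-count 50
+        Running batches of 50 until complete
+        +-----------------------------+--------------+-----------+
+        | Migration                   | Total Needed | Completed |
+        +-----------------------------+--------------+-----------+
+        | fill_virtual_interface_list | 0            | 0         |
+        +-----------------------------+--------------+-----------+
+        $ echo $?
+        0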
+ + * At this point, you must also ensure you update the configuration to stop + using any deprecated features or options, and to perform any required work + to transition to alternative features. All deprecated options are + supported for at least one cycle, but should be removed before your next + upgrade is performed. + + +Current Database Upgrade Types +------------------------------ + +Currently Nova has two types of database upgrades that are in use. + +- Schema Migrations +- Data Migrations + +Nova does not support database downgrades. + +.. _schema-migrations: + +Schema Migrations +~~~~~~~~~~~~~~~~~ + +Schema migrations are defined in ``nova/db/main/migrations/versions`` and +``nova/db/api/migrations/versions``. They are the routines that transform our +database structure, which should be additive and able to be applied to a +running system before service code has been upgraded. + +For information on developing your own schema migrations as part of a feature +or bugfix, refer to :doc:`/reference/database-migrations`. + +.. note:: + + The API database migrations should be assumed to run before the + migrations for the main/cell databases. This is because the former + contains information about how to find and connect to the latter. + Some management commands that operate on multiple cells will attempt + to list and iterate over cell mapping records, which require a + functioning API database schema. + +.. _data-migrations: + +Data Migrations +~~~~~~~~~~~~~~~ + +Online data migrations occur in two places: + +#. Inline migrations that occur as part of normal run-time + activity as data is read in the old format and written in the + new format + +#. Background online migrations that are performed using + ``nova-manage`` to complete transformations that will not occur + incidentally due to normal runtime activity. + +An example of an online data migration is the flavor migration done as part +of Nova object version 1.18. This included a transient migration of flavor +storage from one database location to another. + +For information on developing your own data migrations as part of a feature +or bugfix, refer to :doc:`/reference/database-migrations`. + +Migration policy +~~~~~~~~~~~~~~~~ + +The following guidelines for schema and data migrations are followed in order +to ease upgrades: + +* Additive schema migrations - In general, almost all schema migrations should + be additive. Put simply, they should only create elements like columns, + indices, and tables. + +* Subtractive schema migrations - To remove an element like a column or table + during the N release cycle: + + #. The element must be deprecated and retained for backward compatibility. + (This allows for graceful upgrade from N to N+1.) + + #. Data migration, by the objects layer, must completely migrate data from + the old version of the schema to the new version. + + #. The column can then be removed with a migration at the start of N+2. + +* All schema migrations should be idempotent. For example, a migration + should check if an element exists in the schema before attempting to add + it. This logic comes for free in the autogenerated workflow of + the online migrations. + +* Constraints - When adding a foreign or unique key constraint, the schema + migration code needs to handle possible problems with data before applying + the constraint. (Example: A unique constraint must clean up duplicate + records before applying said constraint.)
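+
+  As a sketch of that idea in raw SQL (the ``foo`` table and its columns
+  here are hypothetical, not from a real Nova migration):
+
+  .. code-block:: console
+
+     mysql> -- remove duplicates, keeping the row with the lowest id
+     mysql> DELETE a FROM foo a JOIN foo b
+         ->     ON a.name = b.name AND a.id > b.id;
+     mysql> ALTER TABLE foo ADD CONSTRAINT uniq_foo_name UNIQUE (name);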
+ +* Data migrations - As mentioned above, data migrations will be done in an + online fashion by custom code in the object layer that handles moving data + between the old and new portions of the schema. In addition, for each type + of data migration performed, there should exist a nova-manage option for an + operator to manually request that rows be migrated. + + +Concepts +-------- + +Here are the key concepts you need to know before reading the section on the +upgrade process: + +RPC version pinning + Through careful RPC versioning, newer nodes are able to talk to older + nova-compute nodes. When upgrading control plane nodes, we can pin them + at an older version of the compute RPC API, until all the compute nodes + are able to be upgraded. + https://wiki.openstack.org/wiki/RpcMajorVersionUpdates + + .. note:: + + The procedure for rolling upgrades with multiple v2 cells is not + yet determined. + +Online Configuration Reload + During the upgrade, we pin new services at the older RPC version. When all + services are updated to use newer code, we need to unpin them so we are + able to use any new functionality. + Ideally, we need a way, whether using the current SIGHUP signal handling + or otherwise, to update the currently running process to use the latest + configuration without having to restart the service. + +Graceful service shutdown + Many nova services are python processes listening for messages on an AMQP + queue, including nova-compute. When sent SIGTERM, the process stops getting + new work from its queue, completes any outstanding work, and then + terminates. During this process, messages can be left on the queue for when + the python process starts back up. + This gives us a way to shut down a service using older code, and start + up a service using newer code with minimal impact. If it's a service that + can have multiple workers, like nova-conductor, you can usually add the + new workers before the graceful shutdown of the old workers. In the case + of singleton services, like nova-compute, some actions could be delayed + during the restart, but ideally no actions should fail due to the restart. + + .. note:: + + While this is true for the RabbitMQ RPC backend, we need to confirm + what happens for other RPC backends. + +API load balancer draining + When upgrading API nodes, you can make your load balancer only send new + connections to the newer API nodes, allowing for a seamless update of your + API nodes. + +Expand/Contract DB Migrations + Modern databases are able to make many schema changes while you are still + writing to the database. Taking this a step further, we can make all DB + changes by first adding the new structures, expanding. Then you can slowly + move all the data into a new location and format. Once that is complete, + you can drop bits of the schema that are no longer needed, + i.e. contract. This happens multiple cycles after we have stopped + using a particular piece of schema, and can happen in a schema + migration without affecting runtime code. + +Online Data Migrations using objects + Since Kilo, we have moved all data migration into the DB objects code. + Migrating data in the database from the old format to the new format is + done in the object code when reading or saving things that are in the old + format. For records that are not updated, you need to + run a background process to convert those records into the newer format. + This process must be completed before you contract the database schema.
+ +DB prune deleted rows + Currently resources are soft deleted in the main database, so users are able + to track instances in the DB that are created and destroyed in production. + However, most people have a data retention policy of, say, 30 or 90 days, + after which they will want to delete those entries. Not deleting + those entries affects DB performance as indices grow very large and data + migrations take longer as there is more data to migrate. + +nova-conductor object backports + RPC pinning ensures new services can talk to the older service's method + signatures. But many of the parameters are objects that may well be too + new for the old service to understand, so you are able to send the object + back to the nova-conductor to be downgraded to a version the older service + can understand. + + +Testing +------- + +We use the "grenade" jobs to test upgrades. The current tests only cover the +existing upgrade process, where old computes can run with the new control +plane but the control plane is turned off during DB migrations. diff --git a/doc/source/admin/vendordata.rst b/doc/source/admin/vendordata.rst new file mode 100644 index 00000000000..ff412e83e57 --- /dev/null +++ b/doc/source/admin/vendordata.rst @@ -0,0 +1,178 @@ +========== +Vendordata +========== + +.. note:: + + This section provides deployment information about the vendordata feature. + For end-user information about the vendordata feature and instance metadata + in general, refer to the :doc:`user guide `. + +The *vendordata* feature provides a way to pass vendor or deployment-specific +information to instances. Users can access this using :doc:`the metadata +service ` or :doc:`config drives +`. + +There are two vendordata modules provided with nova: ``StaticJSON`` and +``DynamicJSON``. + + +``StaticJSON`` +-------------- + +The ``StaticJSON`` module includes the contents of a static JSON file loaded +from disk. This can be used for things which don't change between instances, +such as the location of the corporate puppet server. It is the default provider. + +Configuration +~~~~~~~~~~~~~ + +The service you must configure to enable the ``StaticJSON`` vendordata module +depends on how guests are accessing vendordata. If using the metadata service, +configuration applies to either :program:`nova-api` or +:program:`nova-api-metadata`, depending on the deployment, while if using +config drives, configuration applies to :program:`nova-compute`. However, +configuration is otherwise the same and the following options apply: + +- :oslo.config:option:`api.vendordata_providers` +- :oslo.config:option:`api.vendordata_jsonfile_path` + +Refer to the :doc:`metadata service ` and :doc:`config +drive ` documentation for more information on how to +configure the required services. + + +``DynamicJSON`` +--------------- + +The ``DynamicJSON`` module can make a request to an external REST service to +determine what metadata to add to an instance. This is how we recommend you +generate things like Active Directory tokens which change per instance. + +When used, the ``DynamicJSON`` module will make a request to any REST services +listed in the :oslo.config:option:`api.vendordata_dynamic_targets` configuration +option. There can be more than one of these, but note that they will be queried +once per metadata request from the instance, which can mean a lot of traffic +depending on your configuration and the configuration of the instance. + +The following data is passed to your REST service as a JSON-encoded POST: + +..
list-table:: + :header-rows: 1 + + * - Key + - Description + * - ``project-id`` + - The ID of the project that owns this instance. + * - ``instance-id`` + - The UUID of this instance. + * - ``image-id`` + - The ID of the image used to boot this instance. + * - ``user-data`` + - As specified by the user at boot time. + * - ``hostname`` + - The hostname of the instance. + * - ``metadata`` + - As specified by the user at boot time. + +Metadata fetched from the REST service will appear in the metadata service at a +new file called ``vendor_data2.json``, with a path (either in the metadata service +URL or in the config drive) like this:: + + openstack/latest/vendor_data2.json + +For each dynamic target, there will be an entry in the JSON file named after +that target. For example: + +.. code-block:: json + + { + "testing": { + "value1": 1, + "value2": 2, + "value3": "three" + } + } + +The `novajoin`__ project provides a dynamic vendordata service to manage host +instantiation in an IPA server. + +__ https://opendev.org/x/novajoin + +Deployment considerations +~~~~~~~~~~~~~~~~~~~~~~~~~ + +Nova provides authentication to external metadata services in order to provide +some level of certainty that the request came from nova. This is done by +providing a service token with the request -- you can then just deploy your +metadata service with the keystone authentication WSGI middleware. This is +configured using the keystone authentication parameters in the +:oslo.config:group:`vendordata_dynamic_auth` configuration group. + +Configuration +~~~~~~~~~~~~~ + +As with ``StaticJSON``, the service you must configure to enable the +``DynamicJSON`` vendordata module depends on how guests are accessing +vendordata. If using the metadata service, configuration applies to either +:program:`nova-api` or :program:`nova-api-metadata`, depending on the +deployment, while if using config drives, configuration applies to +:program:`nova-compute`. However, configuration is otherwise the same and the +following options apply: + +- :oslo.config:option:`api.vendordata_providers` +- :oslo.config:option:`api.vendordata_dynamic_ssl_certfile` +- :oslo.config:option:`api.vendordata_dynamic_connect_timeout` +- :oslo.config:option:`api.vendordata_dynamic_read_timeout` +- :oslo.config:option:`api.vendordata_dynamic_failure_fatal` +- :oslo.config:option:`api.vendordata_dynamic_targets` + +Refer to the :doc:`metadata service ` and :doc:`config +drive ` documentation for more information on how to +configure the required services. + +In addition, there are also many options related to authentication.
These are +provided by :keystone-doc:`keystone <>` but are listed below for completeness: + +- :oslo.config:option:`vendordata_dynamic_auth.cafile` +- :oslo.config:option:`vendordata_dynamic_auth.certfile` +- :oslo.config:option:`vendordata_dynamic_auth.keyfile` +- :oslo.config:option:`vendordata_dynamic_auth.insecure` +- :oslo.config:option:`vendordata_dynamic_auth.timeout` +- :oslo.config:option:`vendordata_dynamic_auth.collect_timing` +- :oslo.config:option:`vendordata_dynamic_auth.split_loggers` +- :oslo.config:option:`vendordata_dynamic_auth.auth_type` +- :oslo.config:option:`vendordata_dynamic_auth.auth_section` +- :oslo.config:option:`vendordata_dynamic_auth.auth_url` +- :oslo.config:option:`vendordata_dynamic_auth.system_scope` +- :oslo.config:option:`vendordata_dynamic_auth.domain_id` +- :oslo.config:option:`vendordata_dynamic_auth.domain_name` +- :oslo.config:option:`vendordata_dynamic_auth.project_id` +- :oslo.config:option:`vendordata_dynamic_auth.project_name` +- :oslo.config:option:`vendordata_dynamic_auth.project_domain_id` +- :oslo.config:option:`vendordata_dynamic_auth.project_domain_name` +- :oslo.config:option:`vendordata_dynamic_auth.trust_id` +- :oslo.config:option:`vendordata_dynamic_auth.default_domain_id` +- :oslo.config:option:`vendordata_dynamic_auth.default_domain_name` +- :oslo.config:option:`vendordata_dynamic_auth.user_id` +- :oslo.config:option:`vendordata_dynamic_auth.username` +- :oslo.config:option:`vendordata_dynamic_auth.user_domain_id` +- :oslo.config:option:`vendordata_dynamic_auth.user_domain_name` +- :oslo.config:option:`vendordata_dynamic_auth.password` +- :oslo.config:option:`vendordata_dynamic_auth.tenant_id` +- :oslo.config:option:`vendordata_dynamic_auth.tenant_name` + +Refer to the :keystone-doc:`keystone documentation ` +for information on configuring these. + + +References +---------- + +* Michael Still's talk from the Queens summit in Sydney, `Metadata, User Data, + Vendor Data, oh my!`__ +* Michael's blog post on `deploying a simple vendordata service`__ which + provides more details and sample code to supplement the documentation above. + +__ https://www.openstack.org/videos/sydney-2017/metadata-user-data-vendor-data-oh-my +__ https://www.madebymikal.com/nova-vendordata-deployment-an-excessively-detailed-guide/ diff --git a/doc/source/admin/virtual-gpu.rst b/doc/source/admin/virtual-gpu.rst index f7b76a67b2e..358e613bc09 100644 --- a/doc/source/admin/virtual-gpu.rst +++ b/doc/source/admin/virtual-gpu.rst @@ -2,6 +2,11 @@ Attaching virtual GPU devices to guests ======================================= +.. important:: + + The functionality described below is only supported by the libvirt/KVM + driver. + The virtual GPU feature in Nova allows a deployment to provide specific GPU types for instances using physical GPUs that can provide virtual devices. @@ -10,14 +15,11 @@ Graphics Processing Unit (pGPU) can be virtualized as multiple virtual Graphics Processing Units (vGPUs) if the hypervisor supports the hardware driver and has the capability to create guests using those virtual devices. -This feature is highly dependent on the hypervisor, its version and the -physical devices present on the host. - -.. important:: As of the Queens release, there is no upstream continuous - integration testing with a hardware environment that has virtual - GPUs and therefore this feature is considered experimental. +This feature is highly dependent on the version of libvirt and the physical +devices present on the host. 
In addition, the vendor's vGPU driver software +must be installed and configured on the host at the same time. -Hypervisor-specific caveats are mentioned in the `Caveats`_ section. +Caveats are mentioned in the `Caveats`_ section. To enable virtual GPUs, follow the steps below: @@ -31,24 +33,53 @@ Enable GPU types (Compute) #. Specify which specific GPU type(s) the instances would get. - Edit :oslo.config:option:`devices.enabled_vgpu_types`: + Edit :oslo.config:option:`devices.enabled_mdev_types`: + + .. code-block:: ini + + [devices] + enabled_mdev_types = nvidia-35 + + If you want to support more than a single GPU type, you need to provide a + separate configuration section for each device. For example: .. code-block:: ini [devices] - enabled_vgpu_types = nvidia-35 + enabled_mdev_types = nvidia-35, nvidia-36 + + [mdev_nvidia-35] + device_addresses = 0000:84:00.0,0000:85:00.0 - .. note:: + [mdev_nvidia-36] + device_addresses = 0000:86:00.0 - As of the Queens release, Nova only supports a single type. If more - than one vGPU type is specified (as a comma-separated list), only the - first one will be used. + where you have to define which physical GPUs are supported per GPU type. + + If the same PCI address is provided for two different types, nova-compute + will refuse to start and issue a specific error in the logs. To know which specific type(s) to mention, please refer to `How to discover a GPU type`_. + .. versionchanged:: 21.0.0 + + Multiple GPU types are only supported by the Ussuri release and + later versions. + #. Restart the ``nova-compute`` service. + + .. warning:: + + Changing the type is possible, but since existing physical GPUs can't + address multiple guests having different types, Nova will return a + ``NoValidHost`` error if instances with the original type still + exist. Accordingly, it's highly recommended to instead deploy the + new type to new compute nodes that don't already have workloads and + rebuild instances on the nodes that need to change types. + + Configure a flavor (Controller) ------------------------------- @@ -60,13 +91,14 @@ Configure a flavor to request one virtual GPU: .. note:: - As of the Queens release, all hypervisors that support virtual GPUs - only accept a single virtual GPU per instance. + As of the Queens release, all hypervisors that support virtual GPUs + only accept a single virtual GPU per instance. The enabled vGPU types on the compute hosts are not exposed to API users. Flavors configured for vGPU support can be tied to host aggregates as a means to properly schedule those flavors onto the compute hosts that support them. -See the :doc:`/user/aggregates` for more information. +See :doc:`/admin/aggregates` for more information. + Create instances with virtual GPU devices ----------------------------------------- @@ -79,90 +111,178 @@ provided by compute nodes. $ openstack server create --flavor vgpu_1 --image cirros-0.3.5-x86_64-uec --wait test-vgpu -.. note:: - - As of the Queens release, only the *FilterScheduler* scheduler driver - uses the Placement API. - How to discover a GPU type -------------------------- -Depending on your hypervisor: +Virtual GPUs are seen as mediated devices. Physical PCI devices (the graphic +card here) supporting virtual GPUs propose mediated device (mdev) types. Since +mediated devices are supported by the Linux kernel through sysfs files after +installing the vendor's virtual GPU driver software, you can see the required +properties as follows: + +..
code-block:: console -- For libvirt, virtual GPUs are seen as mediated devices. Physical PCI devices - (the graphic card here) supporting virtual GPUs propose mediated device - (mdev) types. Since mediated devices are supported by the Linux kernel - through sysfs files, you can see the required properties as follows: + $ ls /sys/class/mdev_bus/*/mdev_supported_types + /sys/class/mdev_bus/0000:84:00.0/mdev_supported_types: + nvidia-35 nvidia-36 nvidia-37 nvidia-38 nvidia-39 nvidia-40 nvidia-41 nvidia-42 nvidia-43 nvidia-44 nvidia-45 - .. code-block:: console + /sys/class/mdev_bus/0000:85:00.0/mdev_supported_types: + nvidia-35 nvidia-36 nvidia-37 nvidia-38 nvidia-39 nvidia-40 nvidia-41 nvidia-42 nvidia-43 nvidia-44 nvidia-45 - $ ls /sys/class/mdev_bus/*/mdev_supported_types - /sys/class/mdev_bus/0000:84:00.0/mdev_supported_types: - nvidia-35 nvidia-36 nvidia-37 nvidia-38 nvidia-39 nvidia-40 nvidia-41 nvidia-42 nvidia-43 nvidia-44 nvidia-45 + /sys/class/mdev_bus/0000:86:00.0/mdev_supported_types: + nvidia-35 nvidia-36 nvidia-37 nvidia-38 nvidia-39 nvidia-40 nvidia-41 nvidia-42 nvidia-43 nvidia-44 nvidia-45 - /sys/class/mdev_bus/0000:85:00.0/mdev_supported_types: - nvidia-35 nvidia-36 nvidia-37 nvidia-38 nvidia-39 nvidia-40 nvidia-41 nvidia-42 nvidia-43 nvidia-44 nvidia-45 + /sys/class/mdev_bus/0000:87:00.0/mdev_supported_types: + nvidia-35 nvidia-36 nvidia-37 nvidia-38 nvidia-39 nvidia-40 nvidia-41 nvidia-42 nvidia-43 nvidia-44 nvidia-45 - /sys/class/mdev_bus/0000:86:00.0/mdev_supported_types: - nvidia-35 nvidia-36 nvidia-37 nvidia-38 nvidia-39 nvidia-40 nvidia-41 nvidia-42 nvidia-43 nvidia-44 nvidia-45 - /sys/class/mdev_bus/0000:87:00.0/mdev_supported_types: - nvidia-35 nvidia-36 nvidia-37 nvidia-38 nvidia-39 nvidia-40 nvidia-41 nvidia-42 nvidia-43 nvidia-44 nvidia-45 +Checking allocations and inventories for virtual GPUs +----------------------------------------------------- +.. note:: -- For XenServer, virtual GPU types are created by XenServer at startup - depending on the available hardware and config files present in dom0. - You can run the command of ``xe vgpu-type-list`` from dom0 to get the - available vGPU types. The value for the field of ``model-name ( RO):`` - is the vGPU type's name which can be used to set the nova config option - ``[devices]/enabled_vgpu_types``. See the following example: + The information below is only valid from the 19.0.0 Stein release. Before + this release, inventories and allocations related to a ``VGPU`` resource + class are still on the root resource provider related to the compute node. + If upgrading from Rocky and using the libvirt driver, ``VGPU`` inventory and + allocations are moved to child resource providers that represent actual + physical GPUs. + +The examples you will see are using the `osc-placement plugin`_ for +OpenStackClient. For details on specific commands, see its documentation. + +#. Get the list of resource providers + + .. 
code-block:: console + + $ openstack resource provider list + +--------------------------------------+---------------------------------------------------------+------------+ + | uuid | name | generation | + +--------------------------------------+---------------------------------------------------------+------------+ + | 5958a366-3cad-416a-a2c9-cfbb5a472287 | virtlab606.xxxxxxxxxxxxxxxxxxxxxxxxxxx | 7 | + | fc9b9287-ef5e-4408-aced-d5577560160c | virtlab606.xxxxxxxxxxxxxxxxxxxxxxxxxxx_pci_0000_86_00_0 | 2 | + | e2f8607b-0683-4141-a8af-f5e20682e28c | virtlab606.xxxxxxxxxxxxxxxxxxxxxxxxxxx_pci_0000_85_00_0 | 3 | + | 85dd4837-76f9-41f2-9f19-df386017d8a0 | virtlab606.xxxxxxxxxxxxxxxxxxxxxxxxxxx_pci_0000_87_00_0 | 2 | + | 7033d860-8d8a-4963-8555-0aa902a08653 | virtlab606.xxxxxxxxxxxxxxxxxxxxxxxxxxx_pci_0000_84_00_0 | 2 | + +--------------------------------------+---------------------------------------------------------+------------+ + + In this example, we see the root resource provider + ``5958a366-3cad-416a-a2c9-cfbb5a472287`` with four other resource providers + that are its children and where each of them corresponds to a single + physical GPU. + +#. Check the inventory of each resource provider to see resource classes + + .. code-block:: console + + $ openstack resource provider inventory list 5958a366-3cad-416a-a2c9-cfbb5a472287 + +----------------+------------------+----------+----------+-----------+----------+-------+ + | resource_class | allocation_ratio | max_unit | reserved | step_size | min_unit | total | + +----------------+------------------+----------+----------+-----------+----------+-------+ + | VCPU | 16.0 | 48 | 0 | 1 | 1 | 48 | + | MEMORY_MB | 1.5 | 65442 | 512 | 1 | 1 | 65442 | + | DISK_GB | 1.0 | 49 | 0 | 1 | 1 | 49 | + +----------------+------------------+----------+----------+-----------+----------+-------+ + $ openstack resource provider inventory list e2f8607b-0683-4141-a8af-f5e20682e28c + +----------------+------------------+----------+----------+-----------+----------+-------+ + | resource_class | allocation_ratio | max_unit | reserved | step_size | min_unit | total | + +----------------+------------------+----------+----------+-----------+----------+-------+ + | VGPU | 1.0 | 16 | 0 | 1 | 1 | 16 | + +----------------+------------------+----------+----------+-----------+----------+-------+ + + Here you can see a ``VGPU`` inventory on the child resource provider while + other resource class inventories are still located on the root resource + provider. + +#. Check allocations for each server that is using virtual GPUs + + .. 
code-block:: console + + $ openstack server list + +--------------------------------------+-------+--------+---------------------------------------------------------+--------------------------+--------+ + | ID | Name | Status | Networks | Image | Flavor | + +--------------------------------------+-------+--------+---------------------------------------------------------+--------------------------+--------+ + | 5294f726-33d5-472a-bef1-9e19bb41626d | vgpu2 | ACTIVE | private=10.0.0.14, fd45:cdad:c431:0:f816:3eff:fe78:a748 | cirros-0.4.0-x86_64-disk | vgpu | + | a6811fc2-cec8-4f1d-baea-e2c6339a9697 | vgpu1 | ACTIVE | private=10.0.0.34, fd45:cdad:c431:0:f816:3eff:fe54:cc8f | cirros-0.4.0-x86_64-disk | vgpu | + +--------------------------------------+-------+--------+---------------------------------------------------------+--------------------------+--------+ + + $ openstack resource provider allocation show 5294f726-33d5-472a-bef1-9e19bb41626d + +--------------------------------------+------------+------------------------------------------------+ + | resource_provider | generation | resources | + +--------------------------------------+------------+------------------------------------------------+ + | 5958a366-3cad-416a-a2c9-cfbb5a472287 | 8 | {u'VCPU': 1, u'MEMORY_MB': 512, u'DISK_GB': 1} | + | 7033d860-8d8a-4963-8555-0aa902a08653 | 3 | {u'VGPU': 1} | + +--------------------------------------+------------+------------------------------------------------+ + + $ openstack resource provider allocation show a6811fc2-cec8-4f1d-baea-e2c6339a9697 + +--------------------------------------+------------+------------------------------------------------+ + | resource_provider | generation | resources | + +--------------------------------------+------------+------------------------------------------------+ + | e2f8607b-0683-4141-a8af-f5e20682e28c | 3 | {u'VGPU': 1} | + | 5958a366-3cad-416a-a2c9-cfbb5a472287 | 8 | {u'VCPU': 1, u'MEMORY_MB': 512, u'DISK_GB': 1} | + +--------------------------------------+------------+------------------------------------------------+ + + In this example, two servers were created using a flavor asking for 1 + ``VGPU``, so when looking at the allocations for each consumer UUID (which + is the server UUID), you can see that VGPU allocation is against the child + resource provider while other allocations are for the root resource + provider. Here, that means that the virtual GPU used by + ``a6811fc2-cec8-4f1d-baea-e2c6339a9697`` is actually provided by the + physical GPU having the PCI ID ``0000:85:00.0``. + + +(Optional) Provide custom traits for multiple GPU types +------------------------------------------------------- + +Since operators want to support different GPU types per compute, it would be +nice to have flavors asking for a specific GPU type. This is now possible +using custom traits by decorating child Resource Providers that correspond +to physical GPUs. - .. code-block:: console +.. note:: - [root@trailblazer-2 ~]# xe vgpu-type-list - uuid ( RO) : 78d2d963-41d6-4130-8842-aedbc559709f - vendor-name ( RO): NVIDIA Corporation - model-name ( RO): GRID M60-8Q - max-heads ( RO): 4 - max-resolution ( RO): 4096x2160 + Possible improvements in a future release could consist of providing + automatic tagging of Resource Providers with standard traits corresponding + to versioned mapping of public GPU types. For the moment, this has to be + done manually. +#. 
Get the list of resource providers

-      uuid ( RO)             : a1bb1692-8ce3-4577-a611-6b4b8f35a5c9
-      vendor-name ( RO): NVIDIA Corporation
-      model-name ( RO): GRID M60-0Q
-      max-heads ( RO): 2
-      max-resolution ( RO): 2560x1600
+   See `Checking allocations and inventories for virtual GPUs`_ first to get
+   the list of Resource Providers that support a ``VGPU`` resource class.

+#. Define a custom trait corresponding to each GPU type

-      uuid ( RO)             : 69d03200-49eb-4002-b661-824aec4fd26f
-      vendor-name ( RO): NVIDIA Corporation
-      model-name ( RO): GRID M60-2A
-      max-heads ( RO): 1
-      max-resolution ( RO): 1280x1024
+   .. code-block:: console

+      $ openstack --os-placement-api-version 1.6 trait create CUSTOM_NVIDIA_11

-      uuid ( RO)             : c58b1007-8b47-4336-95aa-981a5634d03d
-      vendor-name ( RO): NVIDIA Corporation
-      model-name ( RO): GRID M60-4Q
-      max-heads ( RO): 4
-      max-resolution ( RO): 4096x2160
+   In this example, we create a custom trait named ``CUSTOM_NVIDIA_11``.

+#. Add the corresponding trait to the Resource Provider matching the GPU

-      uuid ( RO)             : 292a2b20-887f-4a13-b310-98a75c53b61f
-      vendor-name ( RO): NVIDIA Corporation
-      model-name ( RO): GRID M60-2Q
-      max-heads ( RO): 4
-      max-resolution ( RO): 4096x2160
+   .. code-block:: console

+      $ openstack --os-placement-api-version 1.6 resource provider trait set \
+          --trait CUSTOM_NVIDIA_11 e2f8607b-0683-4141-a8af-f5e20682e28c

-      uuid ( RO)             : d377db6b-a068-4a98-92a8-f94bd8d6cc5d
-      vendor-name ( RO): NVIDIA Corporation
-      model-name ( RO): GRID M60-0B
-      max-heads ( RO): 2
-      max-resolution ( RO): 2560x1600
+   In this case, the trait ``CUSTOM_NVIDIA_11`` will be added to the Resource
+   Provider with the UUID ``e2f8607b-0683-4141-a8af-f5e20682e28c`` that
+   corresponds to the PCI address ``0000:85:00.0`` as shown above.

-      ...
+#. Amend the flavor to add a requested trait
+
+   .. code-block:: console
+
+      $ openstack flavor set --property trait:CUSTOM_NVIDIA_11=required vgpu_1
+
+   In this example, we add the ``CUSTOM_NVIDIA_11`` trait as a requirement
+   for the ``vgpu_1`` flavor we created earlier.
+
+   This will allow the Placement service to return only the Resource Providers
+   matching this trait, so only the GPUs that were decorated with it will be
+   checked for this flavor.


 Caveats
@@ -173,8 +293,6 @@ Caveats
 This information is correct as of the 17.0.0 Queens release. Where
 improvements have been made or issues fixed, they are noted per item.

-For libvirt:
-
 * Suspending a guest that has vGPUs doesn't yet work because of a libvirt
   limitation (it can't hot-unplug mediated devices from a guest). Workarounds
   using other instance actions (like snapshotting the instance or shelving it)
@@ -183,45 +301,63 @@ For libvirt:
   that will cause the instance to be set back to ACTIVE. The ``suspend``
   action in the ``os-instance-actions`` API will have an *Error* state.

+  .. versionchanged:: 25.0.0
+
+     This has been resolved in the Yoga release and backported to Xena. See
+     `bug 1948705`_.
+
 * Resizing an instance with a new flavor that has vGPU resources doesn't
   allocate those vGPUs to the instance (the instance is created without
   vGPU resources). The proposed workaround is to rebuild the instance after
   resizing it. The rebuild operation allocates vGPUs to the instance.

+  .. versionchanged:: 21.0.0
+
+     This has been resolved in the Ussuri release. See `bug 1778563`_.
+
 * Cold migrating an instance to another host will have the same problem as
   resize. If you want to migrate an instance, make sure to rebuild it after the
   migration.

+  .. 
versionchanged:: 21.0.0
+
+     This has been resolved in the Ussuri release. See `bug 1778563`_.
+
 * Rescue images do not use vGPUs. An instance being rescued does not keep its
   vGPUs during rescue. During that time, another instance can receive those
   vGPUs. This is a known issue. The recommended workaround is to rebuild an
   instance immediately after rescue. However, rebuilding the rescued instance
   only helps if there are other free vGPUs on the host.

-  .. note:: This has been resolved in the Rocky release [#]_.
+  .. versionchanged:: 18.0.0

-For XenServer:
+     This has been resolved in the Rocky release. See `bug 1762688`_.

-* Suspend and live migration with vGPUs attached depends on support from the
-  underlying XenServer version. Please see XenServer release notes for up to
-  date information on when a hypervisor supporting live migration and
-  suspend/resume with vGPUs is available. If a suspend or live migrate operation
-  is attempted with a XenServer version that does not support that operation, an
-  internal exception will occur that will cause nova setting the instance to
-  be in ERROR status. You can use the command of
-  ``openstack server set --state active `` to set it back to ACTIVE.
+For nested vGPUs:

-* Resizing an instance with a new flavor that has vGPU resources doesn't
-  allocate those vGPUs to the instance (the instance is created without
-  vGPU resources). The proposed workaround is to rebuild the instance after
-  resizing it. The rebuild operation allocates vGPUs to the instance.
+.. note::

-* Cold migrating an instance to another host will have the same problem as
-  resize. If you want to migrate an instance, make sure to rebuild it after the
-  migration.
+   This information is correct as of the 21.0.0 Ussuri release. Where
+   improvements have been made or issues fixed, they are noted per item.
+
+* When creating servers with a flavor asking for vGPUs together with
+  multi-create (e.g. ``--max 2``), the scheduler may return a ``NoValidHost``
+  exception when the total requested capacity can't be served by a single
+  physical GPU, even if each physical GPU could support at least one of the
+  requested instances on its own.
+  (See `bug 1874664 `_.)
+
+  For example, if two child RPs each have an inventory of 4 ``VGPU``:
+
+  - You can ask for a flavor with 2 vGPUs and ``--max 2``.
+  - But you can't ask for a flavor with 4 vGPUs and ``--max 2``.

-.. [#] https://bugs.launchpad.net/nova/+bug/1762688
+.. _bug 1778563: https://bugs.launchpad.net/nova/+bug/1778563
+.. _bug 1762688: https://bugs.launchpad.net/nova/+bug/1762688
+.. _bug 1948705: https://bugs.launchpad.net/nova/+bug/1948705

 .. Links
 .. _Intel GVT-g: https://01.org/igvt-g
 .. _NVIDIA GRID vGPU: http://docs.nvidia.com/grid/5.0/pdf/grid-vgpu-user-guide.pdf
+.. _osc-placement plugin: https://docs.openstack.org/osc-placement/latest/index.html
diff --git a/doc/source/admin/virtual-persistent-memory.rst b/doc/source/admin/virtual-persistent-memory.rst
new file mode 100644
index 00000000000..95ad9a942f8
--- /dev/null
+++ b/doc/source/admin/virtual-persistent-memory.rst
@@ -0,0 +1,270 @@
+=============================================
+Attaching virtual persistent memory to guests
+=============================================
+
+.. 
versionadded:: 20.0.0 (Train)
+
+Starting in the 20.0.0 (Train) release, the virtual persistent memory (vPMEM)
+feature in Nova allows a deployment using the libvirt compute driver to
+provide vPMEMs for instances using physical persistent memory (PMEM) that can
+provide virtual devices.
+
+PMEM must be partitioned into `PMEM namespaces`_ for applications to use.
+This vPMEM feature only uses PMEM namespaces in ``devdax`` mode as QEMU
+`vPMEM backends`_. For background on the underlying concepts, the
+`NVDIMM Linux kernel document`_ is recommended reading.
+
+To enable vPMEMs, follow the steps below.
+
+
+Dependencies
+------------
+
+The following are required to support the vPMEM feature:
+
+* Persistent Memory Hardware
+
+  One such product is Intel® Optane™ DC Persistent Memory.
+  `ipmctl`_ is used to configure it.
+
+* Linux Kernel version >= 4.18 with the following modules loaded:
+
+  ``dax_pmem``, ``nd_pmem``, ``device_dax``, ``nd_btt``
+
+.. note::
+
+   NVDIMM support is present in the Linux kernel v4.0 or newer. It is
+   recommended to use kernel version 4.2 or later since `NVDIMM support
+   `_
+   is enabled by default. Bugs were encountered in older versions, and all
+   verification work with OpenStack was done on kernel 4.18, so versions 4.18
+   and newer are the most likely to work reliably.
+
+* QEMU version >= 3.1.0
+
+* Libvirt version >= 5.0.0
+
+* `ndctl`_ version >= 62
+
+* daxio version >= 1.6
+
+The vPMEM feature has been verified under the software and hardware listed
+above.
+
+
+Configure PMEM namespaces (Compute)
+-----------------------------------
+
+#. Create PMEM namespaces as `vPMEM backends`_ using the `ndctl`_ utility.
+
+   For example, to create a 30GiB namespace named ``ns3``:
+
+   .. code-block:: console
+
+      $ sudo ndctl create-namespace -s 30G -m devdax -M mem -n ns3
+      {
+        "dev":"namespace1.0",
+        "mode":"devdax",
+        "map":"mem",
+        "size":"30.00 GiB (32.21 GB)",
+        "uuid":"937e9269-512b-4f65-9ac6-b74b61075c11",
+        "raw_uuid":"17760832-a062-4aef-9d3b-95ea32038066",
+        "daxregion":{
+          "id":1,
+          "size":"30.00 GiB (32.21 GB)",
+          "align":2097152,
+          "devices":[
+            {
+              "chardev":"dax1.0",
+              "size":"30.00 GiB (32.21 GB)"
+            }
+          ]
+        },
+        "name":"ns3",
+        "numa_node":1
+      }
+
+   Then list the available PMEM namespaces on the host:
+
+   .. code-block:: console
+
+      $ ndctl list -X
+      [
+        {
+          ...
+          "size":6440353792,
+          ...
+          "name":"ns0",
+          ...
+        },
+        {
+          ...
+          "size":6440353792,
+          ...
+          "name":"ns1",
+          ...
+        },
+        {
+          ...
+          "size":6440353792,
+          ...
+          "name":"ns2",
+          ...
+        },
+        {
+          ...
+          "size":32210157568,
+          ...
+          "name":"ns3",
+          ...
+        }
+      ]
+
+#. Specify which PMEM namespaces should be available to instances.
+
+   Edit :oslo.config:option:`libvirt.pmem_namespaces`:
+
+   .. code-block:: ini
+
+      [libvirt]
+      # pmem_namespaces=$LABEL:$NSNAME[|$NSNAME][,$LABEL:$NSNAME[|$NSNAME]]
+      pmem_namespaces = 6GB:ns0|ns1|ns2,LARGE:ns3
+
+   Configured PMEM namespaces must have already been created on the host as
+   described above. The conf syntax allows the admin to associate one or more
+   namespace ``$NSNAME``\ s with an arbitrary ``$LABEL`` that can subsequently
+   be used in a flavor to request one of those namespaces. It is recommended,
+   but not required, for namespaces under a single ``$LABEL`` to be the same
+   size.
+
+#. Restart the ``nova-compute`` service.
+
+   Nova will invoke `ndctl`_ to identify the configured PMEM namespaces, and
+   report vPMEM resources to placement.
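+
+   As a quick sanity check before restarting the service, you can verify that
+   the namespaces created above expose ``devdax`` character devices (a sketch;
+   the device names depend on your hardware layout, here matching the
+   ``dax1.0`` chardev shown in the JSON output above):
+
+   .. code-block:: console
+
+      $ ls /dev/dax*
+      /dev/dax0.0  /dev/dax0.1  /dev/dax0.2  /dev/dax1.0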
+
+
+Configure a flavor
+------------------
+
+Specify a comma-separated list of the ``$LABEL``\ s from
+:oslo.config:option:`libvirt.pmem_namespaces` to the flavor's ``hw:pmem``
+property. Note that multiple instances of the same label are permitted:
+
+.. code-block:: console
+
+   $ openstack flavor set --property hw:pmem='6GB' my_flavor
+   $ openstack flavor set --property hw:pmem='6GB,LARGE' my_flavor_large
+   $ openstack flavor set --property hw:pmem='6GB,6GB' m1.medium
+
+.. note:: If a NUMA topology is specified, all vPMEM devices will be put on
+          guest NUMA node 0; otherwise nova will generate one NUMA node
+          automatically for the guest.
+
+Based on the above examples, an ``openstack server create`` request with
+``my_flavor_large`` will spawn an instance with two vPMEMs. One, corresponding
+to the ``LARGE`` label, will be ``ns3``; the other, corresponding to the
+``6GB`` label, will be arbitrarily chosen from ``ns0``, ``ns1``, or ``ns2``.
+
+.. note::
+
+   Using vPMEM inside a virtual machine requires the following:
+
+   * Guest kernel version 4.18 or higher;
+   * The ``dax_pmem``, ``nd_pmem``, ``device_dax``, and ``nd_btt`` kernel
+     modules;
+   * The `ndctl`_ utility.
+
+.. note:: When resizing an instance with vPMEMs, the vPMEM data won't be
+          migrated.
+
+
+Verify inventories and allocations
+----------------------------------
+
+This section describes how to check that:
+
+* vPMEM inventories were created correctly in placement, validating the
+  `configuration described above <#configure-pmem-namespaces-compute>`_.
+* allocations were created correctly in placement for instances spawned from
+  `flavors configured with vPMEMs <#configure-a-flavor>`_.
+
+.. note::
+
+   Inventories and allocations related to vPMEM resource classes are on the
+   root resource provider related to the compute node.
+
+#. Get the list of resource providers
+
+   .. code-block:: console
+
+      $ openstack resource provider list
+      +--------------------------------------+--------+------------+
+      | uuid                                 | name   | generation |
+      +--------------------------------------+--------+------------+
+      | 1bc545f9-891f-4930-ab2b-88a56078f4be | host-1 | 47         |
+      | 7d994aef-680d-43d4-9325-a67c807e648e | host-2 | 67         |
+      +--------------------------------------+--------+------------+
+
+#. Check the inventory of each resource provider to see resource classes
+
+   Each ``$LABEL`` configured in :oslo.config:option:`libvirt.pmem_namespaces`
+   is used to generate a resource class named ``CUSTOM_PMEM_NAMESPACE_$LABEL``.
+   Nova will report to Placement the number of vPMEM namespaces configured for
+   each ``$LABEL``. For example, assuming ``host-1`` was configured as
+   described above:
+
+   .. code-block:: console
+
+      $ openstack resource provider inventory list 1bc545f9-891f-4930-ab2b-88a56078f4be
+      +-----------------------------+------------------+----------+----------+-----------+----------+--------+
+      | resource_class              | allocation_ratio | max_unit | reserved | step_size | min_unit | total  |
+      +-----------------------------+------------------+----------+----------+-----------+----------+--------+
+      | VCPU                        | 16.0             | 64       | 0        | 1         | 1        | 64     |
+      | MEMORY_MB                   | 1.5              | 190604   | 512      | 1         | 1        | 190604 |
+      | CUSTOM_PMEM_NAMESPACE_LARGE | 1.0              | 1        | 0        | 1         | 1        | 1      |
+      | CUSTOM_PMEM_NAMESPACE_6GB   | 1.0              | 3        | 0        | 1         | 1        | 3      |
+      | DISK_GB                     | 1.0              | 439      | 0        | 1         | 1        | 439    |
+      +-----------------------------+------------------+----------+----------+-----------+----------+--------+
+
+   Here you can see the vPMEM resource classes prefixed with
+   ``CUSTOM_PMEM_NAMESPACE_``. 
The ``LARGE`` label was configured with one + namespace (``ns3``), so it has an inventory of ``1``. Since the ``6GB`` + label was configured with three namespaces (``ns0``, ``ns1``, and ``ns2``), + the ``CUSTOM_PMEM_NAMESPACE_6GB`` inventory has a ``total`` and ``max_unit`` + of ``3``. + +#. Check allocations for each server that is using vPMEMs + + .. code-block:: console + + $ openstack server list + +--------------------------------------+----------------------+--------+-------------------+---------------+-----------------+ + | ID | Name | Status | Networks | Image | Flavor | + +--------------------------------------+----------------------+--------+-------------------+---------------+-----------------+ + | 41d3e139-de5c-40fd-9d82-016b72f2ba1d | server-with-2-vpmems | ACTIVE | private=10.0.0.24 | ubuntu-bionic | my_flavor_large | + | a616a7f6-b285-4adf-a885-dd8426dd9e6a | server-with-1-vpmem | ACTIVE | private=10.0.0.13 | ubuntu-bionic | my_flavor | + +--------------------------------------+----------------------+--------+-------------------+---------------+-----------------+ + + $ openstack resource provider allocation show 41d3e139-de5c-40fd-9d82-016b72f2ba1d + +--------------------------------------+------------+------------------------------------------------------------------------------------------------------------------------+ + | resource_provider | generation | resources | + +--------------------------------------+------------+------------------------------------------------------------------------------------------------------------------------+ + | 1bc545f9-891f-4930-ab2b-88a56078f4be | 49 | {u'MEMORY_MB': 32768, u'VCPU': 16, u'DISK_GB': 20, u'CUSTOM_PMEM_NAMESPACE_6GB': 1, u'CUSTOM_PMEM_NAMESPACE_LARGE': 1} | + +--------------------------------------+------------+------------------------------------------------------------------------------------------------------------------------+ + + $ openstack resource provider allocation show a616a7f6-b285-4adf-a885-dd8426dd9e6a + +--------------------------------------+------------+-----------------------------------------------------------------------------------+ + | resource_provider | generation | resources | + +--------------------------------------+------------+-----------------------------------------------------------------------------------+ + | 1bc545f9-891f-4930-ab2b-88a56078f4be | 49 | {u'MEMORY_MB': 8192, u'VCPU': 8, u'DISK_GB': 20, u'CUSTOM_PMEM_NAMESPACE_6GB': 1} | + +--------------------------------------+------------+-----------------------------------------------------------------------------------+ + + In this example, two servers were created. ``server-with-2-vpmems`` used + ``my_flavor_large`` asking for one ``6GB`` vPMEM and one ``LARGE`` vPMEM. + ``server-with-1-vpmem`` used ``my_flavor`` asking for a single ``6GB`` + vPMEM. + + +.. _`PMEM namespaces`: http://pmem.io/ndctl/ndctl-create-namespace.html +.. _`vPMEM backends`: https://github.com/qemu/qemu/blob/19b599f7664b2ebfd0f405fb79c14dd241557452/docs/nvdimm.txt#L145 +.. _`NVDIMM Linux kernel document`: https://www.kernel.org/doc/Documentation/nvdimm/nvdimm.txt +.. _`ipmctl`: https://software.intel.com/en-us/articles/quick-start-guide-configure-intel-optane-dc-persistent-memory-on-linux +.. _`ndctl`: http://pmem.io/ndctl/ diff --git a/doc/source/cli/index.rst b/doc/source/cli/index.rst index bf2d164d9be..a5b1d89bfc1 100644 --- a/doc/source/cli/index.rst +++ b/doc/source/cli/index.rst @@ -31,6 +31,7 @@ database. 
:maxdepth: 1 nova-manage + nova-policy nova-status Service Daemons @@ -46,12 +47,10 @@ daemonize correctly after starting up. nova-api nova-compute nova-conductor - nova-console nova-novncproxy nova-scheduler nova-serialproxy nova-spicehtml5proxy - nova-xvpvncproxy WSGI Services ------------- @@ -77,17 +76,3 @@ are documented for completeness and debugging if something goes wrong. :maxdepth: 1 nova-rootwrap - -Deprecated Services -------------------- - -The following services are deprecated in nova. They should not be used in new -deployments, but are documented for existing ones. - -.. toctree:: - :maxdepth: 1 - - nova-cells - nova-dhcpbridge - nova-network - nova-consoleauth diff --git a/doc/source/cli/nova-api-metadata.rst b/doc/source/cli/nova-api-metadata.rst index 3121280fd4d..f6f5d8afba2 100644 --- a/doc/source/cli/nova-api-metadata.rst +++ b/doc/source/cli/nova-api-metadata.rst @@ -2,47 +2,49 @@ nova-api-metadata ================= --------------------------------- -Server for the Nova Metadata API --------------------------------- - -:Author: openstack@lists.openstack.org -:Copyright: OpenStack Foundation -:Manual section: 1 -:Manual group: cloud computing +.. program:: nova-api-metadata Synopsis ======== :: - nova-api-metadata [options] + nova-api-metadata [...] Description =========== :program:`nova-api-metadata` is a server daemon that serves the Nova Metadata -API. +API. This daemon routes database requests via the ``nova-conductor`` service, +so there are some considerations about using this in a +:ref:`multi-cell layout `. Options ======= -**General options** +.. rubric:: General options + +.. include:: opts/common.rst + +.. rubric:: Debugger options + +.. include:: opts/debugger.rst Files ===== * ``/etc/nova/nova.conf`` * ``/etc/nova/api-paste.ini`` -* ``/etc/nova/policy.json`` +* ``/etc/nova/policy.yaml`` +* ``/etc/nova/policy.d/`` * ``/etc/nova/rootwrap.conf`` * ``/etc/nova/rootwrap.d/`` See Also ======== -* :nova-doc:`OpenStack Nova <>` -* :nova-doc:`Using WSGI with Nova ` +:doc:`nova-api(1) `, +:doc:`nova-api-os-compute(1) ` Bugs ==== diff --git a/doc/source/cli/nova-api-os-compute.rst b/doc/source/cli/nova-api-os-compute.rst index 56d53eed208..6564f03626c 100644 --- a/doc/source/cli/nova-api-os-compute.rst +++ b/doc/source/cli/nova-api-os-compute.rst @@ -2,21 +2,14 @@ nova-api-os-compute =================== ------------------------------------------- -Server for the Nova OpenStack Compute APIs ------------------------------------------- - -:Author: openstack@lists.openstack.org -:Copyright: OpenStack Foundation -:Manual section: 1 -:Manual group: cloud computing +.. program:: nova-api-os-compute Synopsis ======== :: - nova-api-os-compute [options] + nova-api-os-compute [...] Description =========== @@ -27,22 +20,29 @@ OpenStack Compute API. Options ======= -**General options** +.. rubric:: General options + +.. include:: opts/common.rst + +.. rubric:: Debugger options + +.. 
include:: opts/debugger.rst Files ===== * ``/etc/nova/nova.conf`` * ``/etc/nova/api-paste.ini`` -* ``/etc/nova/policy.json`` +* ``/etc/nova/policy.yaml`` +* ``/etc/nova/policy.d/`` * ``/etc/nova/rootwrap.conf`` * ``/etc/nova/rootwrap.d/`` See Also ======== -* :nova-doc:`OpenStack Nova <>` -* :nova-doc:`Using WSGI with Nova ` +:doc:`nova-api(1) `, +:doc:`nova-api-metadata(1) ` Bugs ==== diff --git a/doc/source/cli/nova-api.rst b/doc/source/cli/nova-api.rst index ea58d671a02..b10efc6b391 100644 --- a/doc/source/cli/nova-api.rst +++ b/doc/source/cli/nova-api.rst @@ -2,21 +2,14 @@ nova-api ======== -------------------------------------- -Server for the OpenStack Compute APIs -------------------------------------- - -:Author: openstack@lists.openstack.org -:Copyright: OpenStack Foundation -:Manual section: 1 -:Manual group: cloud computing +.. program:: nova-api Synopsis ======== :: - nova-api [options] + nova-api [...] Description =========== @@ -27,22 +20,29 @@ APIs in separate greenthreads. Options ======= -**General options** +.. rubric:: General options + +.. include:: opts/common.rst + +.. rubric:: Debugger options + +.. include:: opts/debugger.rst Files ===== * ``/etc/nova/nova.conf`` * ``/etc/nova/api-paste.ini`` -* ``/etc/nova/policy.json`` +* ``/etc/nova/policy.yaml`` +* ``/etc/nova/policy.d/`` * ``/etc/nova/rootwrap.conf`` * ``/etc/nova/rootwrap.d/`` See Also ======== -* :nova-doc:`OpenStack Nova <>` -* :nova-doc:`Using WSGI with Nova ` +:doc:`nova-api-metadata(1) `, +:doc:`nova-api-os-compute(1) ` Bugs ==== diff --git a/doc/source/cli/nova-cells.rst b/doc/source/cli/nova-cells.rst deleted file mode 100644 index 90b66393848..00000000000 --- a/doc/source/cli/nova-cells.rst +++ /dev/null @@ -1,54 +0,0 @@ -========== -nova-cells -========== - -------------------------- -Server for the Nova Cells -------------------------- - -:Author: openstack@lists.openstack.org -:Copyright: OpenStack Foundation -:Manual section: 1 -:Manual group: cloud computing - -Synopsis -======== - -:: - - nova-cells [options] - -Description -=========== - -:program:`nova-cells` is a server daemon that serves the Nova Cells service, -which handles communication between cells and selects cells for new instances. - -.. deprecated:: 16.0.0 - Everything in this document is referring to Cells v1, which is - not recommended for new deployments and is deprecated in favor of Cells v2 - as of the 16.0.0 Pike release. For information about commands to use - with Cells v2, see the man page for :ref:`man-page-cells-v2`. - -Options -======= - -**General options** - -Files -===== - -* ``/etc/nova/nova.conf`` -* ``/etc/nova/policy.json`` -* ``/etc/nova/rootwrap.conf`` -* ``/etc/nova/rootwrap.d/`` - -See Also -======== - -* :nova-doc:`OpenStack Nova <>` - -Bugs -==== - -* Nova bugs are managed at `Launchpad `__ diff --git a/doc/source/cli/nova-compute.rst b/doc/source/cli/nova-compute.rst index f48478e8be8..f190949efa5 100644 --- a/doc/source/cli/nova-compute.rst +++ b/doc/source/cli/nova-compute.rst @@ -2,21 +2,14 @@ nova-compute ============ -------------------- -Nova Compute Server -------------------- - -:Author: openstack@lists.openstack.org -:Copyright: OpenStack Foundation -:Manual section: 1 -:Manual group: cloud computing +.. program:: nova-compute Synopsis ======== :: - nova-compute [options] + nova-compute [...] Description =========== @@ -29,20 +22,34 @@ instance's state, attaching persistent storage, and terminating the instance. Options ======= -**General options** +.. rubric:: General options + +.. 
include:: opts/common.rst + +.. rubric:: Debugger options + +.. include:: opts/debugger.rst Files ===== +.. todo: We shouldn't have policy configuration in this non-API service, but + bug #1675486 means we do have one + * ``/etc/nova/nova.conf`` -* ``/etc/nova/policy.json`` +* ``/etc/nova/policy.yaml`` +* ``/etc/nova/policy.d/`` * ``/etc/nova/rootwrap.conf`` * ``/etc/nova/rootwrap.d/`` See Also ======== -* :nova-doc:`OpenStack Nova <>` +:doc:`nova-conductor(1) `, +:doc:`nova-manage(1) `, +:doc:`nova-rootwrap(1) `, +:doc:`nova-scheduler(1) `, +:doc:`nova-status(1) ` Bugs ==== diff --git a/doc/source/cli/nova-conductor.rst b/doc/source/cli/nova-conductor.rst index cfa53806452..3020250e398 100644 --- a/doc/source/cli/nova-conductor.rst +++ b/doc/source/cli/nova-conductor.rst @@ -2,21 +2,14 @@ nova-conductor ============== ------------------------------ -Server for the Nova Conductor ------------------------------ - -:Author: openstack@lists.openstack.org -:Copyright: OpenStack Foundation -:Manual section: 1 -:Manual group: cloud computing +.. program:: nova-conductor Synopsis ======== :: - nova-conductor [options] + nova-conductor [...] Description =========== @@ -27,7 +20,13 @@ service, which provides coordination and database query support for nova. Options ======= -**General options** +.. rubric:: General options + +.. include:: opts/common.rst + +.. rubric:: Debugger options + +.. include:: opts/debugger.rst Files ===== @@ -37,7 +36,11 @@ Files See Also ======== -* :nova-doc:`OpenStack Nova <>` +:doc:`nova-compute(1) `, +:doc:`nova-manage(1) `, +:doc:`nova-rootwrap(1) `, +:doc:`nova-scheduler(1) `, +:doc:`nova-status(1) ` Bugs ==== diff --git a/doc/source/cli/nova-console.rst b/doc/source/cli/nova-console.rst deleted file mode 100644 index 1d3eb4e1860..00000000000 --- a/doc/source/cli/nova-console.rst +++ /dev/null @@ -1,49 +0,0 @@ -============ -nova-console -============ - -------------------- -Nova Console Server -------------------- - -:Author: openstack@lists.openstack.org -:Copyright: OpenStack Foundation -:Manual section: 1 -:Manual group: cloud computing - -Synopsis -======== - -:: - - nova-console [options] - -Description -=========== - -:program:`nova-console` is a server daemon that serves the Nova Console -service, which is a console proxy to set up multi-tenant VM console access, -e.g. with *XVP*. - -Options -======= - -**General options** - -Files -===== - -* ``/etc/nova/nova.conf`` -* ``/etc/nova/policy.json`` -* ``/etc/nova/rootwrap.conf`` -* ``/etc/nova/rootwrap.d/`` - -See Also -======== - -* :nova-doc:`OpenStack Nova <>` - -Bugs -==== - -* Nova bugs are managed at `Launchpad `__ diff --git a/doc/source/cli/nova-consoleauth.rst b/doc/source/cli/nova-consoleauth.rst deleted file mode 100644 index 32e037ae59a..00000000000 --- a/doc/source/cli/nova-consoleauth.rst +++ /dev/null @@ -1,53 +0,0 @@ -================ -nova-consoleauth -================ - ----------------------------------- -Nova Console Authentication Server ----------------------------------- - -:Author: openstack@lists.openstack.org -:Copyright: OpenStack Foundation -:Manual section: 1 -:Manual group: cloud computing - -Synopsis -======== - -:: - - nova-consoleauth [options] - -Description -=========== - -:program:`nova-consoleauth` is a server daemon that serves the Nova Console -Auth service, which provides authentication for Nova consoles. - -.. deprecated:: 18.0.0 - - `nova-consoleauth` is deprecated since 18.0.0 (Rocky) and will be removed in - an upcoming release. 
-
-Options
-=======
-
-**General options**
-
-Files
-=====
-
-* ``/etc/nova/nova.conf``
-* ``/etc/nova/policy.json``
-* ``/etc/nova/rootwrap.conf``
-* ``/etc/nova/rootwrap.d/``
-
-See Also
-========
-
-* :nova-doc:`OpenStack Nova <>`
-
-Bugs
-====
-
-* Nova bugs are managed at `Launchpad `__
diff --git a/doc/source/cli/nova-dhcpbridge.rst b/doc/source/cli/nova-dhcpbridge.rst
deleted file mode 100644
index 9fe5b143b8b..00000000000
--- a/doc/source/cli/nova-dhcpbridge.rst
+++ /dev/null
@@ -1,56 +0,0 @@
-===============
-nova-dhcpbridge
-===============
-
-------------------------------------------------
-Handles Lease Database updates from DHCP servers
-------------------------------------------------
-
-:Author: openstack@lists.openstack.org
-:Copyright: OpenStack Foundation
-:Manual section: 1
-:Manual group: cloud computing
-
-Synopsis
-========
-
-::
-
-  nova-dhcpbridge [options]
-
-Description
-===========
-
-:program:`nova-dhcpbridge` is an application that handles lease database
-updates from DHCP servers. :program:`nova-dhcpbridge` is used whenever nova is
-managing DHCP (vlan and flatDHCP). :program:`nova-dhcpbridge` should not be run
-as a daemon.
-
-.. warning::
-
-   This application is only for use with ``nova-network``, which is not
-   recommended for new deployments.
-
-Options
-=======
-
-**General options**
-
-Files
-=====
-
-* ``/etc/nova/nova.conf``
-* ``/etc/nova/api-paste.ini``
-* ``/etc/nova/policy.json``
-* ``/etc/nova/rootwrap.conf``
-* ``/etc/nova/rootwrap.d/``
-
-See Also
-========
-
-* :nova-doc:`OpenStack Nova <>`
-
-Bugs
-====
-
-* Nova bugs are managed at `Launchpad `__
diff --git a/doc/source/cli/nova-manage.rst b/doc/source/cli/nova-manage.rst
index 8c7c7f66fa7..4dc614db2aa 100644
--- a/doc/source/cli/nova-manage.rst
+++ b/doc/source/cli/nova-manage.rst
@@ -2,21 +2,15 @@
 nova-manage
 ===========

--------------------------------------------
-Control and manage cloud computer instances
--------------------------------------------
-
-:Author: openstack@lists.openstack.org
-:Copyright: OpenStack Foundation
-:Manual section: 1
-:Manual group: cloud computing
+.. program:: nova-manage

 Synopsis
 ========

 ::

-  nova-manage  []
+  nova-manage [ [...]]
+

 Description
 ===========

@@ -24,334 +18,1653 @@ Description
 :program:`nova-manage` controls cloud computing instances by managing various
 admin-only aspects of Nova.

+The standard pattern for executing a :program:`nova-manage` command is::
+
+  nova-manage  []
+
+Run without arguments to see a list of available command categories::
+
+  nova-manage
+
+You can also run with a category argument such as ``db`` to see a list of all
+commands in that category::
+
+  nova-manage db
+
+
 Options
 =======

-The standard pattern for executing a nova-manage command is::
+These options apply to all commands and may be given in any order, before or
+after commands. Individual commands may provide additional options. Options
+without an argument can be combined after a single dash.

-  nova-manage  []
+.. option:: -h, --help

-Run without arguments to see a list of available command categories::
+   Show a help message and exit

-  nova-manage
+.. option:: --config-dir 

-You can also run with a category argument such as user to see a list of all
-commands in that category::
+   Path to a config directory to pull ``*.conf`` files from. This file set is
+   sorted, so as to provide a predictable parse order if individual options
+   are over-ridden. 
The set is parsed after the file(s) specified via previous
+   :option:`--config-file` arguments, hence over-ridden options in the
+   directory take precedence. This option must be set from the command-line.
+
+.. option:: --config-file 
+
+   Path to a config file to use. Multiple config files can be specified, with
+   values in later files taking precedence. Defaults to None. This option must
+   be set from the command-line.
+
+.. option:: --log-config-append , --log-config , --log_config 
+
+   The name of a logging configuration file. This file is appended to any
+   existing logging configuration files. For details about logging
+   configuration files, see the Python logging module documentation. Note that
+   when logging configuration files are used then all logging configuration is
+   set in the configuration file and other logging configuration options are
+   ignored (for example, :option:`--log-date-format`).
+
+.. option:: --log-date-format 
+
+   Defines the format string for ``%(asctime)s`` in log records. Default:
+   None. This option is ignored if :option:`--log-config-append` is set.
+
+.. option:: --log-dir , --logdir 
+
+   The base directory used for relative log_file paths.
+   This option is ignored if :option:`--log-config-append` is set.
+
+.. option:: --log-file PATH, --logfile 
+
+   Name of log file to send logging output to.
+   If no default is set, logging will go to stderr as defined by use_stderr.
+   This option is ignored if :option:`--log-config-append` is set.
+
+.. option:: --syslog-log-facility SYSLOG_LOG_FACILITY
+
+   Syslog facility to receive log lines.
+   This option is ignored if :option:`--log-config-append` is set.
+
+.. option:: --use-journal
+
+   Enable journald for logging. If running in a systemd environment you may
+   wish to enable journal support. Doing so will use the journal native
+   protocol which includes structured metadata in addition to log
+   messages. This option is ignored if :option:`--log-config-append` is
+   set.
+
+.. option:: --nouse-journal
+
+   The inverse of :option:`--use-journal`.
+
+.. option:: --use-json
+
+   Use JSON formatting for logging. This option is ignored if
+   :option:`--log-config-append` is set.
+
+.. option:: --nouse-json
+
+   The inverse of :option:`--use-json`.
+
+.. option:: --use-syslog
+
+   Use syslog for logging. Existing syslog format is DEPRECATED and will be
+   changed later to honor RFC5424. This option is ignored if
+   :option:`--log-config-append` is set.
+
+.. option:: --nouse-syslog
+
+   The inverse of :option:`--use-syslog`.
+
+.. option:: --watch-log-file
+
+   Uses a logging handler designed to watch the file system. When the log
+   file is moved or removed, this handler will open a new log file at the
+   specified path instantaneously. It makes sense only if the
+   :option:`--log-file` option is specified and Linux platform is used. This
+   option is ignored if :option:`--log-config-append` is set.
+
+.. option:: --nowatch-log-file
+
+   The inverse of :option:`--watch-log-file`.
+
+.. option:: --debug, -d
+
+   If enabled, the logging level will be set to ``DEBUG`` instead of the
+   default ``INFO`` level.
+
+.. option:: --nodebug
+
+   The inverse of :option:`--debug`.
+
+.. option:: --post-mortem
+
+   Allow post-mortem debugging.
+
+.. option:: --nopost-mortem
+
+   The inverse of :option:`--post-mortem`.
+
+.. option:: --version
+
+   Show program's version number and exit
+
+
+Database Commands
+=================
+
+db version
+----------
+
+.. program:: nova-manage db version
+
+.. 
code-block:: shell
+
+   nova-manage db version
+
+Print the current main database version.
+
+db sync
+-------
+
+.. program:: nova-manage db sync
+
+.. code-block:: shell
+
+   nova-manage db sync [--local_cell] [VERSION]
+
+Upgrade the main database schema up to the most recent version or ``VERSION``
+if specified. By default, this command will also attempt to upgrade the schema
+for the cell0 database if it is mapped.
+
+If :option:`--local_cell` is specified, then only the main database in the
+current cell is upgraded. The local database connection is determined by
+:oslo.config:option:`database.connection` in the configuration file, passed to
+nova-manage using the ``--config-file`` option(s).
+
+Refer to the :program:`nova-manage cells_v2 map_cell0` or
+:program:`nova-manage cells_v2 simple_cell_setup` commands for more details on
+mapping the cell0 database.
+
+This command should be run **after** :program:`nova-manage api_db sync`.
+
+.. rubric:: Options
+
+.. option:: --local_cell
+
+   Only sync db in the local cell: do not attempt to fan-out to all cells.
+
+.. rubric:: Return codes
+
+.. list-table::
+   :widths: 20 80
+   :header-rows: 1
+
+   * - Return code
+     - Description
+   * - 0
+     - Successfully synced database schema.
+   * - 1
+     - Failed to access cell0.
+
+.. versionchanged:: 20.0.0 (Train)
+
+   Removed support for the legacy ``--version `` argument.
+
+.. versionchanged:: 24.0.0 (Xena)
+
+   Migrated versioning engine to alembic. The optional ``VERSION`` argument is
+   now expected to be an alembic-based version. sqlalchemy-migrate-based
+   versions will be rejected.
+
+db archive_deleted_rows
+-----------------------
+
+.. program:: nova-manage db archive_deleted_rows
+
+.. code-block:: shell
+
+   nova-manage db archive_deleted_rows [--max_rows ] [--verbose]
+     [--until-complete] [--before ] [--purge] [--all-cells] [--task-log]
+     [--sleep]
+
+Move deleted rows from production tables to shadow tables. Note that the
+corresponding rows in the ``instance_mappings``, ``request_specs`` and
+``instance_group_member`` tables of the API database are purged when
+instance records are archived and thus,
+:oslo.config:option:`api_database.connection` is required in the config
+file.
+
+If automating, this should be run continuously while the result is 1,
+stopping at 0, or use the :option:`--until-complete` option.
+
+.. versionchanged:: 24.0.0 (Xena)
+
+   Added :option:`--task-log`, :option:`--sleep` options.
+
+.. rubric:: Options
+
+.. option:: --max_rows 
+
+   Maximum number of deleted rows to archive. Defaults to 1000. Note that this
+   number does not include the corresponding rows, if any, that are removed
+   from the API database for deleted instances.
+
+.. option:: --before 
+
+   Archive rows that have been deleted before ````. Accepts date strings
+   in the default format output by the ``date`` command, as well as
+   ``YYYY-MM-DD[HH:mm:ss]``. For example::
+
+     # Purge shadow table rows older than a specific date
+     nova-manage db archive --before 2015-10-21
+     # or
+     nova-manage db archive --before "Oct 21 2015"
+     # Times are also accepted
+     nova-manage db archive --before "2015-10-21 12:00"
+
+   Note that relative dates (such as ``yesterday``) are not supported
+   natively. The ``date`` command can be helpful here::
+
+     # Archive deleted rows more than one month old
+     nova-manage db archive --before "$(date -d 'now - 1 month')"
+
+.. option:: --verbose
+
+   Print how many rows were archived per table.
+
+.. option:: --until-complete
+
+   Run continuously until all deleted rows are archived. 
+   Use :option:`--max_rows` as a batch size for each iteration.
+
+.. option:: --purge
+
+   Purge all data from shadow tables after archive completes.
+
+.. option:: --all-cells
+
+   Run command across all cells.
+
+.. option:: --task-log
+
+   Also archive ``task_log`` table records. Note that ``task_log`` records are
+   never deleted, so archiving them will move all of the ``task_log`` records
+   up to now into the shadow tables. It is recommended to also specify the
+   :option:`--before` option to avoid races for those consuming ``task_log``
+   record data via the `/os-instance_usage_audit_log`__ API (example:
+   Telemetry).
+
+   .. __: https://docs.openstack.org/api-ref/compute/#server-usage-audit-log-os-instance-usage-audit-log
+
+.. option:: --sleep 
+
+   The amount of time in seconds to sleep between batches when
+   :option:`--until-complete` is used. Defaults to 0.
+
+.. rubric:: Return codes
+
+.. list-table::
+   :widths: 20 80
+   :header-rows: 1
+
+   * - Return code
+     - Description
+   * - 0
+     - Nothing was archived.
+   * - 1
+     - Some number of rows were archived.
+   * - 2
+     - Invalid value for :option:`--max_rows`.
+   * - 3
+     - No connection to the API database could be established using
+       :oslo.config:option:`api_database.connection`.
+   * - 4
+     - Invalid value for :option:`--before`.
+   * - 255
+     - An unexpected error occurred.
+
+db purge
+--------
+
+.. program:: nova-manage db purge
+
+.. code-block:: shell
+
+   nova-manage db purge [--all] [--before ] [--verbose] [--all-cells]
+
+Delete rows from shadow tables. For :option:`--all-cells` to work, the API
+database connection information must be configured.
+
+.. versionadded:: 18.0.0 (Rocky)
+
+.. rubric:: Options
+
+.. option:: --all
+
+   Purge all rows in the shadow tables.
+
+.. option:: --before 
+
+   Delete data that was archived before ````. Accepts date strings
+   in the default format output by the ``date`` command, as well as
+   ``YYYY-MM-DD[HH:mm:ss]``. For example::
+
+     # Purge shadow table rows older than a specific date
+     nova-manage db purge --before 2015-10-21
+     # or
+     nova-manage db purge --before "Oct 21 2015"
+     # Times are also accepted
+     nova-manage db purge --before "2015-10-21 12:00"
+
+   Note that relative dates (such as ``yesterday``) are not supported
+   natively. The ``date`` command can be helpful here::
+
+     # Purge archived rows more than one month old
+     nova-manage db purge --before "$(date -d 'now - 1 month')"
+
+.. option:: --verbose
+
+   Print information about purged records.
+
+.. option:: --all-cells
+
+   Run against all cell databases.
+
+.. rubric:: Return codes
+
+.. list-table::
+   :widths: 20 80
+   :header-rows: 1
+
+   * - Return code
+     - Description
+   * - 0
+     - Rows were deleted.
+   * - 1
+     - Required arguments were not provided.
+   * - 2
+     - Invalid value for :option:`--before`.
+   * - 3
+     - Nothing was purged.
+   * - 4
+     - No connection to the API database could be established using
+       :oslo.config:option:`api_database.connection`.
-
-These sections describe the available categories and arguments for nova-manage.
-
-Nova Database
-~~~~~~~~~~~~~
-
-``nova-manage db version``
-    Print the current main database version.
-
-``nova-manage db sync [--version ] [--local_cell]``
-    Upgrade the main database schema up to the most recent version or
-    ``--version`` if specified. By default, this command will also attempt to
-    upgrade the schema for the cell0 database if it is mapped (see the
-    ``map_cell0`` or ``simple_cell_setup`` commands for more details on mapping
-    the cell0 database). 
If ``--local_cell`` is specified, then only the main - database in the current cell is upgraded. The local database connection is - determined by ``[database]/connection`` in the configuration file passed to - nova-manage. - -``nova-manage db archive_deleted_rows [--max_rows ] [--verbose] [--until-complete] [--purge]`` - Move deleted rows from production tables to shadow tables. Note that the - corresponding rows in the instance_mappings and request_specs tables of the - API database are purged when instance records are archived and thus, - CONF.api_database.connection is required in the config file. Specifying - --verbose will print the results of the archive operation for any tables that - were changed. Specifying --until-complete will make the command run - continuously until all deleted rows are archived. Use the --max_rows option, - which defaults to 1000, as a batch size for each iteration. Specifying --purge - will cause a `full` DB purge to be completed after archival. If a date range - is desired for the purge, then run ``nova-manage db purge --before - `` manually after archiving is complete. - -``nova-manage db purge [--all] [--before ] [--verbose] [--all-cells]`` - Delete rows from shadow tables. Specifying --all will delete all data from - all shadow tables. Specifying --before will delete data from all shadow tables - that is older than the date provided. Date strings may be fuzzy, such as - ``Oct 21 2015``. Specifying --verbose will cause information to be printed about - purged records. Specifying --all-cells will cause the purge to be applied against - all cell databases. For --all-cells to work, the api database connection - information must be configured. Returns exit code 0 if rows were deleted, 1 if - required arguments are not provided, 2 if an invalid date is provided, 3 if no - data was deleted, 4 if the list of cells cannot be obtained. - -``nova-manage db null_instance_uuid_scan [--delete]`` - Lists and optionally deletes database records where instance_uuid is NULL. - -``nova-manage db online_data_migrations [--max-count]`` - Perform data migration to update all live data. Return exit code 0 if - migrations were successful or exit code 1 for partial updates. This command - should be called after upgrading database schema and nova services on all - controller nodes. If the command exits with partial updates (exit code 1) - the command will need to be called again. - - ``--max-count`` controls the maximum number of objects to migrate in a given - call. If not specified, migration will occur in batches of 50 until fully - complete. - -``nova-manage db ironic_flavor_migration [--all] [--host] [--node] [--resource_class]`` - Perform the ironic flavor migration process against the database - while services are offline. This is `not recommended` for most - people. The ironic compute driver will do this online and as - necessary if run normally. This routine is provided only for - advanced users that may be skipping the 16.0.0 Pike release, never - able to run services normally at the Pike level. Since this utility - is for use when all services (including ironic) are down, you must - pass the resource class set on your node(s) with the - ``--resource_class`` parameter. - - To migrate a specific host and node, provide the hostname and node uuid with - ``--host $hostname --node $uuid``. To migrate all instances on nodes managed - by a single host, provide only ``--host``. To iterate over all nodes in the - system in a single pass, use ``--all``. 
Note that this process is not lightweight, - so it should not be run frequently without cause, although it is not harmful - to do so. If you have multiple cellsv2 cells, you should run this once per cell - with the corresponding cell config for each (i.e. this does not iterate cells - automatically). - - Note that this is not recommended unless you need to run this - specific data migration offline, and it should be used with care as - the work done is non-trivial. Running smaller and more targeted batches (such as - specific nodes) is recommended. - -Nova API Database -~~~~~~~~~~~~~~~~~ - -``nova-manage api_db version`` - Print the current API database version. - -``nova-manage api_db sync [VERSION]`` - Upgrade the API database schema up to the most recent version or - ``[VERSION]`` if specified. This command does not create the API - database, it runs schema migration scripts. The API database connection is - determined by ``[api_database]/connection`` in the configuration file - passed to nova-manage. - - Starting in the 18.0.0 Rocky release, this command will also upgrade the - optional placement database if ``[placement_database]/connection`` is - configured. +db online_data_migrations +------------------------- + +.. program:: nova-manage db online_data_migrations + +.. code-block:: shell + + nova-manage db online_data_migrations [--max-count ] + +Perform data migration to update all live data. + +This command should be called after upgrading database schema and nova services on +all controller nodes. If it exits with partial updates (exit status 1) it should +be called again, even if some updates initially generated errors, because some updates +may depend on others having completed. If it exits with status 2, intervention is +required to resolve the issue causing remaining updates to fail. It should be +considered successfully completed only when the exit status is 0. + +For example:: + + $ nova-manage db online_data_migrations + Running batches of 50 until complete + 2 rows matched query migrate_instances_add_request_spec, 0 migrated + 2 rows matched query populate_queued_for_delete, 2 migrated + +---------------------------------------------+--------------+-----------+ + | Migration | Total Needed | Completed | + +---------------------------------------------+--------------+-----------+ + | create_incomplete_consumers | 0 | 0 | + | migrate_instances_add_request_spec | 2 | 0 | + | migrate_quota_classes_to_api_db | 0 | 0 | + | migrate_quota_limits_to_api_db | 0 | 0 | + | migration_migrate_to_uuid | 0 | 0 | + | populate_missing_availability_zones | 0 | 0 | + | populate_queued_for_delete | 2 | 2 | + | populate_uuids | 0 | 0 | + +---------------------------------------------+--------------+-----------+ + +In the above example, the ``migrate_instances_add_request_spec`` migration +found two candidate records but did not need to perform any kind of data +migration for either of them. In the case of the +``populate_queued_for_delete`` migration, two candidate records were found +which did require a data migration. Since :option:`--max-count` defaults to 50 +and only two records were migrated with no more candidates remaining, the +command completed successfully with exit code 0. + +.. versionadded:: 13.0.0 (Mitaka) + +.. rubric:: Options + +.. option:: --max-count + + Controls the maximum number of objects to migrate in a given call. If not + specified, migration will occur in batches of 50 until fully complete. + +.. rubric:: Return codes + +.. 
list-table:: + :widths: 20 80 + :header-rows: 1 + + * - Return code + - Description + * - 0 + - No (further) updates are possible. + * - 1 + - Some updates were completed successfully. Note that not all updates may + have succeeded. + * - 2 + - Some updates generated errors and no other migrations were able to take + effect in the last batch attempted. + * - 127 + - Invalid input was provided. + + +API Database Commands +===================== + +api_db version +-------------- + +.. program:: nova-manage api_db version + +.. code-block:: shell + + nova-manage api_db version + +Print the current API database version. + +.. versionadded:: 2015.1.0 (Kilo) + +api_db sync +----------- + +.. program:: nova-manage api_db sync + +.. code-block:: shell + + nova-manage api_db sync [VERSION] + +Upgrade the API database schema up to the most recent version or +``VERSION`` if specified. This command does not create the API +database, it runs schema migration scripts. The API database connection is +determined by :oslo.config:option:`api_database.connection` in the +configuration file passed to nova-manage. + +This command should be run before ``nova-manage db sync``. + +.. versionadded:: 2015.1.0 (Kilo) + +.. versionchanged:: 18.0.0 (Rocky) + + Added support for upgrading the optional placement database if + ``[placement_database]/connection`` is configured. + +.. versionchanged:: 20.0.0 (Train) + + Removed support for upgrading the optional placement database as placement + is now a separate project. + + Removed support for the legacy ``--version `` argument. + +.. versionchanged:: 24.0.0 (Xena) + + Migrated versioning engine to alembic. The optional ``VERSION`` argument is + now expected to be an alembic-based version. sqlalchemy-migrate-based + versions will be rejected. .. _man-page-cells-v2: -Nova Cells v2 -~~~~~~~~~~~~~ - -``nova-manage cell_v2 simple_cell_setup [--transport-url ]`` - Setup a fresh cells v2 environment; this should not be used if you - currently have a cells v1 environment. If a transport_url is not - specified, it will use the one defined by ``[DEFAULT]/transport_url`` - in the configuration file. Returns 0 if setup is completed - (or has already been done), 1 if no hosts are reporting (and cannot be - mapped), 1 if the transport url is missing, and 2 if run in a cells v1 - environment. - -``nova-manage cell_v2 map_cell0 [--database_connection ]`` - Create a cell mapping to the database connection for the cell0 database. - If a database_connection is not specified, it will use the one defined by - ``[database]/connection`` in the configuration file passed to nova-manage. - The cell0 database is used for instances that have not been scheduled to - any cell. This generally applies to instances that have encountered an - error before they have been scheduled. Returns 0 if cell0 is created - successfully or already setup. - -``nova-manage cell_v2 map_instances --cell_uuid [--max-count ] [--reset]`` - Map instances to the provided cell. Instances in the nova database will - be queried from oldest to newest and mapped to the provided cell. A - max_count can be set on the number of instance to map in a single run. - Repeated runs of the command will start from where the last run finished - so it is not necessary to increase max-count to finish. A reset option - can be passed which will reset the marker, thus making the command start - from the beginning as opposed to the default behavior of starting from - where the last run finished. 
Returns 0 if all instances have been mapped, - and 1 if there are still instances to be mapped. - - If ``--max-count`` is not specified, all instances in the cell will be - mapped in batches of 50. If you have a large number of instances, consider - specifying a custom value and run the command until it exits with 0. - -``nova-manage cell_v2 map_cell_and_hosts [--name ] [--transport-url ] [--verbose]`` - Create a cell mapping to the database connection and message queue - transport url, and map hosts to that cell. The database connection - comes from the ``[database]/connection`` defined in the configuration - file passed to nova-manage. If a transport_url is not specified, it will - use the one defined by ``[DEFAULT]/transport_url`` in the configuration - file. This command is idempotent (can be run multiple times), and the - verbose option will print out the resulting cell mapping uuid. Returns 0 - on successful completion, and 1 if the transport url is missing. - -``nova-manage cell_v2 verify_instance --uuid [--quiet]`` - Verify instance mapping to a cell. This command is useful to determine if - the cells v2 environment is properly setup, specifically in terms of the - cell, host, and instance mapping records required. Returns 0 when the - instance is successfully mapped to a cell, 1 if the instance is not - mapped to a cell (see the ``map_instances`` command), 2 if the cell - mapping is missing (see the ``map_cell_and_hosts`` command if you are - upgrading from a cells v1 environment, and the ``simple_cell_setup`` if - you are upgrading from a non-cells v1 environment), 3 if it is a deleted - instance which has instance mapping, and 4 if it is an archived instance - which still has an instance mapping. - -``nova-manage cell_v2 create_cell [--name ] [--transport-url ] [--database_connection ] [--verbose] [--disabled]`` - Create a cell mapping to the database connection and message queue - transport url. If a database_connection is not specified, it will use the - one defined by ``[database]/connection`` in the configuration file passed - to nova-manage. If a transport_url is not specified, it will use the one - defined by ``[DEFAULT]/transport_url`` in the configuration file. The - verbose option will print out the resulting cell mapping uuid. All the - cells created are by default enabled. However passing the ``--disabled`` option - can create a pre-disabled cell, meaning no scheduling will happen to this - cell. The meaning of the various exit codes returned by this command are - explained below: - - * Returns 0 if the cell mapping was successfully created. - * Returns 1 if the transport url or database connection was missing. - * Returns 2 if another cell is already using that transport url and/or - database connection combination. - -``nova-manage cell_v2 discover_hosts [--cell_uuid ] [--verbose] [--strict] [--by-service]`` - Searches cells, or a single cell, and maps found hosts. This command will - check the database for each cell (or a single one if passed in) and map any - hosts which are not currently mapped. If a host is already mapped nothing - will be done. You need to re-run this command each time you add more - compute hosts to a cell (otherwise the scheduler will never place instances - there and the API will not list the new hosts). If the strict option is - provided the command will only be considered successful if an unmapped host - is discovered (exit code 0). Any other case is considered a failure (exit - code 1). 
If --by-service is specified, this command will look in the - appropriate cell(s) for any nova-compute services and ensure there are host - mappings for them. This is less efficient and is only necessary when using - compute drivers that may manage zero or more actual compute nodes at any - given time (currently only ironic). - -``nova-manage cell_v2 list_cells [--verbose]`` - By default the cell name, uuid, disabled state, masked transport URL and - database connection details are shown. Use the --verbose option to see - transport URL and database connection with their sensitive details. - -``nova-manage cell_v2 delete_cell [--force] --cell_uuid `` - Delete a cell by the given uuid. Returns 0 if the empty cell is found and - deleted successfully or the cell that has hosts is found and the cell, hosts - and the instance_mappings are deleted successfully with ``--force`` option - (this happens if there are no living instances), 1 if a cell with that uuid - could not be found, 2 if host mappings were found for the cell (cell not empty) - without ``--force`` option, 3 if there are instances mapped to the cell - (cell not empty) irrespective of the ``--force`` option, and 4 if there are - instance mappings to the cell but all instances have been deleted in the cell, - again without the ``--force`` option. - -``nova-manage cell_v2 list_hosts [--cell_uuid ]`` - Lists the hosts in one or all v2 cells. By default hosts in all v2 cells - are listed. Use the --cell_uuid option to list hosts in a specific cell. - If the cell is not found by uuid, this command will return an exit code - of 1. Otherwise, the exit code will be 0. - -``nova-manage cell_v2 update_cell --cell_uuid [--name ] [--transport-url ] [--database_connection ] [--disable] [--enable]`` - Updates the properties of a cell by the given uuid. If a - database_connection is not specified, it will attempt to use the one - defined by ``[database]/connection`` in the configuration file. If a - transport_url is not specified, it will attempt to use the one defined by - ``[DEFAULT]/transport_url`` in the configuration file. The meaning of the - various exit codes returned by this command are explained below: - - * If successful, it will return 0. - * If the cell is not found by the provided uuid, it will return 1. - * If the properties cannot be set, it will return 2. - * If the provided transport_url or/and database_connection is/are same as - another cell, it will return 3. - * If an attempt is made to disable and enable a cell at the same time, it - will return 4. - * If an attempt is made to disable or enable cell0 it will return 5. - - .. note:: - - Updating the ``transport_url`` or ``database_connection`` fields on a - running system will NOT result in all nodes immediately using the new - values. Use caution when changing these values. - - The scheduler will not notice that a cell has been enabled/disabled until - it is restarted or sent the SIGHUP signal. - -``nova-manage cell_v2 delete_host --cell_uuid --host `` - Delete a host by the given host name and the given cell uuid. Returns 0 - if the empty host is found and deleted successfully, 1 if a cell with - that uuid could not be found, 2 if a host with that name could not be - found, 3 if a host with that name is not in a cell with that uuid, 4 if - a host with that name has instances (host not empty). 
- - -Placement -~~~~~~~~~ - -``nova-manage placement heal_allocations [--max-count ] [--verbose]`` - Iterates over non-cell0 cells looking for instances which do not have - allocations in the Placement service and which are not undergoing a task - state transition. For each instance found, allocations are created against - the compute node resource provider for that instance based on the flavor - associated with the instance. - - There is also a special case handled for instances that *do* have - allocations created before Placement API microversion 1.8 where project_id - and user_id values were required. For those types of allocations, the - project_id and user_id are updated using the values from the instance. - - Specify ``--max-count`` to control the maximum number of instances to - process. If not specified, all instances in each cell will be mapped in - batches of 50. If you have a large number of instances, consider - specifying a custom value and run the command until it exits with 0 or 4. - - Specify ``--verbose`` to get detailed progress output during execution. - - This command requires that the ``[api_database]/connection`` and - ``[placement]`` configuration options are set. Placement API >= 1.28 is - required. - - Return codes: - - * 0: Command completed successfully and allocations were created. - * 1: --max-count was reached and there are more instances to process. - * 2: Unable to find a compute node record for a given instance. - * 3: Unable to create (or update) allocations for an instance against its - compute node resource provider. - * 4: Command completed successfully but no allocations were created. - * 127: Invalid input. - -``nova-manage placement sync_aggregates [--verbose]`` - Mirrors compute host aggregates to resource provider aggregates - in the Placement service. Requires the ``[api_database]`` and - ``[placement]`` sections of the nova configuration file to be - populated. - - Specify ``--verbose`` to get detailed progress output during execution. - - .. note:: Depending on the size of your deployment and the number of - compute hosts in aggregates, this command could cause a non-negligible - amount of traffic to the placement service and therefore is - recommended to be run during maintenance windows. - - .. versionadded:: Rocky - - Return codes: - - * 0: Successful run - * 1: A host was found with more than one matching compute node record - * 2: An unexpected error occurred while working with the placement API - * 3: Failed updating provider aggregates in placement - * 4: Host mappings not found for one or more host aggregate members - * 5: Compute node records not found for one or more hosts - * 6: Resource provider not found by uuid for a given host +Cells v2 Commands +================= + +cell_v2 simple_cell_setup +------------------------- + +.. program:: nova-manage cell_v2 simple_cell_setup + +.. code-block:: shell + + nova-manage cell_v2 simple_cell_setup [--transport-url ] + +Setup a fresh cells v2 environment. If :option:`--transport-url` is not +specified, it will use the one defined by :oslo.config:option:`transport_url` +in the configuration file. + +.. versionadded:: 14.0.0 (Newton) + +.. rubric:: Options + +.. option:: --transport-url + + The transport url for the cell message queue. + +.. rubric:: Return codes + +.. list-table:: + :widths: 20 80 + :header-rows: 1 + + * - Return code + - Description + * - 0 + - Setup is completed. 
+ * - 1 + - No hosts are reporting, meaning none can be mapped, or the transport + URL is missing or invalid. + +cell_v2 map_cell0 +----------------- + +.. program:: nova-manage cell_v2 map_cell0 + +.. code-block:: shell + + nova-manage cell_v2 map_cell0 [--database_connection <database_connection>] + +Create a cell mapping to the database connection for the cell0 database. +If a database_connection is not specified, it will use the one defined by +:oslo.config:option:`database.connection` in the configuration file passed +to nova-manage. The cell0 database is used for instances that have not been +scheduled to any cell. This generally applies to instances that have +encountered an error before they have been scheduled. + +.. versionadded:: 14.0.0 (Newton) + +.. rubric:: Options + +.. option:: --database_connection <database_connection> + + The database connection URL for ``cell0``. This is optional. If not + provided, a standard database connection will be used based on the main + database connection from nova configuration. + +.. rubric:: Return codes + +.. list-table:: + :widths: 20 80 + :header-rows: 1 + + * - Return code + - Description + * - 0 + - ``cell0`` is created successfully or has already been set up. + +cell_v2 map_instances +--------------------- + +.. program:: nova-manage cell_v2 map_instances + +.. code-block:: shell + + nova-manage cell_v2 map_instances --cell_uuid <cell_uuid> + [--max-count <max_count>] [--reset] + +Map instances to the provided cell. Instances in the nova database will +be queried from oldest to newest and mapped to the provided cell. +A :option:`--max-count` can be set on the number of instances to map in a single +run. Repeated runs of the command will start from where the last run finished +so it is not necessary to increase :option:`--max-count` to finish. +A :option:`--reset` option can be passed which will reset the marker, thus +making the command start from the beginning as opposed to the default behavior +of starting from where the last run finished. + +If :option:`--max-count` is not specified, all instances in the cell will be +mapped in batches of 50. If you have a large number of instances, consider +specifying a custom value and run the command until it exits with 0. + +.. versionadded:: 12.0.0 (Liberty) + +.. rubric:: Options + +.. option:: --cell_uuid <cell_uuid> + + Unmigrated instances will be mapped to the cell with the UUID provided. + +.. option:: --max-count <max_count> + + Maximum number of instances to map. If not set, all instances in the cell + will be mapped in batches of 50. If you have a large number of instances, + consider specifying a custom value and run the command until it exits with + 0. + +.. option:: --reset + + The command will start from the beginning as opposed to the default + behavior of starting from where the last run finished. + +.. rubric:: Return codes + +.. list-table:: + :widths: 20 80 + :header-rows: 1 + + * - Return code + - Description + * - 0 + - All instances have been mapped. + * - 1 + - There are still instances to be mapped. + * - 127 + - Invalid value for :option:`--max-count`. + * - 255 + - An unexpected error occurred. + +cell_v2 map_cell_and_hosts +-------------------------- + +.. program:: nova-manage cell_v2 map_cell_and_hosts + +.. code-block:: shell + + nova-manage cell_v2 map_cell_and_hosts [--name <cell_name>] + [--transport-url <transport_url>] [--verbose] + +Create a cell mapping to the database connection and message queue +transport URL, and map hosts to that cell. The database connection +comes from the :oslo.config:option:`database.connection` defined in the +configuration file passed to nova-manage. If :option:`--transport-url` is not +specified, it will use the one defined by +:oslo.config:option:`transport_url` in the configuration file. This command +is idempotent (can be run multiple times), and the :option:`--verbose` option will +print out the resulting cell mapping UUID.
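+ +For example, assuming :oslo.config:option:`database.connection` and +:oslo.config:option:`transport_url` are already set in the configuration file +passed to nova-manage, a typical invocation only needs a cell name (the name +below is illustrative): + +.. code-block:: shell + + nova-manage --config-file /etc/nova/nova.conf \ + cell_v2 map_cell_and_hosts --name cell1 --verbose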
+ +.. versionadded:: 13.0.0 (Mitaka) + +.. rubric:: Options + +.. option:: --transport-url <transport_url> + + The transport URL for the cell message queue. + +.. option:: --name <cell_name> + + The name of the cell. + +.. option:: --verbose + + Output the cell mapping UUID for any newly mapped hosts. + +.. rubric:: Return codes + +.. list-table:: + :widths: 20 80 + :header-rows: 1 + + * - Return code + - Description + * - 0 + - Successful completion. + * - 1 + - The transport URL is missing or invalid. + +cell_v2 verify_instance +----------------------- + +.. program:: nova-manage cell_v2 verify_instance + +.. code-block:: shell + + nova-manage cell_v2 verify_instance --uuid <instance_uuid> [--quiet] + +Verify instance mapping to a cell. This command is useful to determine if +the cells v2 environment is properly setup, specifically in terms of the +cell, host, and instance mapping records required. + +.. versionadded:: 14.0.0 (Newton) + +.. rubric:: Options + +.. option:: --uuid <instance_uuid> + + The instance UUID to verify. + +.. option:: --quiet + + Do not print anything. + +.. rubric:: Return codes + +.. list-table:: + :widths: 20 80 + :header-rows: 1 + + * - Return code + - Description + * - 0 + - The instance was successfully mapped to a cell. + * - 1 + - The instance is not mapped to a cell. See the ``map_instances`` + command. + * - 2 + - The cell mapping is missing. See the ``map_cell_and_hosts`` command if + you are upgrading from a cells v1 environment, and the + ``simple_cell_setup`` command if you are upgrading from a non-cells v1 + environment. + * - 3 + - The instance is a deleted instance that still has an instance mapping. + * - 4 + - The instance is an archived instance that still has an instance mapping. + +cell_v2 create_cell +------------------- + +.. program:: nova-manage cell_v2 create_cell + +.. code-block:: shell + + nova-manage cell_v2 create_cell [--name <cell_name>] + [--transport-url <transport_url>] + [--database_connection <database_connection>] [--verbose] [--disabled] + +Create a cell mapping to the database connection and message queue +transport URL. If a database_connection is not specified, it will use the +one defined by :oslo.config:option:`database.connection` in the +configuration file passed to nova-manage. If :option:`--transport-url` is not +specified, it will use the one defined by +:oslo.config:option:`transport_url` in the configuration file. The verbose +option will print out the resulting cell mapping UUID. All the cells +created are enabled by default. However, passing the :option:`--disabled` option +can create a pre-disabled cell, meaning no scheduling will happen to this +cell. + +.. versionadded:: 15.0.0 (Ocata) + +.. versionchanged:: 18.0.0 (Rocky) + + Added :option:`--disabled` option. + +.. rubric:: Options + +.. option:: --name <cell_name> + + The name of the cell. + +.. option:: --database_connection <database_connection> + + The database URL for the cell database. + +.. option:: --transport-url <transport_url> + + The transport URL for the cell message queue. + +.. option:: --verbose + + Output the UUID of the created cell. + +.. option:: --disabled + + Create a pre-disabled cell. + +.. rubric:: Return codes + +.. 
list-table:: + :widths: 20 80 + :header-rows: 1 + + * - Return code + - Description + * - 0 + - The cell mapping was successfully created. + * - 1 + - The transport URL or database connection was missing or invalid. + * - 2 + - Another cell is already using the provided transport URL and/or database + connection combination. + +cell_v2 discover_hosts +---------------------- + +.. program:: nova-manage cell_v2 discover_hosts + +.. code-block:: shell + + nova-manage cell_v2 discover_hosts [--cell_uuid ] [--verbose] + [--strict] [--by-service] + +Searches cells, or a single cell, and maps found hosts. This command will +check the database for each cell (or a single one if passed in) and map any +hosts which are not currently mapped. If a host is already mapped, nothing +will be done. You need to re-run this command each time you add a batch of +compute hosts to a cell (otherwise the scheduler will never place instances +there and the API will not list the new hosts). If :option:`--strict` is +specified, the command will only return 0 if an unmapped host was discovered +and mapped successfully. If :option:`--by-service` is specified, this command will +look in the appropriate cell(s) for any nova-compute services and ensure there +are host mappings for them. This is less efficient and is only necessary +when using compute drivers that may manage zero or more actual compute +nodes at any given time (currently only ironic). + +This command should be run once after all compute hosts have been deployed +and should not be run in parallel. When run in parallel, the commands will +collide with each other trying to map the same hosts in the database at the +same time. + +.. versionadded:: 14.0.0 (Newton) + +.. versionchanged:: 16.0.0 (Pike) + + Added :option:`--strict` option. + +.. versionchanged:: 18.0.0 (Rocky) + + Added :option:`--by-service` option. + +.. rubric:: Options + +.. option:: --cell_uuid + + If provided only this cell will be searched for new hosts to map. + +.. option:: --verbose + + Provide detailed output when discovering hosts. + +.. option:: --strict + + Considered successful (exit code 0) only when an unmapped host is + discovered. Any other outcome will be considered a failure (non-zero exit + code). + +.. option:: --by-service + + Discover hosts by service instead of compute node. + +.. rubric:: Return codes + +.. list-table:: + :widths: 20 80 + :header-rows: 1 + + * - Return code + - Description + * - 0 + - Hosts were successfully mapped or no hosts needed to be mapped. If + :option:`--strict` is specified, returns 0 only if an unmapped host was + discovered and mapped. + * - 1 + - If :option:`--strict` is specified and no unmapped hosts were found. + Also returns 1 if an exception was raised while running. + * - 2 + - The command was aborted because of a duplicate host mapping found. This + means the command collided with another running ``discover_hosts`` + command or scheduler periodic task and is safe to retry. + +cell_v2 list_cells +------------------ + +.. program:: nova-manage cell_v2 list_cells + +.. code-block:: shell + + nova-manage cell_v2 list_cells [--verbose] + +By default the cell name, UUID, disabled state, masked transport URL and +database connection details are shown. Use the :option:`--verbose` option to +see transport URL and database connection with their sensitive details. + +.. versionadded:: 15.0.0 (Ocata) + +.. versionchanged:: 18.0.0 (Rocky) + + Added the ``disabled`` column to output. + +.. rubric:: Options + +.. 
option:: --verbose + + Show sensitive details, such as passwords. + +.. rubric:: Return codes + +.. list-table:: + :widths: 20 80 + :header-rows: 1 + + * - Return code + - Description + * - 0 + - Success. + +cell_v2 delete_cell +------------------- + +.. program:: nova-manage cell_v2 delete_cell + +.. code-block:: shell + + nova-manage cell_v2 delete_cell [--force] --cell_uuid + +Delete a cell by the given UUID. + +.. versionadded:: 15.0.0 (Ocata) + +.. rubric:: Options + +.. option:: --force + + Delete hosts and instance_mappings that belong to the cell as well. + +.. option:: --cell_uuid + + The UUID of the cell to delete. + +.. rubric:: Return codes + +.. list-table:: + :widths: 20 80 + :header-rows: 1 + + * - Return code + - Description + * - 0 + - An empty cell was found and deleted successfully or a cell that has + hosts was found and the cell, hosts and the instance_mappings were + deleted successfully with :option:`--force` option (this happens if there are + no living instances). + * - 1 + - A cell with the provided UUID could not be found. + * - 2 + - Host mappings were found for the cell, meaning the cell is not empty, + and the :option:`--force` option was not provided. + * - 3 + - There are active instances mapped to the cell (cell not empty). + * - 4 + - There are (inactive) instances mapped to the cell and the + :option:`--force` option was not provided. + +cell_v2 list_hosts +------------------ + +.. program:: nova-manage cell_v2 list_hosts + +.. code-block:: shell + + nova-manage cell_v2 list_hosts [--cell_uuid ] + +Lists the hosts in one or all v2 cells. By default hosts in all v2 cells +are listed. Use the :option:`--cell_uuid` option to list hosts in a specific cell. + +.. versionadded:: 17.0.0 (Queens) + +.. rubric:: Options + +.. option:: --cell_uuid + + The UUID of the cell. + +.. rubric:: Return codes + +.. list-table:: + :widths: 20 80 + :header-rows: 1 + + * - Return code + - Description + * - 0 + - Success. + * - 1 + - The cell indicated by :option:`--cell_uuid` was not found. + +cell_v2 update_cell +------------------- + +.. program:: nova-manage cell_v2 update_cell + +.. code-block:: shell + + nova-manage cell_v2 update_cell --cell_uuid + [--name ] [--transport-url ] + [--database_connection ] [--disable] [--enable] + +Updates the properties of a cell by the given uuid. If a +database_connection is not specified, it will attempt to use the one +defined by :oslo.config:option:`database.connection` in the configuration +file. If a transport_url is not specified, it will attempt to use the one +defined by :oslo.config:option:`transport_url` in the configuration file. + +.. note:: + + Updating the ``transport_url`` or ``database_connection`` fields on a + running system will NOT result in all nodes immediately using the new + values. Use caution when changing these values. + + The scheduler will not notice that a cell has been enabled/disabled until + it is restarted or sent the SIGHUP signal. + +.. versionadded:: 16.0.0 (Pike) + +.. versionchanged:: 18.0.0 (Rocky) + + Added :option:`--enable`, :option:`--disable` options. + +.. rubric:: Options + +.. option:: --cell_uuid + + The UUID of the cell to update. + +.. option:: --name + + Set the cell name. + +.. option:: --transport-url + + Set the cell ``transport_url``. Note that running nodes will not see + the change until restarted or the ``SIGHUP`` signal is sent. + +.. option:: --database_connection + + Set the cell ``database_connection``. 
Note that running nodes will not see + the change until restarted or the ``SIGHUP`` signal is sent. + +.. option:: --disable + + Disables the cell. Note that the scheduling will be blocked to this cell + until it is enabled and the ``nova-scheduler`` service is restarted or + the ``SIGHUP`` signal is sent. + +.. option:: --enable + + Enables the cell. Note that the ``nova-scheduler`` service will not see the + change until it is restarted or the ``SIGHUP`` signal is sent. + +.. rubric:: Return codes + +.. list-table:: + :widths: 20 80 + :header-rows: 1 + + * - Return code + - Description + * - 0 + - Success. + * - 1 + - The cell was not found by the provided UUID. + * - 2 + - The specified properties could not be set. + * - 3 + - The provided :option:`--transport-url` and/or + :option:`--database_connection` parameters matched those of another + cell. + * - 4 + - An attempt was made to disable and enable a cell at the same time. + * - 5 + - An attempt was made to disable or enable cell0. + +cell_v2 delete_host +------------------- + +.. program:: nova-manage cell_v2 delete_host + +.. code-block:: shell + + nova-manage cell_v2 delete_host --cell_uuid <cell_uuid> --host <host> + +Delete a host by the given host name and the given cell UUID. + +.. versionadded:: 17.0.0 (Queens) + +.. note:: + + The scheduler caches host-to-cell mapping information so when deleting + a host the scheduler may need to be restarted or sent the SIGHUP signal. + +.. rubric:: Options + +.. option:: --cell_uuid <cell_uuid> + + The UUID of the cell. + +.. option:: --host <host> + + The host to delete. + +.. rubric:: Return codes + +.. list-table:: + :widths: 20 80 + :header-rows: 1 + + * - Return code + - Description + * - 0 + - The empty host was found and deleted successfully. + * - 1 + - A cell with the specified UUID could not be found. + * - 2 + - A host with the specified name could not be found. + * - 3 + - The host with the specified name is not in a cell with the specified UUID. + * - 4 + - The host with the specified name has instances (host not empty). + +Placement Commands +================== + +.. _heal_allocations_cli: + +placement heal_allocations +-------------------------- + +.. program:: nova-manage placement heal_allocations + +.. code-block:: shell + + nova-manage placement heal_allocations [--max-count <max_count>] + [--verbose] [--skip-port-allocations] [--dry-run] + [--instance <instance_uuid>] [--cell <cell_uuid>] [--force] + +Iterates over non-cell0 cells looking for instances which do not have +allocations in the Placement service and which are not undergoing a task +state transition. For each instance found, allocations are created against +the compute node resource provider for that instance based on the flavor +associated with the instance. + +Also, if the instance has a port attached that has a resource request (e.g. +:neutron-doc:`Quality of Service (QoS): Guaranteed Bandwidth +<admin/config-qos-min-bw.html>`) but the corresponding +allocation is not found then the allocation is created against the +network device resource providers according to the resource request of +that port. It is possible that the missing allocation cannot be created +either due to not having enough resource inventory on the host the instance +resides on or because more than one resource provider could fulfill the +request. In this case the instance needs to be manually deleted or the +port needs to be detached. When nova `supports migrating instances +with guaranteed bandwidth ports`__, migration will heal missing allocations +for these instances. + +.. __: https://specs.openstack.org/openstack/nova-specs/specs/train/approved/support-move-ops-with-qos-ports.html + +Before the allocations for the ports are persisted in placement nova-manage +tries to update each port in neutron to refer to the resource provider UUID +which provides the requested resources. If any of the port updates fail in +neutron or the allocation update fails in placement the command tries to +roll back the partial updates to the ports. 
If the roll back fails +then the process stops with exit code ``7`` and the admin needs to do the +rollback in neutron manually according to the description in the exit code +section. + +There is also a special case handled for instances that *do* have +allocations created before Placement API microversion 1.8 where project_id +and user_id values were required. For those types of allocations, the +project_id and user_id are updated using the values from the instance. + +This command requires that the +:oslo.config:option:`api_database.connection` and +:oslo.config:group:`placement` configuration options are set. Placement API +>= 1.28 is required. + +.. versionadded:: 18.0.0 (Rocky) + +.. versionchanged:: 20.0.0 (Train) + + Added :option:`--dry-run`, :option:`--instance`, and + :option:`--skip-port-allocations` options. + +.. versionchanged:: 21.0.0 (Ussuri) + + Added :option:`--cell` option. + +.. versionchanged:: 22.0.0 (Victoria) + + Added :option:`--force` option. + +.. rubric:: Options + +.. option:: --max-count <max_count> + + Maximum number of instances to process. If not specified, all instances in + each cell will be mapped in batches of 50. If you have a large number of + instances, consider specifying a custom value and run the command until it + exits with 0 or 4. + +.. option:: --verbose + + Provide verbose output during execution. + +.. option:: --dry-run + + Runs the command and prints output but does not commit any changes. The + return code should be 4. + +.. option:: --instance <instance_uuid> + + UUID of a specific instance to process. If specified, :option:`--max-count` + has no effect. Mutually exclusive with :option:`--cell`. + +.. option:: --skip-port-allocations + + Skip the healing of the resource allocations of bound ports, e.g. healing + bandwidth resource allocation for ports having minimum QoS policy rules + attached. If your deployment does not use such a feature then the + performance impact of querying neutron ports for each instance can be + avoided with this flag. + +.. option:: --cell <cell_uuid> + + Heal allocations within a specific cell. Mutually exclusive with + :option:`--instance`. + +.. option:: --force + + Force heal allocations. Requires the :option:`--instance` argument. + +.. rubric:: Return codes + +.. list-table:: + :widths: 20 80 + :header-rows: 1 + + * - Return code + - Description + * - 0 + - Command completed successfully and allocations were created. + * - 1 + - :option:`--max-count` was reached and there are more instances to + process. + * - 2 + - Unable to find a compute node record for a given instance. + * - 3 + - Unable to create (or update) allocations for an instance against its + compute node resource provider. + * - 4 + - Command completed successfully but no allocations were created. + * - 5 + - Unable to query ports from neutron. + * - 6 + - Unable to update ports in neutron. + * - 7 + - Cannot roll back neutron port updates. Manual steps needed. The + error message will indicate which neutron ports need to be changed + to clean up ``binding:profile`` of the port:: + + $ openstack port unset <port_uuid> --binding-profile allocation + + * - 127 + - Invalid input. + * - 255 + - An unexpected error occurred. + +.. _sync_aggregates_cli: + +placement sync_aggregates +------------------------- + +.. program:: nova-manage placement sync_aggregates + +.. code-block:: shell + + nova-manage placement sync_aggregates [--verbose] + +Mirrors compute host aggregates to resource provider aggregates +in the Placement service. 
Requires the :oslo.config:group:`api_database` +and :oslo.config:group:`placement` sections of the nova configuration file +to be populated. + +Specify :option:`--verbose` to get detailed progress output during execution. + +.. note:: + + Depending on the size of your deployment and the number of + compute hosts in aggregates, this command could cause a non-negligible + amount of traffic to the placement service and therefore is + recommended to be run during maintenance windows. + +.. versionadded:: 18.0.0 (Rocky) + +.. rubric:: Options + +.. option:: --verbose + + Provide verbose output during execution. + +.. rubric:: Return codes + +.. list-table:: + :widths: 20 80 + :header-rows: 1 + + * - Return code + - Description + * - 0 + - Successful run + * - 1 + - A host was found with more than one matching compute node record + * - 2 + - An unexpected error occurred while working with the placement API + * - 3 + - Failed updating provider aggregates in placement + * - 4 + - Host mappings not found for one or more host aggregate members + * - 5 + - Compute node records not found for one or more hosts + * - 6 + - Resource provider not found by uuid for a given host + * - 255 + - An unexpected error occurred. + +placement audit +--------------- + +.. program:: nova-manage placement audit + +.. code-block:: shell + + nova-manage placement audit [--verbose] [--delete] + [--resource_provider <uuid>] + +Iterates over all the Resource Providers (or just one if you provide the +UUID) and then verifies if the compute allocations are either related to +an existing instance or a migration UUID. If not, it will report which +allocations are orphaned. + +This command requires that the +:oslo.config:option:`api_database.connection` and +:oslo.config:group:`placement` configuration options are set. Placement API +>= 1.14 is required. + +.. versionadded:: 21.0.0 (Ussuri) + +.. rubric:: Options + +.. option:: --verbose + + Provide verbose output during execution. + +.. option:: --resource_provider <uuid> + + UUID of a specific resource provider to verify. + +.. option:: --delete + + Deletes orphaned allocations that were found. + +.. rubric:: Return codes + +.. list-table:: + :widths: 20 80 + :header-rows: 1 + + * - Return code + - Description + * - 0 + - No orphaned allocations were found + * - 1 + - An unexpected error occurred + * - 3 + - Orphaned allocations were found + * - 4 + - All found orphaned allocations were deleted + * - 127 + - Invalid input + + +Volume Attachment Commands +========================== + +volume_attachment get_connector +------------------------------- + +.. program:: nova-manage volume_attachment get_connector + +.. code-block:: shell + + nova-manage volume_attachment get_connector + +Show the host connector for this compute host. + +When called with the ``--json`` switch this dumps a JSON string containing the +connector information for the current host, which can be saved to a file and +used as input for the :program:`nova-manage volume_attachment refresh` command. + +.. versionadded:: 24.0.0 (Xena) + +.. rubric:: Return codes + +.. list-table:: + :widths: 20 80 + :header-rows: 1 + + * - Return code + - Description + * - 0 + - Success + * - 1 + - An unexpected error occurred + +volume_attachment show +---------------------- + +.. program:: nova-manage volume_attachment show + +.. code-block:: shell + + nova-manage volume_attachment show [INSTANCE_UUID] [VOLUME_ID] + +Show the details of the volume attachment between ``VOLUME_ID`` and +``INSTANCE_UUID``. + +.. 
versionadded:: 24.0.0 (Xena) + +.. rubric:: Return codes + +.. list-table:: + :widths: 20 80 + :header-rows: 1 + + * - Return code + - Description + * - 0 + - Success + * - 1 + - An unexpected error occurred + * - 2 + - Instance not found + * - 3 + - Instance is not attached to volume + +volume_attachment refresh +------------------------- + +.. program:: nova-manage volume_attachment refresh + +.. code-block:: shell + + nova-manage volume_attachment refresh [INSTANCE_UUID] [VOLUME_ID] [CONNECTOR_PATH] + +Refresh the connection info associated with a given volume attachment. + +The instance must be attached to the volume, have a ``vm_state`` of ``stopped`` +and not be ``locked``. + +``CONNECTOR_PATH`` should be the path to a JSON-formatted file containing +up-to-date connector information for the compute currently hosting the +instance as generated using the :program:`nova-manage volume_attachment +get_connector` command. + +.. versionadded:: 24.0.0 (Xena) + +.. rubric:: Return codes + +.. list-table:: + :widths: 20 80 + :header-rows: 1 + + * - Return code + - Description + * - 0 + - Success + * - 1 + - An unexpected error occurred + * - 2 + - Connector path does not exist + * - 3 + - Failed to open connector path + * - 4 + - Instance does not exist + * - 5 + - Instance state invalid (must be stopped and unlocked) + * - 6 + - Instance is not attached to volume + +Libvirt Commands +================ + +libvirt get_machine_type +------------------------ + +.. program:: nova-manage libvirt get_machine_type + +.. code-block:: shell + + nova-manage libvirt get_machine_type [INSTANCE_UUID] + +Fetch and display the recorded machine type of a libvirt instance identified +by ``INSTANCE_UUID``. + +.. versionadded:: 23.0.0 (Wallaby) + +.. rubric:: Return codes + +.. list-table:: + :widths: 20 80 + :header-rows: 1 + + * - Return code + - Description + * - 0 + - Successfully completed + * - 1 + - An unexpected error occurred + * - 2 + - Unable to find instance or instance mapping + * - 3 + - No machine type found for instance + +libvirt update_machine_type +--------------------------- + +.. program:: nova-manage libvirt update_machine_type + +.. code-block:: shell + + nova-manage libvirt update_machine_type \ + [INSTANCE_UUID] [MACHINE_TYPE] [--force] + +Set or update the recorded machine type of instance ``INSTANCE_UUID`` to +machine type ``MACHINE_TYPE``. + +The following criteria must be met when using this command: + +* The instance must have a ``vm_state`` of ``STOPPED``, ``SHELVED`` or + ``SHELVED_OFFLOADED``. + +* The machine type must be supported. The supported list includes alias and + versioned types of ``pc``, ``pc-i440fx``, ``pc-q35``, ``q35``, ``virt`` + or ``s390-ccw-virtio``. + +* The update will not move the instance between underlying machine types. + For example, ``pc`` to ``q35``. + +* The update will not move the instance between an alias and versioned + machine type or vice versa. For example, ``pc`` to ``pc-1.2.3`` or + ``pc-1.2.3`` to ``pc``. + +A ``--force`` flag is provided to skip the above checks, but caution +should be taken as this could easily lead to the underlying ABI of the +instance changing when moving between machine types. + +.. versionadded:: 23.0.0 (Wallaby) + +.. rubric:: Options + +.. option:: --force + + Skip machine type compatibility checks and force machine type update. + +.. rubric:: Return codes + +.. 
list-table:: + :widths: 20 80 + :header-rows: 1 + + * - Return code + - Description + * - 0 + - Update completed successfully + * - 1 + - An unexpected error occurred + * - 2 + - Unable to find instance or instance mapping + * - 3 + - The instance has an invalid ``vm_state`` + * - 4 + - The proposed update of the machine type is invalid + * - 5 + - The provided machine type is unsupported + +libvirt list_unset_machine_type +------------------------------- + +.. program:: nova-manage libvirt list_unset_machine_type + +.. code-block:: shell + + nova-manage libvirt list_unset_machine_type [--cell-uuid ] + +List the UUID of any instance without ``hw_machine_type`` set. + +This command is useful for operators attempting to determine when it is +safe to change the :oslo.config:option:`libvirt.hw_machine_type` option +within an environment. + +.. versionadded:: 23.0.0 (Wallaby) + +.. rubric:: Options + +.. option:: --cell_uuid + + The UUID of the cell to list instances from. + +.. rubric:: Return codes + +.. list-table:: + :widths: 20 80 + :header-rows: 1 + + * - Return code + - Description + * - 0 + - Completed successfully, no instances found without ``hw_machine_type`` + set + * - 1 + - An unexpected error occurred + * - 2 + - Unable to find cell mapping + * - 3 + - Instances found without ``hw_machine_type`` set See Also ======== -* :nova-doc:`OpenStack Nova <>` +:doc:`nova-policy(1) `, +:doc:`nova-status(1) ` Bugs ==== diff --git a/doc/source/cli/nova-network.rst b/doc/source/cli/nova-network.rst deleted file mode 100644 index 75234e35f0b..00000000000 --- a/doc/source/cli/nova-network.rst +++ /dev/null @@ -1,53 +0,0 @@ -============ -nova-network -============ - -------------------- -Nova Network Server -------------------- - -:Author: openstack@lists.openstack.org -:Copyright: OpenStack Foundation -:Manual section: 1 -:Manual group: cloud computing - -Synopsis -======== - -:: - - nova-network [options] - -Description -=========== - -:program:`nova-network` is a server daemon that serves the Nova Network -service, which is responsible for allocating IPs and setting up the network - -.. deprecated:: 14.0.0 - - :program:`nova-network` is deprecated and will be removed in an upcoming - release. Use *neutron* or another networking solution instead. - -Options -======= - -**General options** - -Files -===== - -* ``/etc/nova/nova.conf`` -* ``/etc/nova/policy.json`` -* ``/etc/nova/rootwrap.conf`` -* ``/etc/nova/rootwrap.d/`` - -See Also -======== - -* :nova-doc:`OpenStack Nova <>` - -Bugs -==== - -* Nova bugs are managed at `Launchpad `__ diff --git a/doc/source/cli/nova-novncproxy.rst b/doc/source/cli/nova-novncproxy.rst index 2811474e35b..f2df84e2cca 100644 --- a/doc/source/cli/nova-novncproxy.rst +++ b/doc/source/cli/nova-novncproxy.rst @@ -2,21 +2,14 @@ nova-novncproxy =============== -------------------------------------------------------- -Websocket novnc Proxy for OpenStack Nova noVNC consoles -------------------------------------------------------- - -:Author: openstack@lists.openstack.org -:Copyright: OpenStack Foundation -:Manual section: 1 -:Manual group: cloud computing +.. program:: nova-novncproxy Synopsis ======== :: - nova-novncproxy [options] + nova-novncproxy [...] Description =========== @@ -28,20 +21,73 @@ with OpenStack Nova noVNC consoles. Options ======= -**General options** +.. rubric:: General options + +.. include:: opts/common.rst + +.. rubric:: Websockify options + +.. include:: opts/websockify.rst + +.. rubric:: VNC options + +.. 
option:: --vnc-auth_schemes VNC_AUTH_SCHEMES + + The authentication schemes to use with the compute node. Control what RFB + authentication schemes are permitted for connections between the proxy and + the compute host. If multiple schemes are enabled, the first matching + scheme will be used, thus the strongest schemes should be listed first. + +.. option:: --vnc-novncproxy_host VNC_NOVNCPROXY_HOST + + IP address that the noVNC console proxy should bind to. The VNC proxy is an + OpenStack component that enables compute service users to access their + instances through VNC clients. noVNC provides VNC support through a + websocket-based client. This option sets the private address to which the + noVNC console proxy service should bind. + +.. option:: --vnc-novncproxy_port VNC_NOVNCPROXY_PORT + + Port that the noVNC console proxy should bind to. The VNC proxy is an + OpenStack component that enables compute service users to access their + instances through VNC clients. noVNC provides VNC support through a + websocket-based client. This option sets the private port to which the + noVNC console proxy service should bind. + +.. option:: --vnc-vencrypt_ca_certs VNC_VENCRYPT_CA_CERTS + + The path to the CA certificate PEM file. The fully qualified path to a PEM + file containing one or more x509 certificates for the certificate + authorities used by the compute node VNC server. + +.. option:: --vnc-vencrypt_client_cert VNC_VENCRYPT_CLIENT_CERT + + The path to the client certificate PEM file (for x509). The fully qualified + path to a PEM file containing the x509 certificate which the VNC proxy server + presents to the compute node during VNC authentication. + +.. option:: --vnc-vencrypt_client_key VNC_VENCRYPT_CLIENT_KEY + + The path to the client key file (for x509). The fully qualified + path to a PEM file containing the private key which the VNC proxy server + presents to the compute node during VNC authentication. + +.. rubric:: Debugger options + +.. include:: opts/debugger.rst Files ===== * ``/etc/nova/nova.conf`` -* ``/etc/nova/policy.json`` * ``/etc/nova/rootwrap.conf`` * ``/etc/nova/rootwrap.d/`` See Also ======== -* :nova-doc:`OpenStack Nova <>` +:doc:`nova-serialproxy(1) <nova-serialproxy>`, +:doc:`nova-spicehtml5proxy(1) <nova-spicehtml5proxy>` Bugs ==== diff --git a/doc/source/cli/nova-policy.rst b/doc/source/cli/nova-policy.rst new file mode 100644 index 00000000000..480c3b2d983 --- /dev/null +++ b/doc/source/cli/nova-policy.rst @@ -0,0 +1,94 @@ +=========== +nova-policy +=========== + +.. program:: nova-policy + +Synopsis +======== + +:: + + nova-policy [<options>...] + +Description +=========== + +:program:`nova-policy` is a tool that allows for inspection of policy file +configuration. It provides a way to identify the actions available for a user. +It does not require a running deployment: validation runs against the policy +files typically located at ``/etc/nova/policy.yaml`` and in the +``/etc/nova/policy.d`` directory. These paths are configurable via the +``[oslo_policy] policy_file`` and ``[oslo_policy] policy_dirs`` configuration +options, respectively. + +Options +======= + +.. rubric:: General options + +.. include:: opts/common.rst + +.. rubric:: User options + +.. option:: --os-roles <auth-roles> + + Defaults to ``$OS_ROLES``. + +.. option:: --os-tenant-id <auth-tenant-id> + + Defaults to ``$OS_TENANT_ID``. + +.. option:: --os-user-id <auth-user-id> + + Defaults to ``$OS_USER_ID``. + +.. rubric:: Debugger options + +.. include:: opts/debugger.rst
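+ +For example, the ``policy check`` command described below can be combined with +these user options to list the rules that pass for a specific user and project +(the IDs are illustrative placeholders): + +.. code-block:: shell + + nova-policy --os-user-id <user_uuid> --os-tenant-id <project_uuid> \ + --os-roles admin policy check --api-name os_compute_api:servers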
+ +Commands +======== + +policy check +------------ + +:: + + nova-policy policy check [-h] [--api-name <name>] + [--target <target> [<target>...]] + +Prints all passing policy rules for the given user. + +.. rubric:: Options + +.. option:: --api-name <name> + + Return only the passing policy rules containing the given API name. + If unspecified, all passing policy rules will be returned. + +.. option:: --target <target> [<target>...] + + The target(s) against which the policy rule authorization will be tested. + The available targets are: ``project_id``, ``user_id``, ``quota_class``, + ``availability_zone``, ``instance_id``. + When ``instance_id`` is used, the other targets will be overwritten. + If unspecified, the given user will be considered as the target. + +Files +===== + +* ``/etc/nova/nova.conf`` +* ``/etc/nova/policy.yaml`` +* ``/etc/nova/policy.d/`` + +See Also +======== + +:doc:`nova-manage(1) <nova-manage>`, +:doc:`nova-status(1) <nova-status>` + +Bugs +==== + +* Nova bugs are managed at `Launchpad <https://bugs.launchpad.net/nova>`__ diff --git a/doc/source/cli/nova-rootwrap.rst b/doc/source/cli/nova-rootwrap.rst index bb2d85f6f26..4fcae829fcd 100644 --- a/doc/source/cli/nova-rootwrap.rst +++ b/doc/source/cli/nova-rootwrap.rst @@ -2,21 +2,14 @@ nova-rootwrap ============= ---------------------- -Root wrapper for Nova ---------------------- - -:Author: openstack@lists.openstack.org -:Copyright: OpenStack Foundation -:Manual section: 1 -:Manual group: cloud computing +.. program:: nova-rootwrap Synopsis ======== :: - nova-rootwrap [options] + nova-rootwrap CONFIG_FILE COMMAND Description =========== @@ -42,11 +35,6 @@ To make allowed commands node-specific, your packaging should only install :program:`nova-rootwrap` is being slowly deprecated and replaced by ``oslo.privsep``, and will eventually be removed. -Options -======= - -**General options** - Files ===== @@ -57,7 +45,7 @@ Files See Also ======== -* :nova-doc:`OpenStack Nova <>` +:doc:`nova-compute(1) <nova-compute>` Bugs ==== diff --git a/doc/source/cli/nova-scheduler.rst b/doc/source/cli/nova-scheduler.rst index 30d31c265c3..dd5cd468725 100644 --- a/doc/source/cli/nova-scheduler.rst +++ b/doc/source/cli/nova-scheduler.rst @@ -2,21 +2,14 @@ nova-scheduler ============== --------------- -Nova Scheduler --------------- - -:Author: openstack@lists.openstack.org -:Copyright: OpenStack Foundation -:Manual section: 1 -:Manual group: cloud computing +.. program:: nova-scheduler Synopsis ======== :: - nova-scheduler [options] + nova-scheduler [<options>...] Description =========== @@ -28,20 +21,26 @@ instance on. Options ======= -**General options** +.. rubric:: General options + +.. include:: opts/common.rst + +.. rubric:: Debugger options + +.. include:: opts/debugger.rst Files ===== * ``/etc/nova/nova.conf`` -* ``/etc/nova/policy.json`` * ``/etc/nova/rootwrap.conf`` * ``/etc/nova/rootwrap.d/`` See Also ======== -* :nova-doc:`OpenStack Nova <>` +:doc:`nova-compute(1) <nova-compute>`, +:doc:`nova-conductor(1) <nova-conductor>` Bugs ==== diff --git a/doc/source/cli/nova-serialproxy.rst b/doc/source/cli/nova-serialproxy.rst index cae809b1a22..f362b1b951e 100644 --- a/doc/source/cli/nova-serialproxy.rst +++ b/doc/source/cli/nova-serialproxy.rst @@ -2,21 +2,14 @@ nova-serialproxy ================ ------------------------------------------------------- -Websocket serial Proxy for OpenStack Nova serial ports ------------------------------------------------------- - -:Author: openstack@lists.launchpad.net -:Copyright: OpenStack Foundation -:Manual section: 1 -:Manual group: cloud computing +.. 
program:: nova-serialproxy Synopsis ======== :: - nova-serialproxy [options] + nova-serialproxy [...] Description =========== @@ -28,20 +21,46 @@ with OpenStack Nova serial ports. Options ======= -**General options** +.. rubric:: General options + +.. include:: opts/common.rst + +.. rubric:: Websockify options + +.. include:: opts/websockify.rst + +.. rubric:: Serial options + +.. option:: --serial_console-serialproxy_host SERIAL_CONSOLE_SERIALPROXY_HOST + + The IP address which is used by the ``nova-serialproxy`` service to listen + for incoming requests. The ``nova-serialproxy`` service listens on this IP + address for incoming connection requests to instances which expose serial + console. + +.. option:: --serial_console-serialproxy_port SERIAL_CONSOLE_SERIALPROXY_PORT + + The port number which is used by the ``nova-serialproxy`` service to + listen for incoming requests. The ``nova-serialproxy`` service listens on + this port number for incoming connection requests to instances which expose + serial console. + +.. rubric:: Debugger options + +.. include:: opts/debugger.rst Files ===== * ``/etc/nova/nova.conf`` -* ``/etc/nova/policy.json`` * ``/etc/nova/rootwrap.conf`` * ``/etc/nova/rootwrap.d/`` See Also ======== -* :nova-doc:`OpenStack Nova <>` +:doc:`nova-novncproxy(1) `, +:doc:`nova-spicehtml5proxy(1) ` Bugs ==== diff --git a/doc/source/cli/nova-spicehtml5proxy.rst b/doc/source/cli/nova-spicehtml5proxy.rst index bb98904ff66..65173c8d336 100644 --- a/doc/source/cli/nova-spicehtml5proxy.rst +++ b/doc/source/cli/nova-spicehtml5proxy.rst @@ -2,21 +2,14 @@ nova-spicehtml5proxy ==================== -------------------------------------------------------- -Websocket Proxy for OpenStack Nova SPICE HTML5 consoles -------------------------------------------------------- - -:Author: openstack@lists.openstack.org -:Copyright: OpenStack Foundation -:Manual section: 1 -:Manual group: cloud computing +.. program:: nova-spicehtml5proxy Synopsis ======== :: - nova-spicehtml5proxy [options] + nova-spicehtml5proxy [...] Description =========== @@ -28,20 +21,47 @@ compatible with OpenStack Nova SPICE HTML5 consoles. Options ======= -**General options** +.. rubric:: General options + +.. include:: opts/common.rst + +.. rubric:: Websockify options + +.. include:: opts/websockify.rst + +.. rubric:: Spice options + +.. option:: --spice-html5proxy_host SPICE_HTML5PROXY_HOST + + IP address or a hostname on which the ``nova-spicehtml5proxy`` service + listens for incoming requests. This option depends on the ``[spice] + html5proxy_base_url`` option in ``nova.conf``. The ``nova-spicehtml5proxy`` + service must be listening on a host that is accessible from the HTML5 + client. + +.. option:: --spice-html5proxy_port SPICE_HTML5PROXY_PORT + + Port on which the ``nova-spicehtml5proxy`` service listens for incoming + requests. This option depends on the ``[spice] html5proxy_base_url`` option + in ``nova.conf``. The ``nova-spicehtml5proxy`` service must be listening + on a port that is accessible from the HTML5 client. + +.. rubric:: Debugger options + +.. 
include:: opts/debugger.rst Files ===== * ``/etc/nova/nova.conf`` -* ``/etc/nova/policy.json`` * ``/etc/nova/rootwrap.conf`` * ``/etc/nova/rootwrap.d/`` See Also ======== -* :nova-doc:`OpenStack Nova <>` +:doc:`nova-novncproxy(1) <nova-novncproxy>`, +:doc:`nova-serialproxy(1) <nova-serialproxy>` Bugs ==== diff --git a/doc/source/cli/nova-status.rst b/doc/source/cli/nova-status.rst index 33a05c21aab..a198159e17c 100644 --- a/doc/source/cli/nova-status.rst +++ b/doc/source/cli/nova-status.rst @@ -2,21 +2,14 @@ nova-status =========== --------------------------------------- -CLI interface for nova status commands --------------------------------------- - -:Author: openstack@lists.openstack.org -:Copyright: OpenStack Foundation -:Manual section: 1 -:Manual group: cloud computing +.. program:: nova-status Synopsis ======== :: - nova-status [] + nova-status [<category> <command> [<args>...]] Description =========== @@ -95,7 +88,7 @@ Upgrade make a successful request to the endpoint. The command also checks to see that there are compute node resource providers checking in with the Placement service. More information on the Placement service can be found - at :nova-doc:`Placement API `. + at :placement-doc:`Placement API <>`. **16.0.0 (Pike)** @@ -118,10 +111,52 @@ Upgrade * Checks that existing instances have been migrated to have a matching request spec in the API DB. + **19.0.0 (Stein)** + + * Checks for the Placement API are modified to require version 1.30. + * Checks are added for the **nova-consoleauth** service to warn and provide + additional instructions to set **[workarounds]enable_consoleauth = True** + while performing a live/rolling upgrade. + * The "Resource Providers" upgrade check was removed since the placement + service code is being extracted from nova and the related tables are no + longer used in the ``nova_api`` database. + * The "API Service Version" upgrade check was removed since the corresponding + code for that check was removed in Stein. + + **20.0.0 (Train)** + + * Checks for the Placement API are modified to require version 1.32. + * Checks to ensure block-storage (cinder) API version 3.44 is + available in order to support multi-attach volumes. + If ``[cinder]/auth_type`` is not configured this is a no-op check. + * The "**nova-consoleauth** service" upgrade check was removed since the + service was removed in Train. + * The ``Request Spec Migration`` check was removed. + + **21.0.0 (Ussuri)** + + * Checks for the Placement API are modified to require version 1.35. + * Checks that the policy files are not automatically overwritten with + new defaults. + + **22.0.0 (Victoria)** + + * Checks that the policy files are not JSON-formatted. + + **23.0.0 (Wallaby)** + + * Checks for computes older than the previous major release. + * Checks for any instances without ``hw_machine_type`` set. + + **24.0.0 (Xena)** + + * Checks for the Placement API are modified to require version 1.36. 
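+ +As an illustrative sketch, the command prints one result block per check; the +exact set of checks and the table layout vary by release: + +.. code-block:: shell + + $ nova-status upgrade check + +-------------------------------+ + | Upgrade Check Results | + +-------------------------------+ + | Check: Cells v2 | + | Result: Success | + | Details: None | + +-------------------------------+ + +The command exits 0 when all checks succeed, 1 if any check emits a warning +and 2 if any check fails.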
+ See Also ======== -* :nova-doc:`OpenStack Nova <>` +:doc:`nova-manage(1) <nova-manage>`, +:doc:`nova-policy(1) <nova-policy>` Bugs ==== diff --git a/doc/source/cli/nova-xvpvncproxy.rst b/doc/source/cli/nova-xvpvncproxy.rst deleted file mode 100644 index 5b02e314c54..00000000000 --- a/doc/source/cli/nova-xvpvncproxy.rst +++ /dev/null @@ -1,49 +0,0 @@ -================ -nova-xvpvncproxy -================ - ----------------------------- -XVP VNC Console Proxy Server ----------------------------- - -:Author: openstack@lists.openstack.org -:Copyright: OpenStack Foundation -:Manual section: 1 -:Manual group: cloud computing - -Synopsis -======== - -:: - - nova-xvpvncproxy [options] - -Description -=========== - -:program:`nova-xvpvncproxy` is a server daemon that serves the Nova XVP VNC -Console Proxy service, which provides an XVP-based VNC Console Proxy for use -with the Xen hypervisor. - -Options -======= - -**General options** - -Files -===== - -* ``/etc/nova/nova.conf`` -* ``/etc/nova/policy.json`` -* ``/etc/nova/rootwrap.conf`` -* ``/etc/nova/rootwrap.d/`` - -See Also -======== - -* :nova-doc:`OpenStack Nova <>` - -Bugs -==== - -* Nova bugs are managed at `Launchpad `__ diff --git a/doc/source/cli/opts/common.rst b/doc/source/cli/opts/common.rst new file mode 100644 index 00000000000..d369b317110 --- /dev/null +++ b/doc/source/cli/opts/common.rst @@ -0,0 +1,96 @@ +.. option:: --config-dir DIR + + Path to a config directory to pull ``*.conf`` files from. This file set is + sorted, so as to provide a predictable parse order if individual options + are over-ridden. The set is parsed after the file(s) specified via previous + --config-file arguments, hence over-ridden options in the directory take + precedence. This option must be set from the command-line. + +.. option:: --config-file PATH + + Path to a config file to use. Multiple config files can be specified, with + values in later files taking precedence. Defaults to None. This option must + be set from the command-line. + +.. option:: --debug, -d + + Set the logging level to DEBUG instead of the default INFO level. + +.. option:: --log-config-append PATH, --log-config PATH, --log_config PATH + + The name of a logging configuration file. This file is appended to any + existing logging configuration files. For details about logging + configuration files, see the Python logging module documentation. Note that + when logging configuration files are used then all logging configuration is + set in the configuration file and other logging configuration options are + ignored (for example, log-date-format). + +.. option:: --log-date-format DATE_FORMAT + + Defines the format string for %(asctime)s in log records. Default: None. + This option is ignored if log_config_append is set. + +.. option:: --log-dir LOG_DIR, --logdir LOG_DIR + + (Optional) The base directory used for relative log_file paths. This option + is ignored if log_config_append is set. + +.. option:: --log-file PATH, --logfile PATH + + (Optional) Name of log file to send logging output to. If no default is + set, logging will go to stderr as defined by use_stderr. This option is + ignored if log_config_append is set. + +.. option:: --nodebug + + The inverse of :option:`--debug`. + +.. option:: --nouse-journal + + The inverse of :option:`--use-journal`. + +.. option:: --nouse-json + + The inverse of :option:`--use-json`. + +.. option:: --nouse-syslog + + The inverse of :option:`--use-syslog`. + +.. option:: --nowatch-log-file + + The inverse of :option:`--watch-log-file`. + +.. 
option:: --syslog-log-facility SYSLOG_LOG_FACILITY + + Syslog facility to receive log lines. This option is ignored if + log_config_append is set. + +.. option:: --use-journal + + Enable journald for logging. If running in a systemd environment you may + wish to enable journal support. Doing so will use the journal native + protocol which includes structured metadata in addition to log + messages. This option is ignored if log_config_append is set. + +.. option:: --use-json + + Use JSON formatting for logging. This option is ignored if + log_config_append is set. + +.. option:: --use-syslog + + Use syslog for logging. Existing syslog format is DEPRECATED and will be + changed later to honor RFC5424. This option is ignored if + log_config_append is set. + +.. option:: --version + + Show the program's version number and exit. + +.. option:: --watch-log-file + + Uses a logging handler designed to watch the file system. When the log file is moved + or removed, this handler will open a new log file at the specified path + instantaneously. This only makes sense if the log_file option is specified and a + Linux platform is used. This option is ignored if log_config_append is set. diff --git a/doc/source/cli/opts/debugger.rst b/doc/source/cli/opts/debugger.rst new file mode 100644 index 00000000000..a0698e0c979 --- /dev/null +++ b/doc/source/cli/opts/debugger.rst @@ -0,0 +1,15 @@ +.. option:: --remote_debug-host REMOTE_DEBUG_HOST + + Debug host (IP or name) to connect to. This command line parameter is used + when you want to connect to a nova service via a debugger running on a + different host. Note that using the remote debug option changes how Nova + uses the eventlet library to support async IO. This could result in + failures that do not occur under normal operation. Use at your own risk. + +.. option:: --remote_debug-port REMOTE_DEBUG_PORT + + Debug port to connect to. This command line parameter allows you to specify + the port you want to use to connect to a nova service via a debugger + running on a different host. Note that using the remote debug option changes + how Nova uses the eventlet library to support async IO. This could result + in failures that do not occur under normal operation. Use at your own risk. diff --git a/doc/source/cli/opts/websockify.rst b/doc/source/cli/opts/websockify.rst new file mode 100644 index 00000000000..5d6fbe51cda --- /dev/null +++ b/doc/source/cli/opts/websockify.rst @@ -0,0 +1,41 @@ +.. option:: --cert CERT + + Path to SSL certificate file. + +.. option:: --daemon + + Run as a background process. + +.. option:: --key KEY + + SSL key file (if separate from cert). + +.. option:: --nodaemon + + The inverse of :option:`--daemon`. + +.. option:: --nosource_is_ipv6 + + The inverse of :option:`--source_is_ipv6`. + +.. option:: --nossl_only + + The inverse of :option:`--ssl_only`. + +.. option:: --record RECORD + + Filename that will be used for storing websocket frames received and sent + by a proxy service (like VNC, spice, serial) running on this host. If this + is not set, no recording will be done. + +.. option:: --source_is_ipv6 + + Set to True if the source host is addressed with IPv6. + +.. option:: --ssl_only + + Disallow non-encrypted connections. + +.. option:: --web WEB + + Path to a directory with content which will be served by a web server.
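Since these option files are included into the man pages of several console proxy commands, it may help to see the options combined. The following is a hypothetical TLS-only VNC proxy invocation (the certificate and key paths are placeholders)::

    nova-novncproxy --config-file /etc/nova/nova.conf \
        --ssl_only \
        --cert /etc/nova/ssl/nova.crt \
        --key /etc/nova/ssl/nova.key

Here ``--ssl_only`` disallows non-encrypted connections, while ``--cert`` and ``--key`` supply the server certificate and key, as described above.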
diff --git a/doc/source/common/numa-live-migration-warning.txt b/doc/source/common/numa-live-migration-warning.txt new file mode 100644 index 00000000000..145d85a434b --- /dev/null +++ b/doc/source/common/numa-live-migration-warning.txt @@ -0,0 +1,12 @@ +.. important:: + + In deployments older than Train, or in mixed Stein/Train deployments with a + rolling upgrade in progress, unless :oslo.config:option:`specifically + enabled `, live migration is not + possible for instances with a NUMA topology when using the libvirt + driver. A NUMA topology may be specified explicitly or can be added + implicitly due to the use of CPU pinning or huge pages. Refer to `bug + #1289064`__ for more information. As of Train, live migration of instances + with a NUMA topology when using the libvirt driver is fully supported. + + __ https://bugs.launchpad.net/nova/+bug/1289064 diff --git a/doc/source/conf.py b/doc/source/conf.py index a405ab22264..edae3254d91 100644 --- a/doc/source/conf.py +++ b/doc/source/conf.py @@ -19,47 +19,40 @@ import os import sys -from nova.version import version_info - # If extensions (or modules to document with autodoc) are in another directory, # add these directories to sys.path here. If the directory is relative to the # documentation root, use os.path.abspath to make it absolute, like shown here. -sys.path.insert(0, os.path.abspath('../../')) sys.path.insert(0, os.path.abspath('../')) -sys.path.insert(0, os.path.abspath('./')) # -- General configuration ---------------------------------------------------- # Add any Sphinx extension module names here, as strings. They can be # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones. -extensions = ['sphinx.ext.autodoc', - 'sphinx.ext.todo', - 'openstackdocstheme', - 'sphinx.ext.coverage', - 'sphinx.ext.graphviz', - 'sphinx_feature_classification.support_matrix', - 'oslo_config.sphinxconfiggen', - 'oslo_config.sphinxext', - 'oslo_policy.sphinxpolicygen', - 'oslo_policy.sphinxext', - 'ext.versioned_notifications', - 'ext.feature_matrix', - 'sphinxcontrib.actdiag', - 'sphinxcontrib.seqdiag', - ] +extensions = [ + 'sphinx.ext.autodoc', + 'sphinx.ext.todo', + 'sphinx.ext.graphviz', + 'openstackdocstheme', + 'sphinx_feature_classification.support_matrix', + 'oslo_config.sphinxconfiggen', + 'oslo_config.sphinxext', + 'oslo_policy.sphinxpolicygen', + 'oslo_policy.sphinxext', + 'ext.versioned_notifications', + 'ext.feature_matrix', + 'ext.extra_specs', + 'sphinxcontrib.actdiag', + 'sphinxcontrib.seqdiag', + 'sphinxcontrib.rsvgconverter', +] -# openstackdocstheme options -repository_name = 'openstack/nova' -bug_project = 'nova' -bug_tag = '' config_generator_config_file = '../../etc/nova/nova-config-generator.conf' sample_config_basename = '_static/nova' policy_generator_config_file = [ ('../../etc/nova/nova-policy-generator.conf', '_static/nova'), - ('../../etc/nova/placement-policy-generator.conf', '_static/placement') ] actdiag_html_image_format = 'SVG' @@ -70,46 +63,14 @@ todo_include_todos = True -# The suffix of source filenames. -source_suffix = '.rst' - # The master toctree document. master_doc = 'index' # General information about the project. -project = u'nova' copyright = u'2010-present, OpenStack Foundation' -# The version info for the project you're documenting, acts as replacement for -# |version| and |release|, also used in various other places throughout the -# built documents. -# -# The full version, including alpha/beta/rc tags. -release = version_info.release_string() -# The short X.Y version. 
-version = version_info.version_string() - -# A list of glob-style patterns that should be excluded when looking for -# source files. They are matched against the source file names relative to the -# source directory, using slashes as directory separators on all platforms. -exclude_patterns = [ - 'api/nova.wsgi.nova-*', - 'api/nova.tests.*', -] - -# If true, the current module name will be prepended to all description -# unit titles (such as .. function::). -add_module_names = False - -# If true, sectionauthor and moduleauthor directives will be shown in the -# output. They are ignored by default. -show_authors = False - # The name of the Pygments (syntax highlighting) style to use. -pygments_style = 'sphinx' - -# A list of ignored prefixes for module index sorting. -modindex_common_prefix = ['nova.'] +pygments_style = 'native' # -- Options for man page output ---------------------------------------------- @@ -117,27 +78,50 @@ # List of tuples 'sourcefile', 'target', u'title', u'Authors name', 'manual' _man_pages = [ - ('nova-api-metadata', u'Cloud controller fabric'), - ('nova-api-os-compute', u'Cloud controller fabric'), - ('nova-api', u'Cloud controller fabric'), - ('nova-cells', u'Cloud controller fabric'), - ('nova-compute', u'Cloud controller fabric'), - ('nova-console', u'Cloud controller fabric'), - ('nova-consoleauth', u'Cloud controller fabric'), - ('nova-dhcpbridge', u'Cloud controller fabric'), - ('nova-manage', u'Cloud controller fabric'), - ('nova-network', u'Cloud controller fabric'), - ('nova-novncproxy', u'Cloud controller fabric'), - ('nova-spicehtml5proxy', u'Cloud controller fabric'), - ('nova-serialproxy', u'Cloud controller fabric'), - ('nova-rootwrap', u'Cloud controller fabric'), - ('nova-scheduler', u'Cloud controller fabric'), - ('nova-xvpvncproxy', u'Cloud controller fabric'), - ('nova-conductor', u'Cloud controller fabric'), + ('nova-api', 'Server for the OpenStack Compute API service.'), + ( + 'nova-api-metadata', + 'Server for the OpenStack Compute metadata API service.', + ), + ( + 'nova-api-os-compute', + 'Server for the OpenStack Compute API service.', + ), + ('nova-compute', 'Server for the OpenStack Compute compute service.'), + ('nova-conductor', 'Server for the OpenStack Compute conductor service.'), + ('nova-manage', 'Management tool for the OpenStack Compute services.'), + ( + 'nova-novncproxy', + 'Server for the OpenStack Compute VNC console proxy service.' + ), + ( + 'nova-rootwrap', + 'Root wrapper daemon for the OpenStack Compute service.', + ), + ( + 'nova-policy', + 'Inspect policy configuration for the OpenStack Compute services.', + ), + ( + 'nova-scheduler', + 'Server for the OpenStack Compute scheduler service.', + ), + ( + 'nova-serialproxy', + 'Server for the OpenStack Compute serial console proxy service.', + ), + ( + 'nova-spicehtml5proxy', + 'Server for the OpenStack Compute SPICE console proxy service.', + ), + ( + 'nova-status', + 'Inspect configuration status for the OpenStack Compute services.', + ), ] man_pages = [ - ('cli/%s' % name, name, description, [u'OpenStack'], 1) + ('cli/%s' % name, name, description, ['openstack@lists.openstack.org'], 1) for name, description in _man_pages] # -- Options for HTML output -------------------------------------------------- @@ -155,9 +139,6 @@ # robots.txt. html_extra_path = ['_extra'] -# If not '', a 'Last updated on:' timestamp is inserted at every page bottom, -# using the given strftime format. 
-html_last_updated_fmt = '%Y-%m-%d %H:%M' # -- Options for LaTeX output ------------------------------------------------- @@ -165,16 +146,40 @@ # (source start file, target name, title, author, documentclass # [howto/manual]). latex_documents = [ - ('index', 'Nova.tex', u'Nova Documentation', + ('index', 'doc-nova.tex', u'Nova Documentation', u'OpenStack Foundation', 'manual'), ] +# Allow deeper levels of nesting for \begin...\end stanzas +latex_elements = { + 'maxlistdepth': 10, + 'extraclassoptions': 'openany,oneside', + 'preamble': r''' +\setcounter{tocdepth}{3} +\setcounter{secnumdepth}{3} +''', +} + +# Disable use of xindy since that's another binary dependency that's not +# available on all platforms +latex_use_xindy = False + # -- Options for openstackdocstheme ------------------------------------------- +# openstackdocstheme options +openstackdocs_repo_name = 'openstack/nova' +openstackdocs_bug_project = 'nova' +openstackdocs_bug_tag = 'doc' +openstackdocs_pdf_link = True + # keep this ordered to keep mriedem happy -openstack_projects = [ +# +# NOTE(stephenfin): Projects that don't have a release branch, like TripleO and +# reno, should not be included here +openstackdocs_projects = [ 'ceilometer', 'cinder', + 'cyborg', 'glance', 'horizon', 'ironic', @@ -185,13 +190,21 @@ 'oslo.messaging', 'oslo.i18n', 'oslo.versionedobjects', + 'placement', 'python-novaclient', 'python-openstackclient', - 'reno', 'watcher', ] # -- Custom extensions -------------------------------------------------------- +# NOTE(mdbooth): (2019-03-20) Sphinx loads policies defined in setup.cfg, which +# includes the placement policy at nova/api/openstack/placement/policies.py. +# Loading this imports nova/api/openstack/__init__.py, which imports +# nova.monkey_patch, which will do eventlet monkey patching to the sphinx +# process. As well as being unnecessary and a bad idea, this breaks on +# python3.6 (but not python3.7), so don't do that. +os.environ['OS_NOVA_DISABLE_EVENTLET_PATCHING'] = '1' + def monkey_patch_blockdiag(): """Monkey patch the blockdiag library. @@ -216,6 +229,7 @@ def monkey_patch_blockdiag(): from codecs import getreader from blockdiag.imagedraw import textfolder + from blockdiag.utils import compat # noqa # oh, blockdiag. Let's undo the mess you made. codecs.getreader = getreader diff --git a/doc/source/configuration/config.rst b/doc/source/configuration/config.rst index 8dd6394adc3..571c84e78f5 100644 --- a/doc/source/configuration/config.rst +++ b/doc/source/configuration/config.rst @@ -3,7 +3,10 @@ Configuration Options ===================== The following is an overview of all available configuration options in Nova. -For a sample configuration file, refer to :doc:`/configuration/sample-config`. + +.. only:: html + + For a sample configuration file, refer to :doc:`sample-config`. .. show-options:: :config-file: etc/nova/nova-config-generator.conf diff --git a/doc/source/configuration/extra-specs.rst b/doc/source/configuration/extra-specs.rst new file mode 100644 index 00000000000..45dbf2a94df --- /dev/null +++ b/doc/source/configuration/extra-specs.rst @@ -0,0 +1,211 @@ +=========== +Extra Specs +=========== + +The following is an overview of all extra specs recognized by nova in its +default configuration. + +.. note:: + + Other services and virt drivers may provide additional extra specs not + listed here. In addition, it is possible to register your own extra specs. + For more information on the latter, refer to :doc:`/admin/scheduling`. 
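As a brief illustration of how the extra specs documented below are attached to a flavor (the flavor name ``my.flavor`` is a placeholder), an operator might run::

    openstack flavor set my.flavor \
        --property hw:cpu_policy=dedicated \
        --property trait:HW_CPU_X86_AVX2=required

The following sections group the recognized extra specs by namespace.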
+ +Placement +--------- + +The following extra specs are used during scheduling to modify the request sent +to placement. + +``resources`` +~~~~~~~~~~~~~ + +The following extra specs are used to request an amount of the specified +resource from placement when scheduling. All extra specs expect an integer +value. + +.. note:: + + Not all of the resource types listed below are supported by all virt + drivers. + +.. extra-specs:: resources + :summary: + +``trait`` +~~~~~~~~~ + +The following extra specs are used to request a specified trait from placement +when scheduling. All extra specs expect one of the following values: + +- ``required`` +- ``forbidden`` + +.. note:: + + Not all of the traits listed below are supported by all virt drivers. + +.. extra-specs:: trait + :summary: + +Scheduler Filters +----------------- + +The following extra specs are specific to various in-tree scheduler filters. + +``aggregate_instance_extra_specs`` +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +The following extra specs are used to specify metadata that must be present on +the aggregate of a host. If this metadata is not present or does not match the +expected value, the aggregate and all hosts within it will be rejected. + +Requires the ``AggregateInstanceExtraSpecsFilter`` scheduler filter. + +.. extra-specs:: aggregate_instance_extra_specs + +``capabilities`` +~~~~~~~~~~~~~~~~ + +The following extra specs are used to specify a host capability that must be +provided by the host compute service. If this capability is not present or does +not match the expected value, the host will be rejected. + +Requires the ``ComputeCapabilitiesFilter`` scheduler filter. + +All extra specs expect similar types of values: + +* ``=`` (equal to or greater than as a number; same as vcpus case) +* ``==`` (equal to as a number) +* ``!=`` (not equal to as a number) +* ``>=`` (greater than or equal to as a number) +* ``<=`` (less than or equal to as a number) +* ``s==`` (equal to as a string) +* ``s!=`` (not equal to as a string) +* ``s>=`` (greater than or equal to as a string) +* ``s>`` (greater than as a string) +* ``s<=`` (less than or equal to as a string) +* ``s<`` (less than as a string) +* ```` (substring) +* ```` (all elements contained in collection) +* ```` (find one of these) +* A specific value, e.g. ``true``, ``123``, ``testing`` + +Examples are: ``>= 5``, ``s== 2.1.0``, `` gcc``, `` aes mmx``, and +`` fpu gpu`` + +.. note:: + + Not all operators will apply to all types of values. For example, the ``==`` + operator should not be used for a string value - use ``s==`` instead. + +.. extra-specs:: capabilities + :summary: + +Virt driver +----------- + +The following extra specs are used as hints to configure internals of an +instance, from the bus used for paravirtualized devices to the amount of a +physical device to passthrough to the instance. Most of these are virt +driver-specific. + +``quota`` +~~~~~~~~~ + +The following extra specs are used to configure quotas for various +paravirtualized devices. Different quotas are supported by different virt +drivers, as noted below. + +.. extra-specs:: quota + +``accel`` +~~~~~~~~~ + +The following extra specs are used to configure attachment of various +accelerators to an instance. For more information, refer to :cyborg-doc:`the +Cyborg documentation <>`. + +They are only supported by the libvirt virt driver. + +.. extra-specs:: accel + +``pci_passthrough`` +~~~~~~~~~~~~~~~~~~~ + +The following extra specs are used to configure passthrough of a host PCI +device to an instance. This requires prior host configuration. For more +information, refer to :doc:`/admin/pci-passthrough`. + +They are only supported by the libvirt virt driver. + +.. extra-specs:: pci_passthrough + +``hw`` +~~~~~~ + +The following extra specs are used to configure various attributes of +instances. Some of the extra specs act as feature flags, while others tweak, +for example, the guest-visible CPU topology of the instance. + +Except where otherwise stated, they are only supported by the libvirt virt +driver. + +.. extra-specs:: hw + +``hw_rng`` +~~~~~~~~~~ + +The following extra specs are used to configure a random number generator for +an instance. + +They are only supported by the libvirt virt driver. + +.. extra-specs:: hw_rng + +``hw_video`` +~~~~~~~~~~~~ + +The following extra specs are used to configure attributes of the default guest +video device. + +They are only supported by the libvirt virt driver. + +.. extra-specs:: hw_video + +``os`` +~~~~~~ + +The following extra specs are used to configure various attributes of +instances when using the HyperV virt driver. + +They are only supported by the HyperV virt driver. + +.. extra-specs:: os + +``powervm`` +~~~~~~~~~~~ + +The following extra specs are used to configure various attributes of +instances when using the PowerVM virt driver. + +They are only supported by the PowerVM virt driver. + +.. extra-specs:: powervm + +``vmware`` +~~~~~~~~~~ + +The following extra specs are used to configure various attributes of +instances when using the VMWare virt driver. + +They are only supported by the VMWare virt driver. + +.. extra-specs:: vmware + +Others (uncategorized) +---------------------- + +The following extra specs are not part of a group. + +.. extra-specs:: diff --git a/doc/source/configuration/index.rst b/doc/source/configuration/index.rst index 7ebb6e72fd9..f7f40790f16 100644 --- a/doc/source/configuration/index.rst +++ b/doc/source/configuration/index.rst @@ -3,57 +3,135 @@ Configuration Guide =================== The static configuration for nova lives in two main files: ``nova.conf`` and -``policy.json``. These are described below. For a bigger picture view on +``policy.yaml``. These are described below. For a bigger picture view on configuring nova to solve specific problems, refer to the :doc:`Nova Admin Guide `. Configuration ------------- +Nova, like most OpenStack projects, uses INI-style configuration files to +configure various services and utilities. This functionality is provided by the +`oslo.config`__ project. *oslo.config* supports loading configuration from both +individual configuration files and a directory of configuration files. By +default, nova will search the below directories for two config files - +``nova.conf`` and ``{prog}.conf``, where ``prog`` corresponds to the name of +the service or utility being configured such as :program:`nova-compute` - and +two config directories - ``nova.conf.d`` and ``{prog}.conf.d``: + +- ``${HOME}/.nova`` +- ``${HOME}`` +- ``/etc/nova`` +- ``/etc`` +- ``${SNAP_COMMON}/etc/nova/`` +- ``${SNAP}/etc/nova/`` + +Where a matching file is found, all other directories will be skipped. +This behavior can be overridden by using the ``--config-file`` and +``--config-dir`` options provided for each executable. + +More information on how you can use the configuration options to configure +services and what configuration options are available can be found below. + * :doc:`Configuration Guide `: Detailed - configuration guides for various parts of you Nova system.
Helpful reference - for setting up specific hypervisor backends. + configuration guides for various parts of your Nova system. Helpful + reference for setting up specific hypervisor backends. * :doc:`Config Reference `: A complete reference of all configuration options available in the ``nova.conf`` file. -* :doc:`Sample Config File `: A sample config - file with inline documentation. +.. only:: html -Nova Policy ----------- + * :doc:`Sample Config File `: A sample config + file with inline documentation. -Nova, like most OpenStack projects, uses a policy language to restrict -permissions on REST API actions. +.. # NOTE(mriedem): This is the section where we hide things that we don't + # actually want in the table of contents but sphinx build would fail if + # they aren't in the toctree somewhere. +.. # NOTE(amotoki): toctree needs to be placed at the end of the section to + # keep the document structure in the PDF doc. +.. toctree:: + :hidden: -* :doc:`Policy Reference `: A complete reference of all - policy points in nova and what they impact. + config + +.. # NOTE(amotoki): Sample files are only available in HTML document. + # Inline sample files with literalinclude hit LaTeX processing error + # like TeX capacity exceeded and direct links are discouraged in PDF doc. +.. only:: html -* :doc:`Sample Policy File `: A sample nova - policy file with inline documentation. + .. toctree:: + :hidden: -Placement Policy ---------------- + sample-config -Placement, like most OpenStack projects, uses a policy language to restrict -permissions on REST API actions. +.. __: https://docs.openstack.org/oslo.config/latest/ + +Policy +------ + +Nova, like most OpenStack projects, uses a policy language to restrict +permissions on REST API actions. This functionality is provided by the +`oslo.policy`__ project. *oslo.policy* supports loading policy configuration +from both an individual configuration file, which defaults to ``policy.yaml``, +and one or more directories of configuration files, which defaults to +``policy.d``. These must be located in the same directory as the ``nova.conf`` +file(s). This behavior can be overridden by setting the +:oslo.config:option:`oslo_policy.policy_file` and +:oslo.config:option:`oslo_policy.policy_dirs` configuration options. + +More information on how nova's policy configuration works and about what +policies are available can be found below. + +* :doc:`Policy Concepts `: Starting in the Ussuri + release, Nova API policy defines new default roles with system scope + capabilities. These changes improve the security level and + manageability of the Nova API, as they are richer in terms of handling access + with system- and project-level tokens and 'Read' and 'Write' roles. -* :doc:`Policy Reference `: A complete - reference of all policy points in placement and what they impact. +* :doc:`Policy Reference `: A complete reference of all + policy points in nova and what they impact. -* :doc:`Sample Policy File `: A sample - placement policy file with inline documentation. +.. only:: html + * :doc:`Sample Policy File `: A sample nova + policy file with inline documentation. .. # NOTE(mriedem): This is the section where we hide things that we don't # actually want in the table of contents but sphinx build would fail if # they aren't in the toctree somewhere. +.. # NOTE(amotoki): toctree needs to be placed at the end of the section to + # keep the document structure in the PDF doc. ..
toctree:: :hidden: - config - sample-config + policy-concepts policy - sample-policy - placement-policy - sample-placement-policy + +.. # NOTE(amotoki): Sample files are only available in HTML document. + # Inline sample files with literalinclude hit LaTeX processing error + # like TeX capacity exceeded and direct links are discouraged in PDF doc. +.. only:: html + + .. toctree:: + :hidden: + + sample-policy + +.. __: https://docs.openstack.org/oslo.policy/latest/ + +Extra Specs +----------- + +Nova uses *flavor extra specs* as a way to provide additional information to +instances beyond basic information like amount of RAM or disk. This information +can range from hints for the scheduler to hypervisor-specific configuration +instructions for the instance. + +* :doc:`Extra Spec Reference `: A complete reference for all extra + specs currently recognized and supported by nova. + +.. toctree:: + :hidden: + + extra-specs diff --git a/doc/source/configuration/placement-policy.rst b/doc/source/configuration/placement-policy.rst deleted file mode 100644 index 67b6cf6d557..00000000000 --- a/doc/source/configuration/placement-policy.rst +++ /dev/null @@ -1,10 +0,0 @@ -================== -Placement Policies -================== - -The following is an overview of all available policies in Placement. -For a sample configuration file, refer to -:doc:`/configuration/sample-placement-policy`. - -.. show-policy:: - :config-file: etc/nova/placement-policy-generator.conf diff --git a/doc/source/configuration/policy-concepts.rst b/doc/source/configuration/policy-concepts.rst new file mode 100644 index 00000000000..e3927b7fc4b --- /dev/null +++ b/doc/source/configuration/policy-concepts.rst @@ -0,0 +1,345 @@ +Understanding Nova Policies +=========================== + +.. warning:: + + The JSON formatted policy file is deprecated since Nova 22.0.0 (Victoria). + Use a YAML formatted file. Use the `oslopolicy-convert-json-to-yaml`__ tool + to convert an existing JSON policy file to YAML format in a backward + compatible way. + +.. __: https://docs.openstack.org/oslo.policy/latest/cli/oslopolicy-convert-json-to-yaml.html + +Nova supports a rich policy system that has evolved significantly over its +lifetime. Initially, this took the form of a large, mostly hand-written +``policy.yaml`` file but, starting in the Newton (14.0.0) release, policy +defaults have been defined in the codebase, requiring the ``policy.yaml`` +file only to override these defaults. + +In the Ussuri (21.0.0) release, further work was undertaken to address some +issues that had been identified: + +#. No global vs project admin. The ``admin_only`` role is used for the global + admin that is able to make almost any change to Nova, and see all details + of the Nova system. The rule passes for any user with an admin role; it + doesn’t matter which project is used. + +#. No read-only roles. Since several APIs tend to share a single policy rule + for read and write actions, they did not provide the granularity necessary + for read-only access roles. + +#. The ``admin_or_owner`` role did not work as expected. For most APIs with + ``admin_or_owner``, the project authentication happened in a separate + component from the API in Nova, one that did not honor changes to policy. As a + result, policy could not override hard-coded in-project checks. + +Keystone comes with ``admin``, ``member`` and ``reader`` roles by default. +Please refer to :keystone-doc:`this document ` +for more information about these new defaults.
In addition, keystone supports +a new "system scope" concept that makes it easier to protect deployment level +resources from project or system level resources. Please refer to +:keystone-doc:`this document ` +and `system scope specification `_ to understand the scope concept. + +In the Nova 21.0.0 (Ussuri) release, Nova policies implemented +the scope concept and default roles provided by keystone (admin, member, +and reader). Using common roles from keystone reduces the likelihood of +similar, but different, roles implemented across projects or deployments +(e.g., a role called ``observer`` versus ``reader`` versus ``auditor``). +With the help of the new defaults, it is easier to understand who can do +what across projects, divergence is reduced, and interoperability is increased. + +The below sections explain how these new defaults in Nova can solve the +first two issues mentioned above and extend more functionality to end users +in a safe and secure way. + +More information is provided in the `nova specification `_. + +Scope +----- + +OpenStack Keystone supports different scopes in tokens. +These are described :keystone-doc:`here `. +Token scopes represent the layer of authorization. Policy ``scope_types`` +represent the layer of authorization required to access an API. + +.. note:: + + The ``scope_type`` of each policy is hardcoded and is not + overridable via the policy file. + +Nova policies have implemented the scope concept by defining the ``scope_type`` +in policies. To know each policy's ``scope_type``, please refer to the +:doc:`Policy Reference ` and look for ``Scope Types`` or +``Intended scope(s)`` in the :doc:`Policy Sample File ` +as shown in the below examples. + +.. rubric:: ``system`` scope + +Policies with a ``scope_type`` of ``system`` mean that a user with a +``system-scoped`` token has permission to access the resource. This can be +seen as a global role. All the system-level operations' policies +default to a ``scope_type`` of ``['system']``. + +For example, consider the ``GET /os-hypervisors`` API. + +.. code:: + + # List all hypervisors. + # GET /os-hypervisors + # Intended scope(s): system + #"os_compute_api:os-hypervisors:list": "rule:system_reader_api" + +.. rubric:: ``project`` scope + +Policies with a ``scope_type`` of ``project`` mean that a user with a +``project-scoped`` token has permission to access the resource. Project-level-only +operations' policies default to a ``scope_type`` of ``['project']``. + +For example, consider the ``POST /os-server-groups`` API. + +.. code:: + + # Create a new server group + # POST /os-server-groups + # Intended scope(s): project + #"os_compute_api:os-server-groups:create": "rule:project_member_api" + +.. rubric:: ``system and project`` scope + +Policies with a ``scope_type`` of ``system and project`` mean that a user with a +``system-scoped`` or ``project-scoped`` token has permission to access the +resource. All the system and project level operations' policies default +to a ``scope_type`` of ``['system', 'project']``. + +For example, consider the ``POST /servers/{server_id}/action (os-migrateLive)`` +API. + +.. code:: + + # Live migrate a server to a new host without a reboot + # POST /servers/{server_id}/action (os-migrateLive) + # Intended scope(s): system, project + #"os_compute_api:os-migrate-server:migrate_live": "rule:system_admin_api" + +These scope types provide a way to differentiate between system-level and +project-level access roles. You can control the information exposed to users +based on scope.
This means you can ensure, for example, that no project-level role can get +the hypervisor information. + +Policy scope is disabled by default to allow operators to migrate from +the old policy enforcement system in a graceful way. This can be +enabled by configuring the :oslo.config:option:`oslo_policy.enforce_scope` +option to ``True``. + +.. note:: + + [oslo_policy] + enforce_scope=True + + +Roles +----- + +You can refer to :keystone-doc:`this ` +document to learn about all the defaults available from Keystone. + +Along with the ``scope_type`` feature, Nova policy defines new +defaults for each policy. + +.. rubric:: ``reader`` + +This provides read-only access to the resources within the ``system`` or +``project``. Nova policies default to the below rules: + +.. code:: + + system_reader_api + Default + role:reader and system_scope:all + + system_or_project_reader + Default + (rule:system_reader_api) or (role:reader and project_id:%(project_id)s) + +.. rubric:: ``member`` + +This role is used to perform project-level write operations, in combination +with the system admin. Nova policies default to the below rules: + +.. code:: + + project_member_api + Default + role:member and project_id:%(project_id)s + + system_admin_or_owner + Default + (role:admin and system_scope:all) or (role:member and project_id:%(project_id)s) + +.. rubric:: ``admin`` + +This role is used to perform admin-level write operations at the system as well +as the project level. Nova policies default to the below rules: + +.. code:: + + system_admin_api + Default + role:admin and system_scope:all + + project_admin_api + Default + role:admin and project_id:%(project_id)s + + system_admin_or_owner + Default + (role:admin and system_scope:all) or (role:member and project_id:%(project_id)s) + +With these new defaults, you can solve the problem of: + +#. Providing read-only access to the user. Policies are made more granular + and default to reader rules. For example, if you need to let someone audit + your deployment for security purposes. + +#. Customizing the policy in a better way. For example, you will be able + to provide access to a project-level user to perform live migration for their + server or any other project with their token. + +Nova supported scope & Roles +----------------------------- + +Nova supports the below combinations of scopes and roles, where roles can be +overridden in the policy.yaml file but scope is not overridable. + +#. SYSTEM_ADMIN: ``admin`` role on ``system`` scope + +#. SYSTEM_READER: ``reader`` role on ``system`` scope + +#. PROJECT_ADMIN: ``admin`` role on ``project`` scope + + .. note:: + + PROJECT_ADMIN has limitations for the below policies + + * ``os_compute_api:servers:create:forced_host`` + * ``os_compute_api:servers:compute:servers:create:requested_destination`` + + To create a server on a specific host via forced host or requested + destination, you need to pass the hostname in the ``POST /servers`` + API request, but there is no way for PROJECT_ADMIN to get the hostname + via the API. This limitation will be addressed in a future release. + + +#. PROJECT_MEMBER: ``member`` role on ``project`` scope + +#. PROJECT_READER: ``reader`` role on ``project`` scope + +#. PROJECT_MEMBER_OR_SYSTEM_ADMIN: ``admin`` role on ``system`` scope + or ``member`` role on ``project`` scope. Such policy rules are scoped + as both ``system`` and ``project``. + +#. PROJECT_READER_OR_SYSTEM_READER: ``reader`` role on ``system`` scope + or ``project`` scope.
Such policy rules are scoped as both ``system`` + and ``project``. + + .. note:: As of now, only ``system`` and ``project`` scopes are supported in Nova. + +Backward Compatibility +---------------------- + +Backward compatibility with versions prior to 21.0.0 (Ussuri) is maintained by +supporting the old defaults and disabling the ``scope_type`` feature by default. +This means the old defaults and deployments that use them will keep working +as-is. However, we encourage every deployment to switch to the new policy. +``scope_type`` will be enabled by default and the old defaults will be removed +starting in the 23.0.0 (W) release. + +To implement the new default reader roles, some policies needed to become +granular. They have been renamed, with the old names still supported for +backwards compatibility. + +Migration Plan +-------------- + +To have a graceful migration, Nova provides two flags to switch to the new +policy completely. You do not need to overwrite the policy file to adopt the +new policy defaults. + +Here is a step-wise guide for migration: + +#. Create scoped token: + + You need to create the new token with scope knowledge via the below CLI: + + - :keystone-doc:`Create System Scoped Token `. + - :keystone-doc:`Create Project Scoped Token `. + +#. Create new default roles in keystone if not done: + + If you do not have the new defaults in Keystone, then you can create them and re-run + the :keystone-doc:`Keystone Bootstrap `. Keystone + added this support in the 14.0.0 (Rocky) release. + +#. Enable Scope Checks + + The :oslo.config:option:`oslo_policy.enforce_scope` flag enables the + ``scope_type`` feature. The scope of the token used in the request is + always compared to the ``scope_type`` of the policy. If the scopes do not + match, one of two things can happen. If :oslo.config:option:`oslo_policy.enforce_scope` + is True, the request will be rejected. If :oslo.config:option:`oslo_policy.enforce_scope` + is False, a warning will be logged, but the request will be accepted + (assuming the rest of the policy passes). The default value of this flag + is False. + + .. note:: Before you enable this flag, you need to audit your users and make + sure everyone who needs system-level access has a system role + assignment in keystone. + +#. Enable new defaults + + The :oslo.config:option:`oslo_policy.enforce_new_defaults` flag switches + the policy to new defaults-only. This flag controls whether or not to use + old deprecated defaults when evaluating policies. If True, the old + deprecated defaults are not evaluated. This means if any existing + token is allowed for old defaults but is disallowed for new defaults, + it will be rejected. The default value of this flag is False. A combined + configuration sketch for this and the previous step is shown after these + steps. + + .. note:: Before you enable this flag, you need to educate users about the + different roles they need to use to continue using Nova APIs. + + +#. Check for deprecated policies + + A few policies were made more granular to implement the reader roles. New + policy names are available to use. If old policy names which have been renamed + are overridden in the policy file, a warning will be logged. Please migrate + those policies to the new policy names.
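Putting the two enablement steps together, a minimal ``nova.conf`` sketch that opts in to both scope checking and the new defaults (only after the user audits described above) would be::

    [oslo_policy]
    enforce_scope = True
    enforce_new_defaults = True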
+ +The below table shows how legacy rules are mapped to new rules: + ++--------------------+----------------------------------+-----------------+-------------------+ +| Legacy Rules | New Rules | | | ++====================+==================================+=================+===================+ +| | | *Roles* | *Scope* | +| +----------------------------------+-----------------+-------------------+ +| | SYSTEM_ADMIN | admin | system | +| Project Admin +----------------------------------+-----------------+ | +| Role | SYSTEM_READER | reader | | +| | | | | ++--------------------+----------------------------------+-----------------+-------------------+ +| | PROJECT_ADMIN | admin | project | +| +----------------------------------+-----------------+ | +| | PROJECT_MEMBER | member | | +| +----------------------------------+-----------------+ | +| Project admin or | PROJECT_READER | reader | | +| owner role +----------------------------------+-----------------+-------------------+ +| | PROJECT_MEMBER_OR_SYSTEM_ADMIN | admin on system | system | +| | | or member on | OR | +| | | project | project | +| +----------------------------------+-----------------+ | +| | PROJECT_READER_OR_SYSTEM_READER | reader | | ++--------------------+----------------------------------+-----------------+-------------------+ + +We expect all deployments to migrate to the new policy by the 23.0.0 release so that +we can remove support for the old policies. diff --git a/doc/source/configuration/policy.rst b/doc/source/configuration/policy.rst index 66b4c7982d3..12b68fd465b 100644 --- a/doc/source/configuration/policy.rst +++ b/doc/source/configuration/policy.rst @@ -2,8 +2,20 @@ Nova Policies ============= -The following is an overview of all available policies in Nova. For a sample -configuration file, refer to :doc:`/configuration/sample-policy`. +The following is an overview of all available policies in Nova. + +.. warning:: + + The JSON formatted policy file is deprecated since Nova 22.0.0 (Victoria). + Use a YAML formatted file. Use the `oslopolicy-convert-json-to-yaml`__ tool + to convert an existing JSON policy file to YAML format in a backward + compatible way. + +.. __: https://docs.openstack.org/oslo.policy/latest/cli/oslopolicy-convert-json-to-yaml.html + +.. only:: html + + For a sample configuration file, refer to :doc:`sample-policy`. .. show-policy:: :config-file: etc/nova/nova-policy-generator.conf diff --git a/doc/source/configuration/sample-placement-policy.rst b/doc/source/configuration/sample-placement-policy.rst deleted file mode 100644 index 12e21c52a48..00000000000 --- a/doc/source/configuration/sample-placement-policy.rst +++ /dev/null @@ -1,16 +0,0 @@ -============================ -Sample Placement Policy File -============================ - -The following is a sample placement policy file for adaptation and use. - -The sample policy can also be viewed in :download:`file form -`. - -.. important:: - - The sample policy file is auto-generated from placement when this - documentation is built. You must ensure your version of placement matches - the version of this documentation. - -..
literalinclude:: /_static/placement.policy.yaml.sample diff --git a/doc/source/contributor/api-ref-guideline.rst b/doc/source/contributor/api-ref-guideline.rst index 4cca9f6e3e0..cc5eab25383 100644 --- a/doc/source/contributor/api-ref-guideline.rst +++ b/doc/source/contributor/api-ref-guideline.rst @@ -2,15 +2,14 @@ API reference guideline ======================= -The API reference should be updated when compute or placement APIs are modified +The API reference should be updated when compute APIs are modified (microversion is bumped, etc.). This page describes the guideline for updating the API reference. API reference ============= -* `Compute API reference `_ -* `Placement API reference `_ +* `Compute API reference `_ The guideline to write the API reference ======================================== @@ -24,20 +23,19 @@ Compute API reference * Parameter definition: ``api-ref/source/parameters.yaml`` * JSON request/response samples: ``doc/api_samples/*`` -Placement API reference ----------------------- - -* API reference text: ``placement-api-ref/source/*.inc`` -* Parameter definition: ``placement-api-ref/source/parameters.yaml`` -* JSON request/response samples: ``placement-api-ref/source/samples/*`` - Structure of inc file --------------------- Each REST API is described in a text file (\*.inc). The structure of inc file is as follows: - Title + - Title (Resource name) + + - Introductory text and context + + The introductory text and the context for the resource in question should + be added. This might include links to the API Concept guide, or building + other supporting documents to explain a concept (like versioning). - API Name @@ -45,8 +43,9 @@ The structure of inc file is as follows: - URL - Description - - Normal status code - - Error status code + + See the `Description`_ section for more details. + - Response codes - Request - Parameters @@ -93,6 +92,159 @@ what is in the code. For instance, the title for the section on method "Get Rdp Console" should be "Get Rdp Console (os-getRDPConsole Action)" NOT "Get Rdp Console (Os-Getrdpconsole Action)" +Description +----------- + +The following items should be described for each API, +or links to the pages describing them should be added. + +* The purpose of the API(s) + + - e.g. Lists, creates, shows details for, updates, and deletes servers. + - e.g. Creates a server. + +* Microversion + + - Deprecated + + - Warning + - Microversion to start deprecation + - Alternatives (superseded ways) and + their links (if document is available) + + - Added + + - Microversion in which the API has been added + + - Changed behavior + + - Microversion to change behavior + - Explanation of the behavior + + - Changed HTTP response codes + + - Microversion to change the response code + - Explanation of the response code + +* Warning if direct use is not recommended + + - e.g. This is an admin level service API only designed to be used by other + OpenStack services. The point of this API is to coordinate between Nova + and Neutron, Nova and Cinder (and potentially future services) on + activities they both need to be involved in, such as network hotplugging. + Unless you are writing Neutron or Cinder code you should not be using this API. + +* Explanation about statuses of resource in question + + - e.g. The server status. + + - ``ACTIVE``. The server is active. + +* Supplementary explanation for parameters + + - Examples of query parameters + - Parameters that are not specified at the same time + - Values that cannot be specified. + + - e.g.
A destination host is the same host. + +* Behavior + + - Config options to change the behavior and the effect + - Effect to resource status + + - Ephemeral disks, attached volumes, attached network ports and others + - Data loss or preserve contents + + - Scheduler + + - Whether the scheduler chooses a destination host or not + +* Sort order of response results + + - Describe the sorting order of response results if the API implements the order + (e.g. The response is sorted by ``created_at`` and ``id`` + in descending order by default) + +* Policy + + - Default policy (the admin only, the admin or the owner) + - How to change the policy + +* Preconditions + + - Server status + - Task state + - Policy for locked servers + - Quota + - Limited support + + - e.g. Only qcow2 is supported + + - Compute driver support + + - If very few compute drivers support the operation, add a warning and + a link to the support matrix of virt driver. + + - Cases that are not supported + + - e.g. A volume-backed server + +* Postconditions + + - If the operation is asynchronous, + it should be "Asynchronous postconditions". + + - Describe what status/state the resource in question transitions to + as a result of the operation + + - Server status + - Task state + - Path of output file + +* Troubleshooting + + - e.g. If the server status remains ``BUILDING`` or shows another error status, + the request failed. Ensure you meet the preconditions then investigate + the compute node. + +* Related operations + + - Operations to be paired + + - e.g. Start and stop + + - Subsequent operation + + - e.g. "Confirm resize" after "Resize" operation + +* Performance + + - e.g. The progress of this operation depends on the location of + the requested image, network I/O, host load, selected flavor, and other + factors. + +* Progress + + - How to get the progress of the operation if the operation is asynchronous. + +* Restrictions + + - Range that ID is unique + + - e.g. HostId is unique per account and is not globally unique. + +* How to avoid errors + + - e.g. The server to get the console log from should set + ``export LC_ALL=en_US.UTF-8`` in order to avoid incorrect unicode errors. + +* Reference + + - Links to the API Concept guide, or building other supporting documents to + explain a concept (like versioning). + +* Other notices + Response codes ~~~~~~~~~~~~~~ @@ -122,6 +274,12 @@ The description of typical error response codes are as follows: * - 503 - serviceUnavailable(503) +In addition, the following explanations should be described. + +- Conditions under which each normal response code is returned + (If there are multiple normal response codes.) +- Conditions under which each error response code is returned + Parameters ---------- @@ -196,6 +354,45 @@ If a parameter must be specified in the request or always appears in the response in the microversion added or later, the parameter must be defined as required (``true``). +Microversion +~~~~~~~~~~~~ + +If a parameter is available starting from a microversion, +the microversion must be described by ``min_version`` +in the parameter file. +However, if the minimum microversion is the same as the microversion +in which the API itself was added, it is not necessary to describe the microversion. + +For example:: + + aggregate_uuid: + description: | + The UUID of the host aggregate. + in: body + required: true + type: string + min_version: 2.41 + +This example describes that ``aggregate_uuid`` is available starting +from microversion 2.41.
+ +If a parameter is available up to a microversion, +the microversion must be described by ``max_version`` +in the parameter file. + +For example:: + + security_group_rules: + description: | + The number of allowed rules for each security group. + in: body + required: false + type: integer + max_version: 2.35 + +This example describes that ``security_group_rules`` is available up to +microversion 2.35 (and has been removed since microversion 2.36). + The order of parameters in the parameter file ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ @@ -213,17 +410,20 @@ The order of parameters in the parameter file has to be kept as follows: Example ------- -.. TODO:: - - The guideline for request/response JSON bodies should be added. +One or more examples should be provided for operations whose request and/or +response contains a payload. The example should describe what the operation +is attempting to do and provide a sample payload for the request and/or +response as appropriate. +Sample files should be created in the ``doc/api_samples`` directory and inlined +by inclusion. -Body ----- +When an operation has no payload in the response, a suitable message should be +included. For example:: -.. TODO:: + There is no body content for the response of a successful DELETE query. - The guideline for the introductory text and the context for the resource - in question should be added. +Examples for multiple microversions should be included in ascending +microversion order. Reference ========= @@ -231,3 +431,4 @@ Reference * `Verifying the Nova API Ref `_ * `The description for Parameters whose values are null `_ * `The definition of "Optional" parameter `_ +* `How to document your OpenStack API service `_ diff --git a/doc/source/contributor/api.rst b/doc/source/contributor/api.rst index 47210518001..23719901476 100644 --- a/doc/source/contributor/api.rst +++ b/doc/source/contributor/api.rst @@ -14,7 +14,7 @@ The v2.1 API framework is under ``nova/api`` and each API is implemented in ``nova/api/openstack/compute``. Note that any change to the Nova API to be merged will first require a -spec be approved first. See `here `_ +spec be approved. See `here `_ for the appropriate repository. For guidance on the design of the API please refer to the `OpenStack API WG `_ @@ -49,10 +49,10 @@ A very basic controller of a v2.1 API:: write_body_here = ok return response_body - # Defining support for other RESTFul methods based on resouce. + # Defining support for other RESTful methods based on resource. -See `servers.py `_ for ref. +See `servers.py `_ for ref. All of the controller modules should live in the ``nova/api/openstack/compute`` directory. @@ -100,7 +100,7 @@ A basic skeleton of URL mapping in routers.py:: . ) -Complete routing list can be found in `routes.py `_. +The complete routing list can be found in `routes.py `_. Policy @@ -231,7 +231,7 @@ The functional API samples tests are not the simplest thing in the world to get used to, and can be very frustrating at times when they fail in not obvious ways. If you need help debugging a functional API sample test failure, feel free to post your work-in-progress change for review and ask for help in -the ``openstack-nova`` freenode IRC channel. +the ``openstack-nova`` OFTC IRC channel.
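Returning to the basic controller sketched earlier, a slightly fuller (and entirely hypothetical) example may help; ``WidgetController``, the ``widgets`` resource and the ``_do_lookup`` helper are invented names for illustration, while ``wsgi.Controller`` and ``wsgi.expected_errors`` are the real nova helpers:

.. code-block:: python

    from webob import exc

    from nova.api.openstack import wsgi
    from nova import exception


    class WidgetController(wsgi.Controller):
        """A hypothetical controller for a made-up 'widgets' resource."""

        @wsgi.expected_errors(404)
        def show(self, req, id):
            # The request context is stashed in the WSGI environment.
            context = req.environ['nova.context']
            try:
                widget = _do_lookup(context, id)  # placeholder lookup helper
            except exception.NotFound:
                raise exc.HTTPNotFound()
            return {'widget': widget}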
Documentation @@ -246,7 +246,7 @@ Things to consider here include: * Marking existing parameters as deprecated in a new microversion More information on the compute API reference format and conventions can -be found here: https://wiki.openstack.org/wiki/NovaAPIRef +be found in the :doc:`/contributor/api-ref-guideline`. For more detailed documentation of certain aspects of the API, consider writing something into the compute API guide found under path @@ -258,10 +258,9 @@ Deprecating APIs Compute REST API routes may be deprecated by capping a method or functionality using microversions. For example, the -:ref:`2.36 microversion <2.36 microversion>` deprecated -several compute REST API routes which only work when using the ``nova-network`` -service, which itself was deprecated, or are proxies to other external -services like Cinder, Neutron, etc. +:ref:`2.36 microversion <2.36 microversion>` deprecated several compute REST +API routes which only worked when using the since-removed ``nova-network`` +service or are proxies to other external services like cinder, neutron, etc. The point of deprecating with microversions is users can still get the same functionality at a lower microversion but there is at least some way to signal @@ -308,7 +307,7 @@ The general steps for removing support for a deprecated REST API are: the REST API routes that were removed along with any related configuration options that were also removed. -Here is an example of the above steps: https://review.openstack.org/567682/ +Here is an example of the above steps: https://review.opendev.org/567682/ -.. _route mapping: http://git.openstack.org/cgit/openstack/nova/tree/nova/api/openstack/compute/routes.py -.. _Obsolete APIs: https://developer.openstack.org/api-ref/compute/#obsolete-apis +.. _route mapping: https://opendev.org/openstack/nova/src/branch/master/nova/api/openstack/compute/routes.py +.. _Obsolete APIs: https://docs.openstack.org/api-ref/compute/#obsolete-apis diff --git a/doc/source/contributor/code-review.rst b/doc/source/contributor/code-review.rst index d89f033eb58..a887bbb92d7 100644 --- a/doc/source/contributor/code-review.rst +++ b/doc/source/contributor/code-review.rst @@ -4,7 +4,10 @@ Code Review Guide for Nova ========================== -This is a very terse set of points for reviewers to consider when +OpenStack has a general set of code review guidelines: +https://docs.openstack.org/infra/manual/developers.html#peer-review + +What follows is a very terse set of points for reviewers to consider when looking at nova code. These are things that are important for the continued smooth operation of Nova, but that tend to be carried as "tribal knowledge" instead of being written down. It is an attempt to @@ -22,7 +25,7 @@ RPC API Versions * The manager-side (example: compute/manager) needs a version bump * The manager-side method needs to tolerate older calls as well as - newer calls + newer calls * Arguments can be added as long as they are optional. Arguments cannot be removed or changed in an incompatible way. * The RPC client code (example: compute/rpcapi.py) needs to be able @@ -111,7 +114,7 @@ very long. So here are some key points: * The field names contained in a request/response body should use snake_case style, not CamelCase or Mixed_Case style. 
-* `HTTP Response Codes `_ +* `HTTP Response Codes `_ * Synchronous resource creation: ``201 Created`` * Asynchronous resource creation: ``202 Accepted`` @@ -173,9 +176,9 @@ from the relevant third party test, on the latest patchset, before a +2 vote can be applied. Specifically, changes to nova/virt/driver/ need a +1 vote from the respective third party CI. -For example, if you change something in the XenAPI virt driver, you must wait -for a +1 from the XenServer CI on the latest patchset, before you can give -that patch set a +2 vote. +For example, if you change something in the Hyper-V virt driver, you must wait +for a +1 from the Hyper-V CI on the latest patchset, before you can give that +patch set a +2 vote. This is important to ensure: @@ -232,8 +235,12 @@ Microversion API If a new microversion API is added, the following needs to happen: -* A new patch for the microversion API change in python-novaclient side - should be submitted before the microversion change in Nova is merged. +* A new patch for the microversion API change in both python-novaclient + and in python-openstackclient should be submitted before the microversion + change in Nova is merged. See :python-novaclient-doc:`Adding support for a + new microversion ` in python-novaclient for more + details. See also the `Add support for 'server group create --rule' parameter`_ + patch as an example of how to support a new microversion in the openstack client. * If the microversion changes the response schema, a new schema and test for the microversion must be added to Tempest. The microversion change in Nova should not be merged until the Tempest test is submitted and at least @@ -242,6 +249,8 @@ If a new microversion API is added, the following needs to happen: should reference the Change-Id of the Tempest test for reviewers to identify it. +.. _`Add support for 'server group create --rule' parameter`: https://review.opendev.org/#/c/761597 + Notifications ============= diff --git a/doc/source/contributor/contributing.rst b/doc/source/contributor/contributing.rst new file mode 100644 index 00000000000..132ad285c47 --- /dev/null +++ b/doc/source/contributor/contributing.rst @@ -0,0 +1,62 @@ +============================ +So You Want to Contribute... +============================ + +For general information on contributing to OpenStack, please check out the +`contributor guide `_ to get started. +It covers all the basics that are common to all OpenStack projects: the accounts +you need, the basics of interacting with our Gerrit review system, how we +communicate as a community, etc. + +The sections below cover the more project-specific information you need to get started +with nova. + +Communication +~~~~~~~~~~~~~ + +:doc:`how-to-get-involved` + +Contacting the Core Team +~~~~~~~~~~~~~~~~~~~~~~~~ + +The overall structure of the Nova team is documented on `the wiki +`_. + +New Feature Planning +~~~~~~~~~~~~~~~~~~~~ + +If you want to propose a new feature please read the :doc:`blueprints` page. + +Task Tracking +~~~~~~~~~~~~~ + +We track our tasks in `Launchpad `__. + +If you're looking for some smaller, easier work items to pick up and get started +on, search for the 'low-hanging-fruit' tag. + +Reporting a Bug +~~~~~~~~~~~~~~~ + +You found an issue and want to make sure we are aware of it? You can do so on +`Launchpad `__. +More info about Launchpad usage can be found on `OpenStack docs page +`_.
+
+Getting Your Patch Merged
+~~~~~~~~~~~~~~~~~~~~~~~~~
+
+All changes proposed to Nova require two ``Code-Review +2`` votes from
+Nova core reviewers before one of the core reviewers can approve the patch by
+giving it a ``Workflow +1`` vote. More detailed guidelines for reviewers of
+Nova patches are available at :doc:`code-review`.
+
+
+Project Team Lead Duties
+~~~~~~~~~~~~~~~~~~~~~~~~
+
+All common PTL duties are enumerated in the `PTL guide
+`_.
+
+For the Nova-specific duties you can read the Nova PTL guide:
+:doc:`ptl-guide`
diff --git a/doc/source/contributor/development-environment.rst b/doc/source/contributor/development-environment.rst
index ec86a6269d7..32b8f8334e0 100644
--- a/doc/source/contributor/development-environment.rst
+++ b/doc/source/contributor/development-environment.rst
@@ -71,7 +71,7 @@ Getting the code

Grab the code from git::

-    git clone https://git.openstack.org/openstack/nova
+    git clone https://opendev.org/openstack/nova

    cd nova

@@ -105,7 +105,7 @@ On Fedora-based distributions (e.g., Fedora/RHEL/CentOS/Scientific Linux)::

    tox -e bindep
    sudo yum install 

-On openSUSE-based distributions (SLES 12, openSUSE Leap 42.1 or Tumbleweed)::
+On openSUSE-based distributions (SLES, openSUSE Leap / Tumbleweed)::

    sudo zypper in python-pip
    sudo pip install tox

@@ -129,6 +129,12 @@ or Mac OS X 10.7 (OpenSSL 0.9.8r) or Mac OS X 10.10.3 (OpenSSL 0.9.8zc)
works fine with nova. OpenSSL versions from brew like OpenSSL 1.0.1k work
fine as well.

+Brew is very useful for installing dependencies. At a minimum for running
+tests, install the following::
+
+  brew install python3 postgres
+  python3 -mpip install tox
+
Building the Documentation
==========================

@@ -151,36 +157,47 @@ Running unit tests

See `Running Python Unit Tests`_.

-.. _`Running Python Unit Tests`: http://docs.openstack.org/infra/manual/python.html#running-python-unit-tests
+.. _`Running Python Unit Tests`: https://docs.openstack.org/project-team-guide/project-setup/python.html#running-python-unit-tests

Note that some unit and functional tests use a database. See the file
``tools/test-setup.sh`` on how the databases are set up in the OpenStack CI
environment and replicate it in your test environment.

+Using the pre-commit hook
+=========================
+
+Nova makes use of the `pre-commit framework `__ to
+allow running of some linters on each commit. This must be enabled locally to
+function:
+
+.. code-block:: shell
+
+   $ pip install --user pre-commit
+   $ pre-commit install --allow-missing-config
+
Using a remote debugger
=======================

-Some modern IDE such as pycharm (commercial) or Eclipse (open source) support remote debugging. In order to run nova with remote debugging, start the nova process
-with the following parameters
---remote_debug-host
---remote_debug-port
+Some modern IDEs such as PyCharm (commercial) or Eclipse (open source) support remote debugging. In order to
+run nova with remote debugging, start the nova process with the following parameters::
+
+  --remote_debug-host
+  --remote_debug-port

-Before you start your nova process, start the remote debugger using the instructions for that debugger.
-For pycharm - http://blog.jetbrains.com/pycharm/2010/12/python-remote-debug-with-pycharm/
-For Eclipse - http://pydev.org/manual_adv_remote_debugger.html
+Before you start your nova process, start the remote debugger using the instructions for that debugger:

-More detailed instructions are located here - http://novaremotedebug.blogspot.com
+* For PyCharm - http://blog.jetbrains.com/pycharm/2010/12/python-remote-debug-with-pycharm/
+* For Eclipse - http://pydev.org/manual_adv_remote_debugger.html
+
+More detailed instructions are located here - https://wiki.openstack.org/wiki/Nova/RemoteDebugging

Using fake computes for tests
=============================

The number of instances supported by fake computes is not limited by physical
constraints. It allows you to perform stress tests on a deployment with few
-resources (typically a laptop). But you must avoid using scheduler filters
-limiting the number of instances per compute (like RamFilter, DiskFilter,
-AggregateCoreFilter), otherwise they will limit the number of instances per
-compute.
-
+resources (typically a laptop). Take care to avoid using scheduler filters
+that will limit the number of instances per compute, such as
+``AggregateCoreFilter``.
+
Fake computes can also be used in multi-hypervisor-type deployments in order to
take advantage of fake and "real" computes during tests:
diff --git a/doc/source/contributor/documentation.rst b/doc/source/contributor/documentation.rst
index b7e157f6636..acddf831cad 100644
--- a/doc/source/contributor/documentation.rst
+++ b/doc/source/contributor/documentation.rst
@@ -47,7 +47,7 @@ every stable release (e.g. ``pike``).
unimproved as we address content in ``latest``.

The ``api-ref`` and ``api-guide`` publish only from master to a single site on
-`developer.openstack.org`. As such, they are effectively branchless.
+`docs.openstack.org`. As such, they are effectively branchless.

Guidelines for consumable docs
==============================
diff --git a/doc/source/contributor/evacuate-vs-rebuild.rst b/doc/source/contributor/evacuate-vs-rebuild.rst
new file mode 100644
index 00000000000..92d0a308beb
--- /dev/null
+++ b/doc/source/contributor/evacuate-vs-rebuild.rst
@@ -0,0 +1,103 @@
+===================
+Evacuate vs Rebuild
+===================
+
+The `evacuate API`_ and `rebuild API`_ are commonly confused in nova because
+the internal `conductor code`_ and `compute code`_ both use methods named
+``rebuild_instance``. This document explains some of the differences in what
+happens between an evacuate and rebuild operation.
+
+High level
+~~~~~~~~~~
+
+*Evacuate* is an operation performed by an administrator when a compute service
+or host is encountering some problem, goes down and needs to be fenced from the
+network. The servers that were running on that compute host can be rebuilt on
+a **different** host using the **same** image. If the source and destination
+hosts are running on shared storage, then the root disk image of the servers
+can be retained; otherwise, the root disk image (if not using a volume-backed
+server) will be lost. This is one example of why it is important to attach data
+volumes to a server to store application data and leave the root disk for the
+operating system, since data volumes will be re-attached to the server as part
+of the evacuate process.
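+
+As a rough sketch, the administrator-side flow could look like the following,
+where the host ``compute-1`` and server ``my-server`` are hypothetical and the
+``nova evacuate`` command comes from the legacy novaclient CLI::
+
+    # fence the failed host so the scheduler stops picking it
+    openstack compute service set --disable compute-1 nova-compute
+    # rebuild the server on another host chosen by the scheduler
+    nova evacuate my-server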
+
+*Rebuild* is an operation which can be performed by a non-administrative owner
+of the server (the user) on the **same** compute host to change certain
+aspects of the server, most notably using a **different** image. Note
+that the image does not *have* to change and in the case of volume-backed
+servers the image `currently cannot change`_. Other attributes of the server
+can be changed as well, such as ``key_name`` and ``user_data``. See the
+`rebuild API`_ reference for full usage details. When a user rebuilds a server
+they want to change it, which requires re-spawning the guest in the hypervisor
+while retaining the UUID, volumes and ports attached to the server. For a
+non-volume-backed server the root disk image is rebuilt.
+
+Scheduling
+~~~~~~~~~~
+
+Evacuate always schedules the server to another host and rebuild always occurs
+on the same host.
+
+Note that when `rebuilding with a different image`_, the request is run through
+the scheduler to ensure the new image is still valid for the current compute
+host.
+
+Image
+~~~~~
+
+As noted above, the image that the server uses during an evacuate operation
+does not change. The image used to rebuild a server *may* change but does not
+have to and in the case of volume-backed servers *cannot* change.
+
+Resource claims
+~~~~~~~~~~~~~~~
+
+The compute service ``ResourceTracker`` has a `claims`_ operation which is used
+to ensure resources are available before building a server on the host. The
+scheduler performs the initial filtering of hosts to ensure a server
+can be built on a given host and the compute claim is essentially meant as a
+secondary check to prevent races when the scheduler has out of date information
+or when there are concurrent build requests going to the same host.
+
+During an evacuate operation there is a `rebuild claim`_ since the server is
+being re-built on a different host.
+
+During a rebuild operation, since the flavor does not change, there is
+`no claim`_ made since the host does not change.
+
+Allocations
+~~~~~~~~~~~
+
+Since the 16.0.0 (Pike) release, the scheduler uses the `placement service`_
+to filter compute nodes (resource providers) based on information in the flavor
+and image used to build the server. Once the scheduler runs through its filters
+and weighers and picks a host, resource class `allocations`_ are atomically
+consumed in placement with the server as the consumer.
+
+During an evacuate operation, the allocations held by the server consumer
+against the source compute node resource provider are left intact since the
+source compute service is down. Note that `migration-based allocations`_,
+which were introduced in the 17.0.0 (Queens) release, do not apply to evacuate
+operations but only to resize, cold migrate and live migrate. So once a server
+is successfully evacuated to a different host, the placement service will track
+allocations for that server against both the source and destination compute
+node resource providers. If the source compute service is restarted after
+being evacuated and fixed, the compute service will
+`delete the old allocations`_ held by the evacuated servers.
+
+During a rebuild operation, since neither the host nor flavor changes, the
+server allocations remain intact.
+
+.. _evacuate API: https://docs.openstack.org/api-ref/compute/#evacuate-server-evacuate-action
+.. _rebuild API: https://docs.openstack.org/api-ref/compute/#rebuild-server-rebuild-action
+.. 
_conductor code: https://opendev.org/openstack/nova/src/tag/19.0.0/nova/conductor/manager.py#L944 +.. _compute code: https://opendev.org/openstack/nova/src/tag/19.0.0/nova/compute/manager.py#L3052 +.. _currently cannot change: https://specs.openstack.org/openstack/nova-specs/specs/train/approved/volume-backed-server-rebuild.html +.. _rebuilding with a different image: https://opendev.org/openstack/nova/src/tag/19.0.0/nova/compute/api.py#L3414 +.. _claims: https://opendev.org/openstack/nova/src/tag/19.0.0/nova/compute/claims.py +.. _rebuild claim: https://opendev.org/openstack/nova/src/tag/19.0.0/nova/compute/manager.py#L3104 +.. _no claim: https://opendev.org/openstack/nova/src/tag/19.0.0/nova/compute/manager.py#L3108 +.. _placement service: https://docs.openstack.org/placement/latest/ +.. _allocations: https://docs.openstack.org/api-ref/placement/#allocations +.. _migration-based allocations: https://specs.openstack.org/openstack/nova-specs/specs/queens/implemented/migration-allocations.html +.. _delete the old allocations: https://opendev.org/openstack/nova/src/tag/19.0.0/nova/compute/manager.py#L627 diff --git a/doc/source/contributor/how-to-get-involved.rst b/doc/source/contributor/how-to-get-involved.rst index 960fae37ad2..4e99f79e19f 100644 --- a/doc/source/contributor/how-to-get-involved.rst +++ b/doc/source/contributor/how-to-get-involved.rst @@ -24,7 +24,7 @@ We are working on building easy ways for you to get help and ideas on how to learn more about Nova and how the Nova community works. Any questions, please ask! If you are unsure who to ask, then please -contact the `Mentoring Czar`__. +contact the `PTL`__. __ `Nova People`_ @@ -33,14 +33,15 @@ How do I get started? There are quite a few global docs on this: -- http://www.openstack.org/assets/welcome-guide/OpenStackWelcomeGuide.pdf -- https://wiki.openstack.org/wiki/How_To_Contribute -- http://www.openstack.org/community/ +- https://docs.openstack.org/contributors/ +- https://www.openstack.org/community/ +- https://www.openstack.org/assets/welcome-guide/OpenStackWelcomeGuide.pdf +- https://wiki.openstack.org/wiki/How_To_Contribute There is more general info, non Nova specific info here: -- https://wiki.openstack.org/wiki/Mentors -- https://wiki.openstack.org/wiki/OpenStack_Upstream_Training +- https://wiki.openstack.org/wiki/Mentoring +- https://docs.openstack.org/upstream-training/ What should I work on? ~~~~~~~~~~~~~~~~~~~~~~ @@ -54,15 +55,10 @@ downvoted. There is also the :ref:`code-review`. Once you have some understanding, start reviewing patches. It's OK to ask people to explain things you don't understand. It's also OK to see some potential problems but put a +0. -Another way is to look for a subteam you'd like to get involved with and review -their patches. See: -https://etherpad.openstack.org/p/rocky-nova-priorities-tracking - Once you're ready to write code, take a look at some of the work already marked as low-hanging fruit: * https://bugs.launchpad.net/nova/+bugs?field.tag=low-hanging-fruit -* https://etherpad.openstack.org/p/nova-low-hanging-fruit How do I get my feature in? ~~~~~~~~~~~~~~~~~~~~~~~~~~~ @@ -104,7 +100,7 @@ Here are some top tips around engaging with the Nova community: - not sure about asking questions? 
feel free to listen in around other people's questions
- we recommend you set up an IRC bouncer:
-  https://wiki.openstack.org/wiki/IRC
+  https://docs.openstack.org/contributors/common/irc.html

- Email

@@ -215,15 +211,16 @@ really helps you:

- Doing more reviews, and seeing what other reviewers notice, will help
  you better understand what is expected of code that gets merged into
-  master
+  master.
- Having more non-core people do great reviews, leaves less review work
-  for the core reviewers to do, so we are able get more code merged
+  for the core reviewers to do, so we are able to get more code merged.
- Empathy is one of the keys to a happy community. If you are used to
  doing code reviews, you will better understand the comments you get
  when people review your code. As you do more code reviews, and see
  what others notice, you will get a better idea of what people are
  looking for when they apply a +2 to your code.
-- TODO - needs more detail
+- If you do quality reviews, you'll be noticed and it's more likely
+  you'll get reciprocal eyes on your reviews.

What are the most useful types of code review comments? Well, here are
a few of the top ones:

@@ -264,7 +261,7 @@ reviews:
- Where do I start? What should I review?

  - There are various tools, but a good place to start is:
-   https://etherpad.openstack.org/p/rocky-nova-priorities-tracking
+   https://etherpad.openstack.org/p/nova-runways-xena
  - Depending on the time in the cycle, it's worth looking at
    NeedsCodeReview blueprints:
    https://blueprints.launchpad.net/nova/
@@ -279,7 +276,7 @@ reviews:
  - Maybe take a look at things you want to see merged, bug fixes and
    features, or little code fixes
  - Look for things that have been waiting a long time for a review:
-   http://5885fef486164bb8596d-41634d3e64ee11f37e8658ed1b4d12ec.r44.cf3.rackcdn.com/nova-openreviews.html
+   https://review.opendev.org/#/q/project:openstack/nova+status:open+age:2weeks
  - If you get through the above lists, try other tools, such as:
    http://status.openstack.org/reviews

@@ -326,7 +323,7 @@ becoming a member of nova-core.
How to do great nova-spec reviews?
==================================

-https://specs.openstack.org/openstack/nova-specs/specs/rocky/template.html
+https://specs.openstack.org/openstack/nova-specs/specs/xena/template.html

:doc:`/contributor/blueprints`.

@@ -369,12 +366,11 @@ There are many ways to help lead the Nova project:
* Mentoring efforts, and getting started tips:
  https://wiki.openstack.org/wiki/Nova/Mentoring
* Info on process, with a focus on how you can go from an idea
-  to getting code merged Nova:
-  https://wiki.openstack.org/wiki/Nova/Mitaka_Release_Schedule
+  to getting code merged in Nova: :ref:`process`
* Consider leading an existing `Nova subteam`_ or forming a new one.
* Consider becoming a `Bug tag owner`_.
* Contact the PTL about becoming a Czar `Nova People`_.

.. _`Nova people`: https://wiki.openstack.org/wiki/Nova#People
.. _`Nova subteam`: https://wiki.openstack.org/wiki/Nova#Nova_subteams
-.. _`Bug tag owner`: https://wiki.openstack.org/wiki/Nova/BugTriage#Step_2:_Triage_Tagged_Bugs
+.. _`Bug tag owner`: https://wiki.openstack.org/wiki/Nova/BugTriage#Tag_Owner_List
diff --git a/doc/source/contributor/index.rst b/doc/source/contributor/index.rst
index 8116bacb514..2889199147e 100644
--- a/doc/source/contributor/index.rst
+++ b/doc/source/contributor/index.rst
@@ -7,6 +7,14 @@ enhance documentation, and increase testing. Contributions of any type are
valuable, and part of what keeps the project going. 
Here is a list of resources to get you started.

+Basic Information
+=================
+
+.. toctree::
+   :maxdepth: 2
+
+   contributing
+
Getting Started
===============

@@ -14,6 +22,14 @@ Getting Started
* :doc:`/contributor/development-environment`: Get your computer setup to
  contribute

+.. # NOTE(amotoki): toctree needs to be placed at the end of the section to
+   # keep the document structure in the PDF doc.
+.. toctree::
+   :hidden:
+
+   how-to-get-involved
+   development-environment
+
Nova Process
============

@@ -24,7 +40,7 @@ is important for maintaining the strong ecosystem around nova.

Our process is always evolving, just as nova and the community around nova
evolves over time. If there are things that seem strange, or you have ideas on
-how to improve things, please bring them forward on IRC or the openstack-dev
+how to improve things, please bring them forward on IRC or the openstack-discuss
mailing list, so we continue to improve how the nova community operates.

This section looks at the processes and why. The main aim behind all the
@@ -42,6 +58,19 @@ while keeping users happy and keeping developers productive.

* :doc:`/contributor/blueprints`: An overview of our tracking artifacts.

+* :doc:`/contributor/ptl-guide`: A chronological PTL reference guide
+
+.. # NOTE(amotoki): toctree needs to be placed at the end of the section to
+   # keep the document structure in the PDF doc.
+.. toctree::
+   :hidden:
+
+   project-scope
+   policies
+   process
+   blueprints
+   ptl-guide
+
Reviewing
=========

@@ -57,6 +86,15 @@ Reviewing
* :doc:`/contributor/documentation`: Guidelines for handling documentation
  contributions

+.. # NOTE(amotoki): toctree needs to be placed at the end of the section to
+   # keep the document structure in the PDF doc.
+.. toctree::
+   :hidden:
+
+   releasenotes
+   code-review
+   /reference/i18n
+   documentation

Testing
=======

@@ -76,6 +114,24 @@ be Python code. All new code needs to be validated somehow.

  * :doc:`/contributor/testing/zero-downtime-upgrade`

+  * :doc:`/contributor/testing/down-cell`
+
+* **Profiling Guides**: These are guides to profiling nova.
+
+  * :doc:`/contributor/testing/eventlet-profiling`
+
+.. # NOTE(amotoki): toctree needs to be placed at the end of the section to
+   # keep the document structure in the PDF doc.
+.. toctree::
+   :hidden:
+
+   testing
+   testing/libvirt-numa
+   testing/serial-console
+   testing/zero-downtime-upgrade
+   testing/down-cell
+   testing/eventlet-profiling
+
The Nova API
============

@@ -93,13 +149,34 @@ changes done to the API, as the impact can be very wide.

* :doc:`/contributor/api-ref-guideline`: The guideline to write the API
  reference.

+.. # NOTE(amotoki): toctree needs to be placed at the end of the section to
+   # keep the document structure in the PDF doc.
+.. toctree::
+   :hidden:
+
+   api
+   api-2
+   microversions
+   api-ref-guideline
+
Nova Major Subsystems
=====================

-Major subsystems in nova have different needs; some of those are documented
-here. If you are contributing to one of these please read the :ref:`reference guide ` before
-before diving in.
+Major subsystems in nova have different needs. If you are contributing to one
+of these please read the :ref:`reference guide ` before
+diving in.
+
+* Move operations
+
+  * :doc:`/contributor/evacuate-vs-rebuild`: Describes the differences between
+    the often-confused evacuate and rebuild operations.
+  * :doc:`/contributor/resize-and-cold-migrate`: Describes the differences and
+    similarities between resize and cold migrate operations.

-* :doc:`/contributor/placement`
+.. 
# NOTE(amotoki): toctree needs to be placed at the end of the section to
+   # keep the document structure in the PDF doc.
+.. toctree::
+   :hidden:

-* :doc:`/user/conductor`
+   evacuate-vs-rebuild
+   resize-and-cold-migrate
diff --git a/doc/source/contributor/microversions.rst b/doc/source/contributor/microversions.rst
index e984bbfcc99..e06a75d7d3a 100644
--- a/doc/source/contributor/microversions.rst
+++ b/doc/source/contributor/microversions.rst
@@ -44,7 +44,7 @@ responses from the server.
  providing microversion ``2.27`` must use the older form.

For full details please read the `Kilo spec for microversions
-`_
+`_
and `Microversion Specification
`_.

@@ -226,7 +226,7 @@ A microversion is not needed in the following situation:

**Footnotes**

-.. [#f3] https://review.openstack.org/#/c/523194/
+.. [#f3] https://review.opendev.org/#/c/523194/

In Code
-------
@@ -368,14 +368,24 @@ necessary to add changes to other places which describe your change:

* Make a new commit to python-novaclient and update corresponding
  files to enable the newly added microversion API.
+  See :python-novaclient-doc:`Adding support for a new microversion
+  ` in python-novaclient for more details.

* If the microversion changes the response schema, a new schema and test for
  the microversion must be added to Tempest.

+* If applicable, add functional sample tests under
+  ``nova/tests/functional/api_sample_tests``. Also, add JSON examples to the
+  ``doc/api_samples`` directory; these can be generated automatically via the
+  tox env ``api-samples``, or by running the tests with the env var
+  ``GENERATE_SAMPLES`` set to ``True``.
+
* Update the `API Reference`_ documentation as appropriate.  The source is
  located under `api-ref/source/`.

-.. _API Reference: https://developer.openstack.org/api-ref/compute/
+* If the microversion changes server-related APIs, update the
+  ``api-guide/source/server_concepts.rst`` accordingly.
+
+.. _API Reference: https://docs.openstack.org/api-ref/compute/

Allocating a microversion
-------------------------
diff --git a/doc/source/contributor/placement.rst b/doc/source/contributor/placement.rst
deleted file mode 100644
index 2f94c747e7e..00000000000
--- a/doc/source/contributor/placement.rst
+++ /dev/null
@@ -1,434 +0,0 @@
-..
-      Licensed under the Apache License, Version 2.0 (the "License"); you may
-      not use this file except in compliance with the License. You may obtain
-      a copy of the License at
-
-          http://www.apache.org/licenses/LICENSE-2.0
-
-      Unless required by applicable law or agreed to in writing, software
-      distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-      WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-      License for the specific language governing permissions and limitations
-      under the License.
-
-===============================
- Placement API Developer Notes
-===============================
-
-Overview
-========
-
-The Nova project introduced the :doc:`placement service ` as
-part of the Newton release. The service provides an HTTP API to manage
-inventories of different classes of resources, such as disk or virtual cpus,
-made available by entities called resource providers. Information provided
-through the placement API is intended to enable more effective accounting of
-resources in an OpenStack deployment and better scheduling of various entities
-in the cloud.
-
-The document serves to explain the architecture of the system and to provide
-some guidance on how to maintain and extend the code. 
For more detail on why -the system was created and how it does its job see :doc:`/user/placement`. - -Big Picture -=========== - -The placement service is straightforward: It is a `WSGI`_ application that -sends and receives JSON, using an RDBMS (usually MySQL) for persistence. -As state is managed solely in the DB, scaling the placement service is done by -increasing the number of WSGI application instances and scaling the RDBMS using -traditional database scaling techniques. - -For sake of consistency and because there was initially intent to make the -entities in the placement service available over RPC, -:oslo.versionedobjects-doc:`versioned objects <>` are used to provide the -interface between the HTTP application layer and the SQLAlchemy-driven -persistence layer. Even without RPC, these objects provide useful structuring -and separation of the code. - -Though the placement service doesn't aspire to be a `microservice` it does -aspire to continue to be small and minimally complex. This means a relatively -small amount of middleware that is not configurable, and a limited number of -exposed resources where any given resource is represented by one (and only -one) URL that expresses a noun that is a member of the system. Adding -additional resources should be considered a significant change requiring robust -review from many stakeholders. - -The set of HTTP resources represents a concise and constrained grammar for -expressing the management of resource providers, inventories, resource classes, -traits, and allocations. If a solution is initially designed to need more -resources or a more complex grammar that may be a sign that we need to give our -goals greater scrutiny. Is there a way to do what we want with what we have -already? Can some other service help? Is a new collaborating service required? - -Minimal Framework -================= - -The API is set up to use a minimal framework that tries to keep the structure -of the application as discoverable as possible and keeps the HTTP interaction -near the surface. The goal of this is to make things easy to trace when -debugging or adding functionality. - -Functionality which is required for every request is handled in raw WSGI -middleware that is composed in the `nova.api.openstack.placement.deploy` -module. Dispatch or routing is handled declaratively via the -``ROUTE_DECLARATIONS`` map defined in the -`nova.api.openstack.placement.handler` module. - -Mapping is by URL plus request method. The destination is a complete WSGI -application, using a subclass of the `wsgify`_ method from `WebOb`_ to provide -a `Request`_ object that provides convenience methods for accessing request -headers, bodies, and query parameters and for generating responses. In the -placement API these mini-applications are called `handlers`. The `wsgify` -subclass is provided in `nova.api.openstack.placement.wsgi_wrapper` as -`PlacementWsgify`. It is used to make sure that JSON formatted error responses -are structured according to the API-WG `errors`_ guideline. - -This division between middleware, dispatch and handlers is supposed to -provide clues on where a particular behavior or functionality should be -implemented. Like most such systems, this doesn't always work but is a useful -tool. - -Gotchas -======= - -This section tries to shed some light on some of the differences between the -placement API and some of the nova APIs or on situations which may be -surprising or unexpected. 
- -* The placement API is somewhat more strict about `Content-Type` and `Accept` - headers in an effort to follow the HTTP RFCs. - - If a user-agent sends some JSON in a `PUT` or `POST` request without a - `Content-Type` of `application/json` the request will result in an error. - - If a `GET` request is made without an `Accept` header, the response will - default to being `application/json`. - - If a request is made with an explicit `Accept` header that does not include - `application/json` then there will be an error and the error will attempt to - be in the requested format (for example, `text/plain`). - -* If a URL exists, but a request is made using a method that that URL does not - support, the API will respond with a `405` error. Sometimes in the nova APIs - this can be a `404` (which is wrong, but understandable given the constraints - of the code). - -* Because each handler is individually wrapped by the `PlacementWsgify` - decorator any exception that is a subclass of `webob.exc.WSGIHTTPException` - that is raised from within the handler, such as `webob.exc.HTTPBadRequest`, - will be caught by WebOb and turned into a valid `Response`_ containing - headers and body set by WebOb based on the information given when the - exception was raised. It will not be seen as an exception by any of the - middleware in the placement stack. - - In general this is a good thing, but it can lead to some confusion if, for - example, you are trying to add some middleware that operates on exceptions. - - Other exceptions that are not from `WebOb`_ will raise outside the handlers - where they will either be caught in the `__call__` method of the - `PlacementHandler` app that is responsible for dispatch, or by the - `FaultWrap` middleware. - -Microversions -============= - -The placement API makes use of `microversions`_ to allow the release of new -features on an opt in basis. See :doc:`/user/placement` for an up to date -history of the available microversions. - -The rules around when a microversion is needed are the same as for the -:doc:`compute API `. When adding a new microversion -there are a few bits of required housekeeping that must be done in the code: - -* Update the ``VERSIONS`` list in - ``nova/api/openstack/placement/microversion.py`` to indicate the new - microversion and give a very brief summary of the added feature. -* Update ``nova/api/openstack/placement/rest_api_version_history.rst`` - to add a more detailed section describing the new microversion. -* Add a :reno-doc:`release note <>` with a ``features`` section announcing the - new or changed feature and the microversion. -* If the ``version_handler`` decorator (see below) has been used, - increment ``TOTAL_VERSIONED_METHODS`` in - ``nova/tests/unit/api/openstack/placement/test_microversion.py``. - This provides a confirmatory check just to make sure you're paying - attention and as a helpful reminder to do the other things in this - list. -* Include functional gabbi tests as appropriate (see `Using Gabbi`_). At the - least, update the ``latest microversion`` test in - ``nova/tests/functional/api/openstack/placement/gabbits/microversion.yaml``. -* Update the `API Reference`_ documentation as appropriate. The source is - located under `placement-api-ref/source/`. 
- -In the placement API, microversions only use the modern form of the -version header:: - - OpenStack-API-Version: placement 1.2 - -If a valid microversion is present in a request it will be placed, -as a ``Version`` object, into the WSGI environment with the -``placement.microversion`` key. Often, accessing this in handler -code directly (to control branching) is the most explicit and -granular way to have different behavior per microversion. A -``Version`` instance can be treated as a tuple of two ints and -compared as such or there is a ``matches`` method. - -A ``version_handler`` decorator is also available. It makes it possible to have -multiple different handler methods of the same (fully-qualified by package) -name, each available for a different microversion window. If a request wants a -microversion that's not available, a defined status code is returned (usually -``404`` or ``405``). There is a unit test in place which will fail if there are -version intersections. - -Adding a New Handler -==================== - -Adding a new URL or a new method (e.g, ``PATCH``) to an existing URL -requires adding a new handler function. In either case a new microversion and -release note is required. When adding an entirely new route a request for a -lower microversion should return a ``404``. When adding a new method to an -existing URL a request for a lower microversion should return a ``405``. - -In either case, the ``ROUTE_DECLARATIONS`` dictionary in the -`nova.api.openstack.placement.handler` module should be updated to point to a -function within a module that contains handlers for the type of entity -identified by the URL. Collection and individual entity handlers of the same -type should be in the same module. - -As mentioned above, the handler function should be decorated with -``@wsgi_wrapper.PlacementWsgify``, take a single argument ``req`` which is a -WebOb `Request`_ object, and return a WebOb `Response`_. - -For ``PUT`` and ``POST`` methods, request bodies are expected to be JSON -based on a content-type of ``application/json``. This may be enforced by using -a decorator: ``@util.require_content('application/json')``. If the body is not -`JSON`, a ``415`` response status is returned. - -Response bodies are usually `JSON`. A handler can check the `Accept` header -provided in a request using another decorator: -``@util.check_accept('application/json')``. If the header does not allow -`JSON`, a ``406`` response status is returned. - -If a hander returns a response body, a ``Last-Modified`` header should be -included with the response. If the entity or entities in the response body -are directly associated with an object (or objects, in the case of a -collection response) that has an ``updated_at`` (or ``created_at``) -field, that field's value can be used as the value of the header (WebOb will -take care of turning the datetime object into a string timestamp). A -``util.pick_last_modified`` is available to help choose the most recent -last-modified when traversing a collection of entities. - -If there is no directly associated object (for example, the output is the -composite of several objects) then the ``Last-Modified`` time should be -``timeutils.utcnow(with_timezone=True)`` (the timezone must be set in order -to be a valid HTTP timestamp). 
For example, the response__ to -``GET /allocation_candidates`` should have a last-modified header of now -because it is composed from queries against many different database entities, -presents a mixture of result types (allocation requests and provider -summaries), and has a view of the system that is only meaningful *now*. - -__ https://developer.openstack.org/api-ref/placement/#list-allocation-candidates - -If a ``Last-Modified`` header is set, then a ``Cache-Control`` header with a -value of ``no-cache`` must be set as well. This is to avoid user-agents -inadvertently caching the responses. - -`JSON` sent in a request should be validated against a JSON Schema. A -``util.extract_json`` method is available. This takes a request body and a -schema. If multiple schema are used for different microversions of the same -request, the caller is responsible for selecting the right one before calling -``extract_json``. - -When a handler needs to read or write the data store it should use methods on -the objects found in the -`nova.api.openstack.placement.objects.resource_provider` package. Doing so -requires a context which is provided to the handler method via the WSGI -environment. It can be retrieved as follows:: - - context = req.environ['placement.context'] - -.. note:: If your change requires new methods or new objects in the - `resource_provider` package, after you've made sure that you really - do need those new methods or objects (you may not!) make those - changes in a patch that is separate from and prior to the HTTP API - change. - -If a handler needs to return an error response, with the advent of `Placement -API Error Handling`_, it is possible to include a code in the JSON error -response. This can be used to distinguish different errors with the same HTTP -response status code (a common case is a generation conflict versus an -inventory in use conflict). Error codes are simple namespaced strings (e.g., -``placement.inventory.inuse``) for which symbols are maintained in -``nova.api.openstack.placement.errors``. Adding a symbol to a response is done -by using the ``comment`` kwarg to a WebOb exception, like this:: - - except exception.InventoryInUse as exc: - raise webob.exc.HTTPConflict( - _('update conflict: %(error)s') % {'error': exc}, - comment=errors.INVENTORY_INUSE) - -Code that adds newly raised exceptions should include an error code. Find -additional guidelines on use in the docs for -``nova.api.openstack.placement.errors``. - -Testing of handler code is described in the next section. - -Testing -======= - -Most of the handler code in the placement API is tested using `gabbi`_. Some -utility code is tested with unit tests found in -`nova/tests/unit/api/openstack/placement/`. The back-end objects are tested -with a combination of unit and functional tests found in -``nova/tests/unit/api/openstack/placement/objects/test_resource_provider.py`` -and `nova/tests/functional/api/openstack/placement/db`. Adding unit and -non-gabbi functional tests is done in the same way as other aspects of nova. - -When writing tests for handler code (that is, the code found in -``nova/api/openstack/placement/handlers``) a good rule of thumb is that if you -feel like there needs to be a unit test for some of the code in the handler, -that is a good sign that the piece of code should be extracted to a separate -method. That method should be independent of the handler method itself (the one -decorated by the ``wsgify`` method) and testable as a unit, without mocks if -possible. 
If the extracted method is useful for multiple resources consider -putting it in the ``util`` package. - -As a general guide, handler code should be relatively short and where there are -conditionals and branching, they should be reachable via the gabbi functional -tests. This is merely a design goal, not a strict constraint. - -Using Gabbi ------------ - -Gabbi was developed in the `telemetry`_ project to provide a declarative way to -test HTTP APIs that preserves visibility of both the request and response of -the HTTP interaction. Tests are written in YAML files where each file is an -ordered suite of tests. Fixtures (such as a database) are set up and torn down -at the beginning and end of each file, not each test. JSON response bodies can -be evaluated with `JSONPath`_. The placement WSGI -application is run via `wsgi-intercept`_, meaning that real HTTP requests are -being made over a file handle that appears to Python to be a socket. - -In the placement API the YAML files (aka "gabbits") can be found in -``nova/tests/functional/api/openstack/placement/gabbits``. Fixture definitions -are in ``nova/tests/functional/api/openstack/placement/fixtures/gabbits.py``. -Tests are frequently grouped by handler name (e.g., ``resource-provider.yaml`` -and ``inventory.yaml``). This is not a requirement and as we increase the -number of tests it makes sense to have more YAML files with fewer tests, -divided up by the arc of API interaction that they test. - -The gabbi tests are integrated into the functional tox target, loaded via -``nova/tests/functional/api/openstack/placement/test_placement_api.py``. If you -want to run just the gabbi tests one way to do so is:: - - tox -efunctional test_placement_api - -If you want to run just one yaml file (in this example ``inventory.yaml``):: - - tox -efunctional placement_api.inventory - -It is also possible to run just one test from within one file. When you do this -every test prior to the one you asked for will also be run. This is because -the YAML represents a sequence of dependent requests. Select the test by using -the name in the yaml file, replacing space with ``_``:: - - tox -efunctional placement_api.inventory_post_new_ipv4_address_inventory - -.. note:: ``tox.ini`` in the nova repository is configured by a ``group_regex`` - so that each gabbi YAML is considered a group. Thus, all tests in the - file will be run in the same process when running stestr concurrently - (the default). - -Writing More Gabbi Tests ------------------------- - -The docs for `gabbi`_ try to be complete and explain the `syntax`_ in some -depth. Where something is missing or confusing, please log a `bug`_. - -While it is possible to test all aspects of a response (all the response -headers, the status code, every attribute in a JSON structure) in one single -test, doing so will likely make the test harder to read and will certainly make -debugging more challenging. If there are multiple things that need to be -asserted, making multiple requests is reasonable. Since database set up is only -happening once per file (instead of once per test) and since there's no TCP -overhead, the tests run quickly. - -While `fixtures`_ can be used to establish entities that are required for -tests, creating those entities via the HTTP API results in tests which are more -descriptive. For example the ``inventory.yaml`` file creates the resource -provider to which it will then add inventory. 
This makes it easy to explore a -sequence of interactions and a variety of responses with the tests: - -* create a resource provider -* confirm it has empty inventory -* add inventory to the resource provider (in a few different ways) -* confirm the resource provider now has inventory -* modify the inventory -* delete the inventory -* confirm the resource provider now has empty inventory - -Nothing special is required to add a new set of tests: create a YAML file with -a unique name in the same directory as the others. The other files can provide -examples. Gabbi can provide a useful way of doing test driven development of a -new handler: create a YAML file that describes the desired URLs and behavior -and write the code to make it pass. - -It's also possible to use gabbi against a running placement service, for -example in devstack. See `gabbi-run`_ to get started. - -Futures -======= - -Since before it was created there has been a long term goal for the placement -service to be extracted to its own repository and operate as its own -independent service. There are many reasons for this, but two main ones are: - -* Multiple projects, not just nova, will eventually need to manage resource - providers using the placement API. -* A separate service helps to maintain and preserve a strong contract between - the placement service and the consumers of the service. - -To lessen the pain of the eventual extraction of placement the service has been -developed in a way to limit dependency on the rest of the nova codebase and be -self-contained: - -* Most code is in `nova/api/openstack/placement`. -* Database query code is kept within the objects in - `nova/api/openstack/placement/objects`. -* The methods on the objects are not remotable, as the only intended caller is - the placement API code. - -There are some exceptions to the self-contained rule (which are actively being -addressed to prepare for the extraction): - -* Some of the code related to a resource class cache is within the `nova.db` - package, while other parts are in ``nova/rc_fields.py``. -* Database models, migrations and tables are described as part of the nova api - database. An optional configuration option, - :oslo.config:option:`placement_database.connection`, can be set to use a - database just for placement (based on the api database schema). -* `nova.i18n` package provides the ``_`` and related functions. -* ``nova.conf`` is used for configuration. -* Unit and functional tests depend on fixtures and other functionality in base - classes provided by nova. - -When creating new code for the placement service, please be aware of the plan -for an eventual extraction and avoid creating unnecessary interdependencies. - -.. _WSGI: https://www.python.org/dev/peps/pep-3333/ -.. _wsgify: http://docs.webob.org/en/latest/api/dec.html -.. _WebOb: http://docs.webob.org/en/latest/ -.. _Request: http://docs.webob.org/en/latest/reference.html#request -.. _Response: http://docs.webob.org/en/latest/#response -.. _microversions: http://specs.openstack.org/openstack/api-wg/guidelines/microversion_specification.html -.. _gabbi: https://gabbi.readthedocs.io/ -.. _telemetry: http://specs.openstack.org/openstack/telemetry-specs/specs/kilo/declarative-http-tests.html -.. _wsgi-intercept: http://wsgi-intercept.readthedocs.io/ -.. _syntax: https://gabbi.readthedocs.io/en/latest/format.html -.. _bug: https://github.com/cdent/gabbi/issues -.. _fixtures: http://gabbi.readthedocs.io/en/latest/fixtures.html -.. 
_JSONPath: http://goessner.net/articles/JsonPath/ -.. _gabbi-run: http://gabbi.readthedocs.io/en/latest/runner.html -.. _errors: http://specs.openstack.org/openstack/api-wg/guidelines/errors.html -.. _API Reference: https://developer.openstack.org/api-ref/placement/ -.. _Placement API Error Handling: http://specs.openstack.org/openstack/nova-specs/specs/rocky/approved/placement-api-error-handling.html diff --git a/doc/source/contributor/policies.rst b/doc/source/contributor/policies.rst index f1f611a50d5..4e916eb9cbc 100644 --- a/doc/source/contributor/policies.rst +++ b/doc/source/contributor/policies.rst @@ -33,7 +33,7 @@ Public Contractual APIs Although nova has many internal APIs, they are not all public contractual APIs. Below is a link of our public contractual APIs: -* https://developer.openstack.org/api-ref/compute/ +* https://docs.openstack.org/api-ref/compute/ Anything not in this list is considered private, not to be used outside of nova, and should not be considered stable. @@ -59,7 +59,7 @@ As a patch author, you should try to offset the reviewer resources spent on your patch by reviewing other patches. If no one does this, the review team (cores and otherwise) become spread too thin. -For review guidelines see: https://docs.openstack.org/infra/manual/developers.html#peer-review +For review guidelines see: :doc:`code-review` Reverts for Retrospective Vetos =============================== @@ -129,3 +129,32 @@ deployments should not rely on the metrics that Nova gathers and should instead focus their efforts on alternative solutions for placement. .. _Newton midcycle: http://lists.openstack.org/pipermail/openstack-dev/2016-August/100600.html + +Continuous Delivery Mentality +============================= + +Nova generally tries to subscribe to a philosophy of anything we merge today +can be in production today, and people can continuously deliver Nova. + +In practice this means we should not merge code that will not work until some +later change is merged, because that later change may never come, or not come +in the same release cycle, or may be substantially different from what was +originally intended. For example, if patch A uses code that is not available +until patch D later in the series. + +The strategy for dealing with this in particularly long and complicated series +of changes is to start from the "bottom" with code that is no-op until it is +"turned on" at the top of the stack, generally with some feature flag, policy +rule, API microversion, etc. So in the example above, the code from patch D +should come before patch A even if nothing is using it yet, but things will +build on it. Realistically this means if you are working on a feature that +touches most of the Nova "stack", i.e. compute driver/service through to API, +you will work on the compute driver/service code first, then conductor and/or +scheduler, and finally the API. An extreme example of this can be found by +reading the `code review guide for the cross-cell resize feature`_. + +Even if this philosophy is not the reality of how the vast majority of +OpenStack deployments consume Nova, it is a development philosophy to try and +avoid merging broken code. + +.. 
_code review guide for the cross-cell resize feature: http://lists.openstack.org/pipermail/openstack-discuss/2019-May/006366.html diff --git a/doc/source/contributor/process.rst b/doc/source/contributor/process.rst index 05e6036ee38..fe04ccec1fd 100644 --- a/doc/source/contributor/process.rst +++ b/doc/source/contributor/process.rst @@ -36,12 +36,12 @@ If you are new to Nova, please read this first: :ref:`getting_involved`. Dates overview ============== -For Rocky, please see: -https://wiki.openstack.org/wiki/Nova/Rocky_Release_Schedule +For Xena, please see: +https://wiki.openstack.org/wiki/Nova/Xena_Release_Schedule -.. note: Throughout this document any link which references the name of a - release cycle in the link can usually be changed to the name of the - current cycle to get up to date information. +.. note:: Throughout this document any link which references the name of a + release cycle in the link can usually be changed to the name of the + current cycle to get up to date information. Feature Freeze ~~~~~~~~~~~~~~ @@ -102,9 +102,9 @@ Why we have a Spec Freeze: By the freeze date, we expect all blueprints that will be approved for the cycle to be listed on launchpad and all relevant specs to be merged. -For Rocky, blueprints can be found at -https://blueprints.launchpad.net/nova/rocky and specs at -https://specs.openstack.org/openstack/nova-specs/specs/rocky/index.html +For Xena, blueprints can be found at +https://blueprints.launchpad.net/nova/xena and specs at +https://specs.openstack.org/openstack/nova-specs/specs/xena/index.html Starting with Liberty, we are keeping a backlog open for submission at all times. @@ -125,59 +125,6 @@ yourself available to discuss the blueprint, or alternatively make your case on the ML before the meeting): https://wiki.openstack.org/wiki/Meetings/Nova#Agenda_for_next_meeting -Non-priority Feature Freeze -~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -This is a Nova specific process. - -This only applies to low priority blueprints in this list: -https://blueprints.launchpad.net/nova/rocky - -We currently have a very finite amount of review bandwidth. In order to -make code review time for the agreed community wide priorities, we have -to not do some other things. In each cycle, milestones are used to bound -when certain types of work will be active and reviewed and to avoid crushing -the gate with too much code near the end of the cycle. - -For example, in the Liberty cycle, we reserved the liberty-3 milestone for -priority features and bug fixes and did not merge any non-priority things -during liberty-3. This meant that liberty-2 was the "Feature Freeze" for -blueprints that were not a priority for the Liberty cycle. - -You can see the list of priorities for each release: -http://specs.openstack.org/openstack/nova-specs/#priorities - -For things that are very close to merging, it's possible to request an -exception for one week after the freeze date, given the patches get -enough +2s from the core team to get the code merged. But we expect this -list to be zero, if everything goes to plan (no massive gate failures, -etc). 
For history of the process see: -http://lists.openstack.org/pipermail/openstack-dev/2015-July/070920.html - -Exception process: - -- Please add request in here: - https://etherpad.openstack.org/p/rocky-nova-non-priority-feature-freeze - (ideally with core reviewers to sponsor your patch, normally the - folks who have already viewed those patches) -- make sure you make your request before the end of the feature freeze - exception period -- nova-drivers will meet to decide what gets an exception (for some history - see: - http://lists.openstack.org/pipermail/openstack-dev/2015-February/056208.html) -- an initial list of exceptions (probably just a PTL compiled list at - that point) will be available for discussion during the next Nova meeting -- the aim is to merge the code for all exceptions early in the following week - -Alternatives: - -- It was hoped to make this a continuous process using "slots" to - control what gets reviewed, but this was rejected by the community - when it was last discussed. There is hope this can be resurrected to - avoid the "lumpy" nature of this process. -- Currently the runways/kanban ideas are blocked on us adopting - something like phabricator that could support such workflows - String Freeze ~~~~~~~~~~~~~ @@ -236,7 +183,7 @@ But here is the rough idea: with limited deployer and doc impact, it probably doesn't need a spec. -If you are unsure, please ask the PTL on IRC, or one of the other +If you are unsure, please ask the `PTL`_ on IRC, or one of the other nova-drivers. How do I get my blueprint approved? @@ -254,7 +201,7 @@ So you need your blueprint approved? Here is how: - if you need a spec, then please submit a nova-spec for review, see: https://docs.openstack.org/infra/manual/developers.html -Got any more questions? Contact the PTL or one of the other +Got any more questions? Contact the `PTL`_ or one of the other nova-specs-core who are awake at the same time as you. IRC is best as you will often get an immediate response, if they are too busy send him/her an email. @@ -275,7 +222,7 @@ lead to terse responses with very little preamble or nicety. That said, there's no excuse for being actively rude or mean. OpenStack has a Code of Conduct (https://www.openstack.org/legal/community-code-of-conduct/) and if you feel this has been breached please raise the matter -privately. Either with the relevant parties, the PTL or failing those, +privately. Either with the relevant parties, the `PTL`_ or failing those, the OpenStack Foundation. That said, there are many objective reasons for applying a -1 or -2 to a @@ -330,8 +277,9 @@ entirely. A few tips: you should do if you are unable to negotiate a resolution to an issue. -Secondly, Nova is a big project, be aware of the average wait times: -http://russellbryant.net/openstack-stats/nova-openreviews.html +Secondly, Nova is a big project, look for things that have been waiting +a long time for a review: +https://review.opendev.org/#/q/project:openstack/nova+status:open+age:2weeks Eventually you should get some +1s from people working through the review queue. Expect to get -1s as well. You can ask for reviews within @@ -359,8 +307,10 @@ It helps to apply correct tracking information. message tags as necessary. - If you have to raise a bug in Launchpad first, do it - this helps someone else find your fix. -- Make sure the bug has the correct priority and tag set: - https://wiki.openstack.org/wiki/Nova/BugTriage#Step_2:_Triage_Tagged_Bugs +- Make sure the bug has the correct `priority`_ and `tag`_ set. + +.. 
_priority: https://wiki.openstack.org/wiki/BugTriage#Task_2:_Prioritize_confirmed_bugs_.28bug_supervisors.29
+.. _tag: https://wiki.openstack.org/wiki/Nova/BugTriage#Tags

Features
^^^^^^^^

@@ -379,8 +329,6 @@ blueprint-only features:
For blueprint and spec features, do everything for blueprint-only
features and also:

-- If it's a project or subteam priority, add it to:
-  https://etherpad.openstack.org/p/rocky-nova-priorities-tracking
- Ensure your spec is approved for the current release cycle.

If your code is a project or subteam priority, the cores interested in
@@ -729,6 +677,74 @@ Getting that extra testing in place should stop a whole heap of bugs, again
giving reviewers more time to get to the issues or features you want
to add in the future.

+What is the Review-Priority label in Gerrit used for?
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+A bit of history first. Nova used so-called runway slots for multiple cycles.
+There were 3 slots, each of which could be filled with a patch series ready
+for review for two weeks at a time. We assumed that cores were focusing on
+reviewing the series while it was in the slot. We also assumed that the patch
+author was available and quickly fixing feedback while the series was in the
+slot. Meanwhile, other patches waited in a FIFO queue for a free slot.
+
+Our experience was:
+
+1) It only worked if somebody kept the state of the queue and the slots up to
+   date in the etherpad. So it needed a central authority to manage the
+   process. This did not scale well.
+
+2) It was only as effective as we, the cores, kept it honest and allocated our
+   review time to the patches in the slots. Such commitment is hard to get or
+   follow up on without being aggressive.
+
+So the aim of the new review priority process is to be as decentralized amongst
+cores as possible. We trust that when a core marks something as a priority,
+they also commit themselves to reviewing the patch. We also assume that if a
+core has reviewed a patch, that core should easily find another core as a
+second reviewer when needed.
+
+Note that this process is not meant to change how a patch is discovered to be
+ready for review. Patch authors are free to use any existing forums and ways
+to get review attention.
+
+Therefore we use the Review-Priority label in Gerrit in the following way:
+
+* Review-Priority is a label with 0 or +1 values that can be set by the
+  members of the core team.
+
+* A core sets the Review-Priority flag to +1 to indicate that they will help
+  the author to get the patch merged.
+
+* We expect that the cores will limit the number of patches marked with +1
+  Review-Priority based on their actual review bandwidth.
+
+* We expect that cores will check the list of reviews already having
+  Review-Priority +1 set by other cores before they mark a new one as such, to
+  see where they can help first by being the second core.
+
+* There will be a regular agenda item in the weekly meeting where the team
+  looks at the list of patches with a +1 mark, to keep an overall view of what
+  is happening in nova.
+
+Pros:
+
+* Decentralized
+
+* Each core is responsible for their own commitments
+
+* Review priority information is kept close to the review system
+
+Cons:
+
+* No externally enforced time limit on patches sitting idle with +1
+  Review-Priority
+
+* No externally enforced limit on how many things can be a priority at any
+  given time. 
+
 
 Process Evolution Ideas
 =======================
@@ -783,13 +799,9 @@ merge greater strength.
 
 In addition, having the subteam focus review efforts on a subset of patches
 should help concentrate the nova-core reviews they get, and increase the
 velocity of getting code merged.
 
-The first part is for subgroups to show they can do a great job of
-recommending patches. This is starting in here:
-https://etherpad.openstack.org/p/rocky-nova-priorities-tracking
-
-Ideally this would be done with gerrit user "tags" rather than an
-etherpad. There are some investigations by sdague in how feasible it
-would be to add tags to gerrit.
+Ideally this would be done with gerrit user "tags".
+There are some investigations by sdague in how feasible it would be to add
+tags to gerrit.
 
 Stop having to submit a spec for each release
 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
@@ -931,3 +943,5 @@ Main benefits:
   are added
 - allows a way to add experimental things into Nova, and track either
   their removal or maturation
+
+.. _PTL: https://governance.openstack.org/tc/reference/projects/nova.html
diff --git a/doc/source/contributor/project-scope.rst b/doc/source/contributor/project-scope.rst
index 4a688fad8e2..2ff344d7898 100644
--- a/doc/source/contributor/project-scope.rst
+++ b/doc/source/contributor/project-scope.rst
@@ -89,15 +89,15 @@ it is expected that gap with shrink over time.
 
 Driver Parity
 **************
 
-Our goal for the Nova API to provide a consistent abstraction to access
+Our goal for the Nova API is to provide a consistent abstraction to access
 on demand compute resources. We are not aiming to expose all features of
 all hypervisors. Where the details of the underlying hypervisor leak through
 our APIs, we have failed in this goal, and we must work towards better
-abstractions that are more interoperable.
+abstractions that are more `interoperable`_.
 
 This is one reason why we put so much emphasis on the use of Tempest in
 third party CI systems.
 
-The key tenant of driver parity is that if a feature is supported in a driver,
+The key tenet of driver parity is that if a feature is supported in a driver,
 it must feel the same to users, as if they were using any of the other
 drivers that also support that feature. The exception is that it is possible
 for widely different performance characteristics, but the effect of that API call
@@ -107,11 +107,13 @@ Following on from that, should a feature only be added to one of the drivers,
 we must make every effort to ensure another driver could be implemented to
 match that behavior.
 
-Its important that drivers support enough features, so the API actually
+It is important that drivers support enough features, so the API actually
 provides a consistent abstraction. For example, being unable to create a
-server or delete a server, would severely undermine that goal.
+server or delete a server would severely undermine that goal.
 In fact, Nova only ever manages resources it creates.
 
+.. _interoperable: https://www.openstack.org/brand/interop/
+
 Upgrades
 ---------
@@ -192,11 +194,10 @@ As Glance moves to deprecate its v1 API, we need to translate calls from the
 old v1 API we expose, to Glance's v2 API.
 
 The next API to mention is the networking APIs, in particular the
-security groups API. If you are using nova-network, Nova is still the only
-way to perform these network operations.
-But if you use Neutron, security groups has a much richer Neutron API,
-and if you use both Nova API and Neutron API, the miss match can lead to
-some very unexpected results, in certain cases.
+security groups API. Most of these APIs exist from when ``nova-network``
+existed and the proxies were added during the transition. However, the
+security groups API is much richer in Neutron, and if you use both the Nova
+API and the Neutron API, the mismatch can lead to some very unexpected
+results in certain cases.
 
 Our intention is to avoid adding to the problems we already have in this
 area.
diff --git a/doc/source/contributor/ptl-guide.rst b/doc/source/contributor/ptl-guide.rst
new file mode 100644
index 00000000000..d12e1beeb0e
--- /dev/null
+++ b/doc/source/contributor/ptl-guide.rst
@@ -0,0 +1,325 @@
+..
+      Licensed under the Apache License, Version 2.0 (the "License"); you may
+      not use this file except in compliance with the License. You may obtain
+      a copy of the License at
+
+          http://www.apache.org/licenses/LICENSE-2.0
+
+      Unless required by applicable law or agreed to in writing, software
+      distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+      WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+      License for the specific language governing permissions and limitations
+      under the License.
+
+Chronological PTL guide
+=======================
+
+This is just a reference guide that a PTL may use as an aid, if they choose.
+
+New PTL
+-------
+
+* Update the nova meeting chair
+
+  * https://github.com/openstack-infra/irc-meetings/blob/master/meetings/nova-team-meeting.yaml
+
+* Update the team wiki
+
+  * https://wiki.openstack.org/wiki/Nova#People
+
+* Get acquainted with the release schedule
+
+  * Example: https://wiki.openstack.org/wiki/Nova/Stein_Release_Schedule
+
+Project Team Gathering
+----------------------
+
+* Create the PTG planning and retrospective etherpads and announce them in the
+  nova meeting and on the dev mailing list
+
+  * Example: https://etherpad.openstack.org/p/nova-ptg-stein
+
+* Run sessions at the PTG
+
+* Have a priorities discussion at the PTG
+
+  * Example: https://etherpad.openstack.org/p/nova-ptg-stein-priorities
+
+* Sign up for the group photo at the PTG (if applicable)
+
+* Open review runways for the cycle
+
+  * Example: https://etherpad.openstack.org/p/nova-runways-stein
+
+After PTG
+---------
+
+* Send PTG session summaries to the dev mailing list
+
+* Make sure the cycle priorities spec gets reviewed and merged
+
+  * Example: https://specs.openstack.org/openstack/nova-specs/priorities/stein-priorities.html
+
+* Run the count-blueprints script daily to gather data for the cycle burndown
+  chart
+
+A few weeks before milestone 1
+------------------------------
+
+* Plan a spec review day
+
+* Periodically check the series goals others have proposed in the “Set series
+  goals” link:
+
+  * Example: https://blueprints.launchpad.net/nova/stein/+setgoals
+
+Milestone 1
+-----------
+
+* Do a milestone release of nova and python-novaclient (in launchpad only)
+
+  * This is launchpad bookkeeping only. With the latest release team changes,
+    projects no longer do milestone releases. See: https://releases.openstack.org/reference/release_models.html#cycle-with-milestones-legacy
+
+  * For nova, set the launchpad milestone release as “released” with the date
+
+* Release other libraries if there are significant enough changes since the
+  last release. When releasing the first version of a library for the cycle,
+  bump the minor version to leave room for future stable branch releases
+
+  * os-vif
+
+* Release stable branches of nova
+
+  * ``git checkout <branch>``
+
+  * ``git log --no-merges <last tag>..``
+
+  * Examine the commits that will go into the release and use them to decide
+    whether the release is a major, minor, or revision bump according to
+    semver
+
+  * Then, propose the release with a version number according to semver
+    (x.y.z)
+
+    * X - backward-incompatible changes
+
+    * Y - features
+
+    * Z - bug fixes
+
+  * Use the ``new-release`` command to generate the release
+
+    * https://releases.openstack.org/reference/using.html#using-new-release-command
+
+Summit
+------
+
+* Prepare the project update presentation. Enlist the help of others
+
+* Prepare the on-boarding session materials. Enlist the help of others
+
+A few weeks before milestone 2
+------------------------------
+
+* Plan a spec review day (optional)
+
+* Periodically check the series goals others have proposed in the “Set series
+  goals” link:
+
+  * Example: https://blueprints.launchpad.net/nova/stein/+setgoals
+
+Milestone 2
+-----------
+
+* Spec freeze
+
+* Release nova and python-novaclient
+
+* Release other libraries as needed
+
+* Stable branch releases of nova
+
+* For nova, set the launchpad milestone release as “released” with the date
+
+Shortly after spec freeze
+-------------------------
+
+* Create a blueprint status etherpad to help track blueprint work, especially
+  non-priority blueprint work, and help things get done by Feature Freeze
+  (FF). Example:
+
+  * https://etherpad.openstack.org/p/nova-stein-blueprint-status
+
+* Create or review a patch to add the next release’s specs directory so that
+  people can propose specs for the next release after the spec freeze for the
+  current release
+
+Non-client library release freeze
+---------------------------------
+
+* Final release for os-vif
+
+Milestone 3
+-----------
+
+* Feature freeze day
+
+* Client library freeze, release python-novaclient
+
+* Close out all blueprints, including “catch all” blueprints like mox,
+  versioned notifications
+
+* Stable branch releases of nova
+
+* For nova, set the launchpad milestone release as “released” with the date
+
+Week following milestone 3
+--------------------------
+
+* Consider announcing the FFE (feature freeze exception) process to have
+  people propose FFE requests to a special etherpad where they will be
+  reviewed and possibly sponsored:
+
+  * https://docs.openstack.org/nova/latest/contributor/process.html#non-priority-feature-freeze
+
+  .. note::
+
+     if there is only a short time between FF and RC1 (lately it’s been 2
+     weeks), then the only likely candidates will be low-risk things that are
+     almost done
+
+* Mark the max microversion for the release in the
+  :doc:`/reference/api-microversion-history`:
+
+  * Example: https://review.opendev.org/c/openstack/nova/+/719313
+
+A few weeks before RC
+---------------------
+
+* Make an RC1 todos etherpad, tag bugs as ``<release>-rc-potential`` and keep
+  track of them, example:
+
+  * https://etherpad.openstack.org/p/nova-stein-rc-potential
+
+* Go through the bug list and identify any rc-potential bugs and tag them
+
+RC
+--
+
+* Do the steps described on the release checklist wiki:
+
+  * https://wiki.openstack.org/wiki/Nova/ReleaseChecklist
+
+* If we want to drop backward-compat RPC code, we have to do a major RPC
+  version bump and coordinate it just before the major release:
+
+  * https://wiki.openstack.org/wiki/RpcMajorVersionUpdates
+
+  * Example: https://review.opendev.org/541035
+
+* “Merge latest translations” means translation patches
+
+  * Check for translations with:
+
+    * https://review.opendev.org/#/q/status:open+project:openstack/nova+branch:master+topic:zanata/translations
+
+* Do NOT plan to have more than one RC if possible. RC2 should only happen if
+  there was a mistake and something was missed for RC, or a new regression was
+  discovered
+
+* Do the RPC version aliases just before RC1 if no further RCs are planned,
+  else do them at RC2. In the past we used to update all service version
+  aliases (example: https://review.opendev.org/230132), but since we really
+  only support compute being backlevel/old during a rolling upgrade, we only
+  need to update the compute service alias (an illustrative sketch of this
+  mapping appears at the end of this section); see the related IRC discussion:
+  http://eavesdrop.openstack.org/irclogs/%23openstack-nova/%23openstack-nova.2018-08-08.log.html#t2018-08-08T17:13:45
+
+  * Example: https://review.opendev.org/642599
+
+  * More detail on how version aliases work: https://docs.openstack.org/nova/latest/configuration/config.html#upgrade-levels
+
+* Write the reno prelude for the release GA
+
+  * Example: https://review.opendev.org/644412
+
+* Write the cycle-highlights in marketing-friendly sentences and propose them
+  to the openstack/releases repo. These are usually based on the reno prelude
+  but made more readable and friendly
+
+  * Example: https://review.opendev.org/644697
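+
+As a purely illustrative sketch of the compute service alias mapping (the
+mapping lives in ``nova/compute/rpcapi.py``; the release name and version
+number below are made up, see the example reviews above for real changes):
+
+.. code-block:: python
+
+    # Maps release names to the final compute RPC API version of that cycle,
+    # so that [upgrade_levels]compute can be set to a release name.
+    VERSION_ALIASES = {
+        # ... entries for older releases ...
+        'train': '5.3',  # hypothetical version number
+    }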
+
+Immediately after RC
+--------------------
+
+* Look for bot-proposed changes to reno and stable/<branch>
+
+* Follow the post-release checklist
+
+  * https://wiki.openstack.org/wiki/Nova/ReleaseChecklist
+
+  * Add database migration placeholders
+
+    * Example: https://review.opendev.org/650964
+
+  * Drop old RPC compat code (if there was an RPC major version bump)
+
+    * Example: https://review.opendev.org/543580
+
+  * Bump the oldest supported compute service version
+
+    * https://review.opendev.org/#/c/738482/
+
+* Create the launchpad series for the next cycle
+
+* Set the development focus of the project to the new cycle series
+
+* Set the status of the new series to “active development”
+
+* Set the last series status to “current stable branch release”
+
+* Set the previous-to-last series status to “supported”
+
+* Repeat the launchpad steps above for python-novaclient
+
+* Register milestones in launchpad for the new cycle based on the new cycle
+  release schedule
+
+* Make sure the specs directory for the next cycle gets created so people can
+  start proposing new specs
+
+* Make sure to move implemented specs from the previous release
+
+  * Use ``tox -e move-implemented-specs <release>``
+
+  * Also remove the template from ``doc/source/specs/<release>/index.rst``
+
+  * Also delete the template file ``doc/source/specs/<release>/template.rst``
+
+* Create the new release wiki:
+
+  * Example: https://wiki.openstack.org/wiki/Nova/Train_Release_Schedule
+
+* Update the contributor guide for the new cycle
+
+  * Example: https://review.opendev.org/#/c/754427/
+
+Miscellaneous Notes
+-------------------
+
+How to approve a launchpad blueprint
+************************************
+
+* Set the approver as the person who +W'd the spec, or set it to self if it’s
+  specless
+
+* Set Direction => Approved and Definition => Approved, and make sure the
+  Series goal is set to the current release. If code is already proposed, set
+  Implementation => Needs Code Review
+
+* Add a comment to the Whiteboard explaining the approval, with a date
+  (launchpad does not record approval dates). For example: “We discussed this
+  in the team meeting and agreed to approve this for <release>. -- <nick>”
+
+How to complete a launchpad blueprint
+*************************************
+
+* Set Implementation => Implemented. The completion date will be recorded by
+  launchpad
diff --git a/doc/source/contributor/releasenotes.rst b/doc/source/contributor/releasenotes.rst
index 996889f43ea..182defd1d5e 100644
--- a/doc/source/contributor/releasenotes.rst
+++ b/doc/source/contributor/releasenotes.rst
@@ -6,9 +6,10 @@ Release Notes
 What is reno ?
 --------------
 
-Nova uses :reno-doc:`reno <>` for providing release notes in-tree. That means
-that a patch can include a *reno file* or a series can have a follow-on change
-containing that file explaining what the impact is.
+Nova uses `reno <https://docs.openstack.org/reno/latest/>`__ for providing
+release notes in-tree. That means that a patch can include a *reno file* or a
+series can have a follow-on change containing that file explaining what the
+impact is.
 
 A *reno file* is a YAML file written in the ``releasenotes/notes`` tree which
 is generated using the *reno* tool this way:
@@ -20,7 +21,9 @@ is generated using the *reno* tool this way:
 where usually ``<topic>`` can be ``bp-<blueprint_name>`` for a blueprint or
 ``bug-XXXXXX`` for a bugfix.
 
-Refer to the :reno-doc:`reno documentation <user/usage.html>` for more information.
+Refer to the `reno documentation
+<https://docs.openstack.org/reno/latest/user/usage.html>`__ for more
+information.
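+
+For illustration, the body of a *reno file* might look like the following
+sketch (the section name and text are examples only, not taken from a real
+note):
+
+.. code-block:: yaml
+
+    ---
+    fixes:
+      - |
+        Fixed an issue where ... (describe the user-visible impact here).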
When a release note is needed diff --git a/doc/source/contributor/resize-and-cold-migrate.rst b/doc/source/contributor/resize-and-cold-migrate.rst new file mode 100644 index 00000000000..b425b37e863 --- /dev/null +++ b/doc/source/contributor/resize-and-cold-migrate.rst @@ -0,0 +1,198 @@ +======================= +Resize and cold migrate +======================= + +The `resize API`_ and `cold migrate API`_ are commonly confused in nova because +the internal `API code`_, `conductor code`_ and `compute code`_ use the same +methods. This document explains some of the differences in what +happens between a resize and cold migrate operation. + +For the most part this document describes +:term:`same-cell resize `. +For details on :term:`cross-cell resize `, refer to +:doc:`/admin/configuration/cross-cell-resize`. + +High level +~~~~~~~~~~ + +:doc:`Cold migrate ` is an operation performed by an +administrator to power off and move a server from one host to a **different** +host using the **same** flavor. Volumes and network interfaces are disconnected +from the source host and connected on the destination host. The type of file +system between the hosts and image backend determine if the server files and +disks have to be copied. If copy is necessary then root and ephemeral disks are +copied and swap disks are re-created. + +:doc:`Resize ` is an operation which can be performed by a +non-administrative owner of the server (the user) with a **different** flavor. +The new flavor can change certain aspects of the server such as the number of +CPUS, RAM and disk size. Otherwise for the most part the internal details are +the same as a cold migration. + +Scheduling +~~~~~~~~~~ + +Depending on how the API is configured for +:oslo.config:option:`allow_resize_to_same_host`, the server may be able to be +resized on the current host. *All* compute drivers support *resizing* to the +same host but *only* the vCenter driver supports *cold migrating* to the same +host. Enabling resize to the same host is necessary for features such as +strict affinity server groups where there are more than one server in the same +affinity group. + +Starting with `microversion 2.56`_ an administrator can specify a destination +host for the cold migrate operation. Resize does not allow specifying a +destination host. + +Flavor +~~~~~~ + +As noted above, with resize the flavor *must* change and with cold migrate the +flavor *will not* change. + +Resource claims +~~~~~~~~~~~~~~~ + +Both resize and cold migration perform a `resize claim`_ on the destination +node. Historically the resize claim was meant as a safety check on the selected +node to work around race conditions in the scheduler. Since the scheduler +started `atomically claiming`_ VCPU, MEMORY_MB and DISK_GB allocations using +Placement the role of the resize claim has been reduced to detecting the same +conditions but for resources like PCI devices and NUMA topology which, at least +as of the 20.0.0 (Train) release, are not modeled in Placement and as such are +not atomic. + +If this claim fails, the operation can be rescheduled to an alternative +host, if there are any. The number of possible alternative hosts is determined +by the :oslo.config:option:`scheduler.max_attempts` configuration option. + +Allocations +~~~~~~~~~~~ + +Since the 16.0.0 (Pike) release, the scheduler uses the `placement service`_ +to filter compute nodes (resource providers) based on information in the flavor +and image used to build the server. 
Once the scheduler runs through its filters +and weighers and picks a host, resource class `allocations`_ are atomically +consumed in placement with the server as the consumer. + +During both resize and cold migrate operations, the allocations held by the +server consumer against the source compute node resource provider are `moved`_ +to a `migration record`_ and the scheduler will create allocations, held by the +instance consumer, on the selected destination compute node resource provider. +This is commonly referred to as `migration-based allocations`_ which were +introduced in the 17.0.0 (Queens) release. + +If the operation is successful and confirmed, the source node allocations held +by the migration record are `dropped`_. If the operation fails or is reverted, +the source compute node resource provider allocations held by the migration +record are `reverted`_ back to the instance consumer and the allocations +against the destination compute node resource provider are dropped. + +Summary of differences +~~~~~~~~~~~~~~~~~~~~~~ + +.. list-table:: + :header-rows: 1 + + * - + - Resize + - Cold migrate + * - New flavor + - Yes + - No + * - Authorization (default) + - Admin or owner (user) + + Policy rule: ``os_compute_api:servers:resize`` + - Admin only + + Policy rule: ``os_compute_api:os-migrate-server:migrate`` + * - Same host + - Maybe + - Only vCenter + * - Can specify target host + - No + - Yes (microversion >= 2.56) + +Sequence Diagrams +~~~~~~~~~~~~~~~~~ + +The following diagrams are current as of the 21.0.0 (Ussuri) release. + +Resize +------ + +This is the sequence of calls to get the server to ``VERIFY_RESIZE`` status. + +.. seqdiag:: + + seqdiag { + API; Conductor; Scheduler; Source; Destination; + edge_length = 300; + span_height = 15; + activation = none; + default_note_color = white; + + API -> Conductor [label = "cast", note = "resize_instance/migrate_server"]; + Conductor => Scheduler [label = "call", note = "select_destinations"]; + Conductor -> Destination [label = "cast", note = "prep_resize"]; + Source <- Destination [label = "cast", leftnote = "resize_instance"]; + Source -> Destination [label = "cast", note = "finish_resize"]; + } + +Confirm resize +-------------- + +This is the sequence of calls when confirming `or deleting`_ a server in +``VERIFY_RESIZE`` status. + +Note that in the below diagram, if confirming a resize while deleting a server +the API synchronously calls the source compute service. + +.. seqdiag:: + + seqdiag { + API; Source; + edge_length = 300; + span_height = 15; + activation = none; + default_note_color = white; + + API -> Source [label = "cast (or call if deleting)", note = "confirm_resize"]; + } + +Revert resize +------------- + +This is the sequence of calls when reverting a server in ``VERIFY_RESIZE`` +status. + +.. seqdiag:: + + seqdiag { + API; Source; Destination; + edge_length = 300; + span_height = 15; + activation = none; + default_note_color = white; + + API -> Destination [label = "cast", note = "revert_resize"]; + Source <- Destination [label = "cast", leftnote = "finish_revert_resize"]; + } + +.. _resize API: https://docs.openstack.org/api-ref/compute/#resize-server-resize-action +.. _cold migrate API: https://docs.openstack.org/api-ref/compute/#migrate-server-migrate-action +.. _API code: https://opendev.org/openstack/nova/src/tag/19.0.0/nova/compute/api.py#L3568 +.. _conductor code: https://opendev.org/openstack/nova/src/tag/19.0.0/nova/conductor/manager.py#L297 +.. 
_compute code: https://opendev.org/openstack/nova/src/tag/19.0.0/nova/compute/manager.py#L4445 +.. _microversion 2.56: https://docs.openstack.org/nova/latest/reference/api-microversion-history.html#id52 +.. _resize claim: https://opendev.org/openstack/nova/src/tag/19.0.0/nova/compute/resource_tracker.py#L248 +.. _atomically claiming: https://opendev.org/openstack/nova/src/tag/19.0.0/nova/scheduler/filter_scheduler.py#L239 +.. _moved: https://opendev.org/openstack/nova/src/tag/19.0.0/nova/conductor/tasks/migrate.py#L28 +.. _placement service: https://docs.openstack.org/placement/latest/ +.. _allocations: https://docs.openstack.org/api-ref/placement/#allocations +.. _migration record: https://docs.openstack.org/api-ref/compute/#migrations-os-migrations +.. _migration-based allocations: https://specs.openstack.org/openstack/nova-specs/specs/queens/implemented/migration-allocations.html +.. _dropped: https://opendev.org/openstack/nova/src/tag/19.0.0/nova/compute/manager.py#L4048 +.. _reverted: https://opendev.org/openstack/nova/src/tag/19.0.0/nova/compute/manager.py#L4233 +.. _or deleting: https://opendev.org/openstack/nova/src/tag/19.0.0/nova/compute/api.py#L2135 diff --git a/doc/source/contributor/testing.rst b/doc/source/contributor/testing.rst index e78ef49fe4b..f149ee28e82 100644 --- a/doc/source/contributor/testing.rst +++ b/doc/source/contributor/testing.rst @@ -32,6 +32,13 @@ For details on plans to report the current test coverage, refer to Running tests and reporting results =================================== +Running tests locally +--------------------- + +Please see +https://opendev.org/openstack/nova/src/branch/master/HACKING.rst#running-tests + + Voting in Gerrit ---------------- @@ -79,13 +86,22 @@ The correct level of unit test coverage is very subjective, and as such we are not aiming for a particular percentage of coverage, rather we are aiming for good coverage. Generally, every code change should have a related unit test: -https://github.com/openstack/nova/blob/master/HACKING.rst#creating-unit-tests +https://opendev.org/openstack/nova/src/branch/master/HACKING.rst#creating-unit-tests Integration tests ----------------- Today, our integration tests involve running the Tempest test suite on a -variety of Nova deployment scenarios. +variety of Nova deployment scenarios. The integration job setup is defined +in the ``.zuul.yaml`` file in the root of the nova repository. Jobs are +restricted by queue: + +* ``check``: jobs in this queue automatically run on all proposed changes even + with non-voting jobs +* ``gate``: jobs in this queue automatically run on all approved changes + (voting jobs only) +* ``experimental``: jobs in this queue are non-voting and run on-demand by + leaving a review comment on the change of "check experimental" In addition, we have third parties running the tests on their preferred Nova deployment scenario. @@ -107,4 +123,4 @@ Interoperability tests The DefCore committee maintains a list that contains a subset of Tempest tests. These are used to verify if a particular Nova deployment's API responds as -expected. For more details, see: https://github.com/openstack/defcore +expected. 
For more details, see: https://opendev.org/osf/interop diff --git a/doc/source/contributor/testing/down-cell.rst b/doc/source/contributor/testing/down-cell.rst new file mode 100644 index 00000000000..98065f4bdaf --- /dev/null +++ b/doc/source/contributor/testing/down-cell.rst @@ -0,0 +1,238 @@ +================== +Testing Down Cells +================== + +This document describes how to recreate a down-cell scenario in a single-node +devstack environment. This can be useful for testing the reliability of the +controller services when a cell in the deployment is down. + + +Setup +===== + +DevStack config +--------------- + +This guide is based on a devstack install from the Train release using +an Ubuntu Bionic 18.04 VM with 8 VCPU, 8 GB RAM and 200 GB of disk following +the `All-In-One Single Machine`_ guide. + +The following minimal local.conf was used: + +.. code-block:: ini + + [[local|localrc]] + # Define passwords + OS_PASSWORD=openstack1 + SERVICE_TOKEN=$OS_PASSWORD + ADMIN_PASSWORD=$OS_PASSWORD + MYSQL_PASSWORD=$OS_PASSWORD + RABBIT_PASSWORD=$OS_PASSWORD + SERVICE_PASSWORD=$OS_PASSWORD + # Logging config + LOGFILE=$DEST/logs/stack.sh.log + LOGDAYS=2 + # Disable non-essential services + disable_service horizon tempest + +.. _All-In-One Single Machine: https://docs.openstack.org/devstack/latest/guides/single-machine.html + +Populate cell1 +-------------- + +Create a test server first so there is something in cell1: + +.. code-block:: console + + $ source openrc admin admin + $ IMAGE=$(openstack image list -f value -c ID) + $ openstack server create --wait --flavor m1.tiny --image $IMAGE cell1-server + + +Take down cell1 +=============== + +Break the connection to the cell1 database by changing the +``database_connection`` URL, in this case with an invalid host IP: + +.. code-block:: console + + mysql> select database_connection from cell_mappings where name='cell1'; + +-------------------------------------------------------------------+ + | database_connection | + +-------------------------------------------------------------------+ + | mysql+pymysql://root:openstack1@127.0.0.1/nova_cell1?charset=utf8 | + +-------------------------------------------------------------------+ + 1 row in set (0.00 sec) + + mysql> update cell_mappings set database_connection='mysql+pymysql://root:openstack1@192.0.0.1/nova_cell1?charset=utf8' where name='cell1'; + Query OK, 1 row affected (0.01 sec) + Rows matched: 1 Changed: 1 Warnings: 0 + + +Update controller services +========================== + +Prepare the controller services for the down cell. See +:ref:`Handling cell failures ` for details. + +Modify nova.conf +---------------- + +Configure the API to avoid long timeouts and slow start times due to +`bug 1815697`_ by modifying ``/etc/nova/nova.conf``: + +.. code-block:: ini + + [database] + ... + max_retries = 1 + retry_interval = 1 + + [upgrade_levels] + ... + compute = stein # N-1 from train release, just something other than "auto" + +.. _bug 1815697: https://bugs.launchpad.net/nova/+bug/1815697 + +Restart services +---------------- + +.. note:: It is useful to tail the n-api service logs in another screen to + watch for errors / warnings in the logs due to down cells: + + .. code-block:: console + + $ sudo journalctl -f -a -u devstack@n-api.service + +Restart controller services to flush the cell cache: + +.. code-block:: console + + $ sudo systemctl restart devstack@n-api.service devstack@n-super-cond.service devstack@n-sch.service + + +Test cases +========== + +1. 
Try to create a server which should fail and go to cell0. + + .. code-block:: console + + $ openstack server create --wait --flavor m1.tiny --image $IMAGE cell0-server + + You can expect to see errors like this in the n-api logs: + + .. code-block:: console + + Apr 04 20:48:22 train devstack@n-api.service[10884]: ERROR nova.context [None req-fdaff415-48b9-44a7-b4c3-015214e80b90 None None] Error gathering result from cell 4f495a21-294a-4051-9a3d-8b34a250bbb4: DBConnectionError: (pymysql.err.OperationalError) (2003, "Can't connect to MySQL server on u'192.0.0.1' ([Errno 101] ENETUNREACH)") (Background on this error at: http://sqlalche.me/e/e3q8) + Apr 04 20:48:22 train devstack@n-api.service[10884]: ERROR nova.context Traceback (most recent call last): + Apr 04 20:48:22 train devstack@n-api.service[10884]: ERROR nova.context File "/opt/stack/nova/nova/context.py", line 441, in gather_result + Apr 04 20:48:22 train devstack@n-api.service[10884]: ERROR nova.context result = fn(cctxt, *args, **kwargs) + Apr 04 20:48:22 train devstack@n-api.service[10884]: ERROR nova.context File "/opt/stack/nova/nova/db/sqlalchemy/api.py", line 211, in wrapper + Apr 04 20:48:22 train devstack@n-api.service[10884]: ERROR nova.context with reader_mode.using(context): + Apr 04 20:48:22 train devstack@n-api.service[10884]: ERROR nova.context File "/usr/lib/python2.7/contextlib.py", line 17, in __enter__ + Apr 04 20:48:22 train devstack@n-api.service[10884]: ERROR nova.context return self.gen.next() + Apr 04 20:48:22 train devstack@n-api.service[10884]: ERROR nova.context File "/usr/local/lib/python2.7/dist-packages/oslo_db/sqlalchemy/enginefacade.py", line 1061, in _transaction_scope + Apr 04 20:48:22 train devstack@n-api.service[10884]: ERROR nova.context context=context) as resource: + Apr 04 20:48:22 train devstack@n-api.service[10884]: ERROR nova.context File "/usr/lib/python2.7/contextlib.py", line 17, in __enter__ + Apr 04 20:48:22 train devstack@n-api.service[10884]: ERROR nova.context return self.gen.next() + Apr 04 20:48:22 train devstack@n-api.service[10884]: ERROR nova.context File "/usr/local/lib/python2.7/dist-packages/oslo_db/sqlalchemy/enginefacade.py", line 659, in _session + Apr 04 20:48:22 train devstack@n-api.service[10884]: ERROR nova.context bind=self.connection, mode=self.mode) + Apr 04 20:48:22 train devstack@n-api.service[10884]: ERROR nova.context File "/usr/local/lib/python2.7/dist-packages/oslo_db/sqlalchemy/enginefacade.py", line 418, in _create_session + Apr 04 20:48:22 train devstack@n-api.service[10884]: ERROR nova.context self._start() + Apr 04 20:48:22 train devstack@n-api.service[10884]: ERROR nova.context File "/usr/local/lib/python2.7/dist-packages/oslo_db/sqlalchemy/enginefacade.py", line 510, in _start + Apr 04 20:48:22 train devstack@n-api.service[10884]: ERROR nova.context engine_args, maker_args) + Apr 04 20:48:22 train devstack@n-api.service[10884]: ERROR nova.context File "/usr/local/lib/python2.7/dist-packages/oslo_db/sqlalchemy/enginefacade.py", line 534, in _setup_for_connection + Apr 04 20:48:22 train devstack@n-api.service[10884]: ERROR nova.context sql_connection=sql_connection, **engine_kwargs) + Apr 04 20:48:22 train devstack@n-api.service[10884]: ERROR nova.context File "/usr/local/lib/python2.7/dist-packages/debtcollector/renames.py", line 43, in decorator + Apr 04 20:48:22 train devstack@n-api.service[10884]: ERROR nova.context return wrapped(*args, **kwargs) + Apr 04 20:48:22 train devstack@n-api.service[10884]: ERROR nova.context File 
"/usr/local/lib/python2.7/dist-packages/oslo_db/sqlalchemy/engines.py", line 201, in create_engine + Apr 04 20:48:22 train devstack@n-api.service[10884]: ERROR nova.context test_conn = _test_connection(engine, max_retries, retry_interval) + Apr 04 20:48:22 train devstack@n-api.service[10884]: ERROR nova.context File "/usr/local/lib/python2.7/dist-packages/oslo_db/sqlalchemy/engines.py", line 387, in _test_connection + Apr 04 20:48:22 train devstack@n-api.service[10884]: ERROR nova.context six.reraise(type(de_ref), de_ref) + Apr 04 20:48:22 train devstack@n-api.service[10884]: ERROR nova.context File "", line 3, in reraise + Apr 04 20:48:22 train devstack@n-api.service[10884]: ERROR nova.context DBConnectionError: (pymysql.err.OperationalError) (2003, "Can't connect to MySQL server on u'192.0.0.1' ([Errno 101] ENETUNREACH)") (Background on this error at: http://sqlalche.me/e/e3q8) + Apr 04 20:48:22 train devstack@n-api.service[10884]: ERROR nova.context + Apr 04 20:48:22 train devstack@n-api.service[10884]: WARNING nova.objects.service [None req-1cf4bf5c-2f74-4be0-a18d-51ff81df57dd admin admin] Failed to get minimum service version for cell 4f495a21-294a-4051-9a3d-8b34a250bbb4 + +2. List servers with the 2.69 microversion for down cells. + + .. note:: Requires python-openstackclient >= 3.18.0 for v2.69 support. + + The server in cell1 (which is down) will show up with status UNKNOWN: + + .. code-block:: console + + $ openstack --os-compute-api-version 2.69 server list + +--------------------------------------+--------------+---------+----------+--------------------------+--------+ + | ID | Name | Status | Networks | Image | Flavor | + +--------------------------------------+--------------+---------+----------+--------------------------+--------+ + | 8e90f1f0-e8dd-4783-8bb3-ec8d594e60f1 | | UNKNOWN | | | | + | afd45d84-2bd7-4e49-9dff-93359f742bc1 | cell0-server | ERROR | | cirros-0.4.0-x86_64-disk | | + +--------------------------------------+--------------+---------+----------+--------------------------+--------+ + +3. Using v2.1 the UNKNOWN server is filtered out by default due to + :oslo.config:option:`api.list_records_by_skipping_down_cells`: + + .. code-block:: console + + $ openstack --os-compute-api-version 2.1 server list + +--------------------------------------+--------------+--------+----------+--------------------------+---------+ + | ID | Name | Status | Networks | Image | Flavor | + +--------------------------------------+--------------+--------+----------+--------------------------+---------+ + | afd45d84-2bd7-4e49-9dff-93359f742bc1 | cell0-server | ERROR | | cirros-0.4.0-x86_64-disk | m1.tiny | + +--------------------------------------+--------------+--------+----------+--------------------------+---------+ + +4. Configure nova-api with ``list_records_by_skipping_down_cells=False`` + + .. code-block:: ini + + [api] + list_records_by_skipping_down_cells = False + +5. Restart nova-api and then listing servers should fail: + + .. code-block:: console + + $ sudo systemctl restart devstack@n-api.service + $ openstack --os-compute-api-version 2.1 server list + Unexpected API Error. Please report this at http://bugs.launchpad.net/nova/ and attach the Nova API log if possible. + (HTTP 500) (Request-ID: req-e2264d67-5b6c-4f17-ae3d-16c7562f1b69) + +6. Try listing compute services with a down cell. + + The services from the down cell are skipped: + + .. 
code-block:: console + + $ openstack --os-compute-api-version 2.1 compute service list + +----+------------------+-------+----------+---------+-------+----------------------------+ + | ID | Binary | Host | Zone | Status | State | Updated At | + +----+------------------+-------+----------+---------+-------+----------------------------+ + | 2 | nova-scheduler | train | internal | enabled | up | 2019-04-04T21:12:47.000000 | + | 6 | nova-consoleauth | train | internal | enabled | up | 2019-04-04T21:12:38.000000 | + | 7 | nova-conductor | train | internal | enabled | up | 2019-04-04T21:12:47.000000 | + +----+------------------+-------+----------+---------+-------+----------------------------+ + + With 2.69 the nova-compute service from cell1 is shown with status UNKNOWN: + + .. code-block:: console + + $ openstack --os-compute-api-version 2.69 compute service list + +--------------------------------------+------------------+-------+----------+---------+-------+----------------------------+ + | ID | Binary | Host | Zone | Status | State | Updated At | + +--------------------------------------+------------------+-------+----------+---------+-------+----------------------------+ + | f68a96d9-d994-4122-a8f9-1b0f68ed69c2 | nova-scheduler | train | internal | enabled | up | 2019-04-04T21:13:47.000000 | + | 70cd668a-6d60-4a9a-ad83-f863920d4c44 | nova-consoleauth | train | internal | enabled | up | 2019-04-04T21:13:38.000000 | + | ca88f023-1de4-49e0-90b0-581e16bebaed | nova-conductor | train | internal | enabled | up | 2019-04-04T21:13:47.000000 | + | | nova-compute | train | | UNKNOWN | | | + +--------------------------------------+------------------+-------+----------+---------+-------+----------------------------+ + + +Future +====== + +This guide could be expanded for having multiple non-cell0 cells where one +cell is down while the other is available and go through scenarios where the +down cell is marked as disabled to take it out of scheduling consideration. diff --git a/doc/source/contributor/testing/eventlet-profiling.rst b/doc/source/contributor/testing/eventlet-profiling.rst new file mode 100644 index 00000000000..96c58ba9112 --- /dev/null +++ b/doc/source/contributor/testing/eventlet-profiling.rst @@ -0,0 +1,274 @@ +======================= +Profiling With Eventlet +======================= + +When performance of one of the Nova services is worse than expected, and other +sorts of analysis do not lead to candidate fixes, profiling is an excellent +tool for producing detailed analysis of what methods in the code are called the +most and which consume the most time. + +Because most Nova services use eventlet_, the standard profiling tool provided +with Python, cProfile_, will not work. Something is required to keep track of +changing tasks. Thankfully eventlet comes with +``eventlet.green.profile.Profile``, a mostly undocumented class that provides a +similar (but not identical) API to the one provided by Python's ``Profile`` +while outputting the same format. + +.. note:: The eventlet Profile outputs the ``prof`` format produced by + ``profile``, which is not the same as that output by ``cProfile``. + Some analysis tools (for example, SnakeViz_) only read the latter + so the options for analyzing eventlet profiling are not always + deluxe (see below). + +Setup +===== + +This guide assumes the Nova service being profiled is running devstack, but +that is not necessary. What is necessary is that the code associated with the +service can be changed and the service restarted, in place. 
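+
+As a minimal sketch of the profiler API in isolation (the profiled expression
+is just a placeholder; the Example section below shows the real pattern in
+context):
+
+.. code-block:: python
+
+    from eventlet.green import profile
+
+    pr = profile.Profile()
+    pr.start()  # cProfile.Profile would use enable() here
+    sum(i * i for i in range(100000))  # placeholder for the code under test
+    pr.stop()  # cProfile.Profile would use disable() here
+    # Writes stats in the "prof" format that the pstats module can read.
+    pr.dump_stats('/tmp/example.prof')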
+ +Profiling the entire service will produce mostly noise and the output will be +confusing because different tasks will operate during the profile run. It is +better to begin the process with a candidate task or method *within* the +service that can be associated with an identifier. For example, +``select_destinations`` in the ``SchedulerManager`` can be associated with the +list of ``instance_uuids`` passed to it and it runs only once for that set of +instance UUIDs. + +The process for profiling is: + +#. Identify the method to be profiled. + +#. Populate the environment with sufficient resources to exercise the code. For + example you may wish to use the FakeVirtDriver_ to have nova aware of + multiple ``nova-compute`` processes. Or you may wish to launch many + instances if you are evaluating a method that loops over instances. + +#. At the start of that method, change the code to instantiate a ``Profile`` + object and ``start()`` it. + +#. At the end of that method, change the code to ``stop()`` profiling and write + the data (with ``dump_stats()``) to a reasonable location. + +#. Restart the service. + +#. Cause the method being evaluated to run. + +#. Analyze the profile data with the pstats_ module. + +.. note:: ``stop()`` and ``start()`` are two of the ways in which the eventlet + ``Profile`` API differs from the stdlib. There the methods are + ``enable()`` and ``disable()``. + +Example +======= + +For this example we will analyze ``select_destinations`` in the +``FilterScheduler``. A known problem is that it does excessive work when +presented with too many candidate results from the Placement service. We'd like +to know why. + +We'll configure and run devstack_ with FakeVirtDriver_ so there are several +candidate hypervisors (the following ``local.conf`` is also useful for other +profiling and benchmarking scenarios so not all changes are relevant here): + +.. code-block:: ini + + [[local|localrc]] + ADMIN_PASSWORD=secret + DATABASE_PASSWORD=$ADMIN_PASSWORD + RABBIT_PASSWORD=$ADMIN_PASSWORD + SERVICE_PASSWORD=$ADMIN_PASSWORD + VIRT_DRIVER=fake + # You may use different numbers of fake computes, but be careful: 100 will + # completely overwhelm a 16GB, 16VPCU server. In the test profiles below a + # value of 50 was used, on a 16GB, 16VCPU server. + NUMBER_FAKE_NOVA_COMPUTE=25 + disable_service cinder + disable_service horizon + disable_service dstat + disable_service tempest + + [[post-config|$NOVA_CONF]] + rpc_response_timeout = 300 + + # Disable filtering entirely. For some profiling this will not be what you + # want. + [filter_scheduler] + enabled_filters = '""' + # Send only one type of notifications to avoid notification overhead. + [notifications] + notification_format = unversioned + +Change the code in ``nova/scheduler/manager.py`` as follows to start the +profiler at the start of the ``_select_destinations`` call and to dump the +statistics at the end. For example: + +.. 
code-block:: diff + + diff --git nova/scheduler/manager.py nova/scheduler/manager.py + index 9cee6b3bfc..4859b21fb1 100644 + --- nova/scheduler/manager.py + +++ nova/scheduler/manager.py + @@ -237,6 +237,10 @@ class SchedulerManager(manager.Manager): + alloc_reqs_by_rp_uuid, provider_summaries, + allocation_request_version=None, return_alternates=False, + ): + + from eventlet.green import profile + + pr = profile.Profile() + + pr.start() + + + self.notifier.info( + context, 'scheduler.select_destinations.start', + {'request_spec': spec_obj.to_legacy_request_spec_dict()}) + @@ -260,6 +264,9 @@ class SchedulerManager(manager.Manager): + action=fields_obj.NotificationAction.SELECT_DESTINATIONS, + phase=fields_obj.NotificationPhase.END) + + + pr.stop() + + pr.dump_stats('/tmp/select_destinations/%s.prof' % ':'.join(instance_uuids)) + + + return selections + + def _schedule( + +Make a ``/tmp/select_destinations`` directory that is writable by the user +nova-scheduler will run as. This is where the profile output will go. + +Restart the scheduler service. Note that ``systemctl restart`` may not kill +things sufficiently dead, so:: + + sudo systemctl stop devstack@n-sch + sleep 5 + sudo systemctl start devstack@n-sch + +Create a server (which will call ``select_destinations``):: + + openstack server create --image cirros-0.4.0-x86_64-disk --flavor c1 x1 + +In ``/tmp/select_destinations`` there should be a file with a name using the +UUID of the created server with a ``.prof`` extension. + +Change to that directory and view the profile using the pstats +`interactive mode`_:: + + python3 -m pstats ef044142-f3b8-409d-9af6-c60cea39b273.prof + +.. note:: The major version of python used to analyze the profile data must be + the same as the version used to run the process being profiled. + +Sort stats by their cumulative time:: + + ef044142-f3b8-409d-9af6-c60cea39b273.prof% sort cumtime + ef044142-f3b8-409d-9af6-c60cea39b273.prof% stats 10 + Tue Aug 6 17:17:56 2019 ef044142-f3b8-409d-9af6-c60cea39b273.prof + + 603477 function calls (587772 primitive calls) in 2.294 seconds + + Ordered by: cumulative time + List reduced from 2484 to 10 due to restriction <10> + + ncalls tottime percall cumtime percall filename:lineno(function) + 1 0.000 0.000 1.957 1.957 profile:0(start) + 1 0.000 0.000 1.911 1.911 /mnt/share/opt/stack/nova/nova/scheduler/filter_scheduler.py:113(_schedule) + 1 0.000 0.000 1.834 1.834 /mnt/share/opt/stack/nova/nova/scheduler/filter_scheduler.py:485(_get_all_host_states) + 1 0.000 0.000 1.834 1.834 /mnt/share/opt/stack/nova/nova/scheduler/host_manager.py:757(get_host_states_by_uuids) + 1 0.004 0.004 1.818 1.818 /mnt/share/opt/stack/nova/nova/scheduler/host_manager.py:777(_get_host_states) + 104/103 0.001 0.000 1.409 0.014 /usr/local/lib/python3.6/dist-packages/oslo_versionedobjects/base.py:170(wrapper) + 50 0.001 0.000 1.290 0.026 /mnt/share/opt/stack/nova/nova/scheduler/host_manager.py:836(_get_instance_info) + 50 0.001 0.000 1.289 0.026 /mnt/share/opt/stack/nova/nova/scheduler/host_manager.py:820(_get_instances_by_host) + 103 0.001 0.000 0.890 0.009 /usr/local/lib/python3.6/dist-packages/sqlalchemy/orm/query.py:3325(__iter__) + 50 0.001 0.000 0.776 0.016 /mnt/share/opt/stack/nova/nova/objects/host_mapping.py:99(get_by_host) + +From this we can make a couple of useful inferences about ``get_by_host``: + +* It is called once for each of the 50 ``FakeVirtDriver`` hypervisors + configured for these tests. 
+ +* It (and the methods it calls internally) consumes about 40% of the entire + time spent running (``0.776 / 1.957``) the ``select_destinations`` method + (indicated by ``profile:0(start)``, above). + +Several other sort modes can be used. List those that are available by entering +``sort`` without arguments. + +Caveats +======= + +Real world use indicates that the eventlet profiler is not perfect. There are +situations where it will not always track switches between greenlets as well as +it could. This can result in profile data that does not make sense or random +slowdowns in the system being profiled. There is no one size fits all solution +to these issues; profiling eventlet services is more an art than science. +However, this section tries to provide a (hopefully) growing body of advice on +what to do to work around problems. + +General Advice +-------------- + +* Try to profile chunks of code that operate mostly within one module or class + and do not have many collaborators. The more convoluted the path through + the code, the more confused the profiler gets. + +* Similarly, where possible avoid profiling code that will trigger many + greenlet context switches; either specific spawns, or multiple types of I/O. + Instead, narrow the focus of the profiler. + +* If possible, avoid RPC. + +In nova-compute +--------------- + +The creation of this caveat section was inspired by issues experienced while +profiling ``nova-compute``. The ``nova-compute`` process is not allowed to +speak with a database server directly. Instead communication is mediated +through the conductor, communication happening via ``oslo.versionedobjects`` +and remote calls. Profiling methods such as ``update_available_resource`` in +the ResourceTracker, which needs information from the database, results in +profile data that can be analyzed but is incorrect and misleading. + +This can be worked around by temporarily changing ``nova-compute`` to allow it +to speak to the database directly: + +.. code-block:: diff + + diff --git a/nova/cmd/compute.py b/nova/cmd/compute.py + index 01fd20de2e..655d503158 100644 + --- a/nova/cmd/compute.py + +++ b/nova/cmd/compute.py + @@ -50,8 +50,10 @@ def main(): + + gmr.TextGuruMeditation.setup_autorun(version, conf=CONF) + + - cmd_common.block_db_access('nova-compute') + - objects_base.NovaObject.indirection_api = conductor_rpcapi.ConductorAPI() + + # Temporarily allow access to the database. You must update the config file + + # used by this process to set [database]/connection to the cell1 database. + + # cmd_common.block_db_access('nova-compute') + + # objects_base.NovaObject.indirection_api = conductor_rpcapi.ConductorAPI() + objects.Service.enable_min_version_cache() + server = service.Service.create(binary='nova-compute', + topic=compute_rpcapi.RPC_TOPIC) + +The configuration file used by the ``nova-compute`` process must also be +updated to ensure that it contains a setting for the relevant database: + +.. code-block:: ini + + [database] + connection = mysql+pymysql://root:secret@127.0.0.1/nova_cell1?charset=utf8 + +In a single node devstack setup ``nova_cell1`` is the right choice. The +connection string will vary in other setups. + +Once these changes are made, along with the profiler changes indicated in the +example above, ``nova-compute`` can be restarted and with luck some useful +profiling data will emerge. + +.. _eventlet: https://eventlet.net/ +.. _cProfile: https://docs.python.org/3/library/profile.html +.. _SnakeViz: https://jiffyclub.github.io/snakeviz/ +.. 
_devstack: https://docs.openstack.org/devstack/latest/ +.. _FakeVirtDriver: https://docs.openstack.org/devstack/latest/guides/nova.html#fake-virt-driver +.. _pstats: https://docs.python.org/3/library/profile.html#pstats.Stats +.. _interactive mode: https://www.stefaanlippens.net/python_profiling_with_pstats_interactive_mode/ diff --git a/doc/source/contributor/testing/libvirt-numa.rst b/doc/source/contributor/testing/libvirt-numa.rst index b95d72ce18d..589532373bd 100644 --- a/doc/source/contributor/testing/libvirt-numa.rst +++ b/doc/source/contributor/testing/libvirt-numa.rst @@ -55,13 +55,13 @@ guest with 8 virtual CPUs, 8 GB of RAM and 20 GB of disk space: .. code-block:: bash # cd /var/lib/libvirt/images - # wget https://download.fedoraproject.org/pub/fedora/linux/releases/24/Server/x86_64/iso/Fedora-Server-netinst-x86_64-24-1.2.iso + # wget https://download.fedoraproject.org/pub/fedora/linux/releases/29/Server/x86_64/iso/Fedora-Server-netinst-x86_64-29-1.2.iso # virt-install \ - --name f24x86_64 \ + --name f29x86_64 \ --ram 8000 \ --vcpus 8 \ - --file /var/lib/libvirt/images/f24x86_64.img \ + --file /var/lib/libvirt/images/f29x86_64.img \ --file-size 20 --cdrom /var/lib/libvirt/images/Fedora-Server-netinst-x86_64-24-1.2.iso \ --os-variant fedora23 @@ -105,7 +105,7 @@ devstack repo: .. code-block:: bash $ sudo dnf install git - $ git clone git://github.com/openstack-dev/devstack.git + $ git clone https://opendev.org/openstack/devstack $ cd devstack At this point a fairly standard devstack setup can be done with one exception: @@ -133,11 +133,8 @@ For example: RABBIT_PASSWORD=123456 [[post-config|$NOVA_CONF]] - [DEFAULT] - firewall_driver=nova.virt.firewall.NoopFirewallDriver - [filter_scheduler] - enabled_filters=RamFilter,ComputeFilter,AvailabilityZoneFilter,ComputeCapabilitiesFilter,ImagePropertiesFilter,PciPassthroughFilter,NUMATopologyFilter + enabled_filters=ComputeFilter,AvailabilityZoneFilter,ComputeCapabilitiesFilter,ImagePropertiesFilter,PciPassthroughFilter,NUMATopologyFilter EOF $ FORCE=yes ./stack.sh @@ -164,7 +161,7 @@ of nova libvirt guests boot a tiny instance: .. code-block:: bash $ . openrc admin - $ openstack server create --image cirros-0.3.4-x86_64-uec --flavor m1.tiny \ + $ openstack server create --image cirros-0.4.0-x86_64-disk --flavor m1.tiny \ cirros1 The host will be reporting NUMA topology, but there should only be a single @@ -173,57 +170,87 @@ example (with object versioning fields removed): .. 
code-block:: bash - $ mysql -u root -p123456 nova + $ mysql -u root -p123456 nova_cell1 MariaDB [nova]> select numa_topology from compute_nodes; +----------------------------------------------------------------------------+ | numa_topology | +----------------------------------------------------------------------------+ | { | "nova_object.name": "NUMATopology", + | "nova_object.namespace": "nova", + | "nova_object.version": "1.2", | "nova_object.data": { | "cells": [{ - | "nova_object.name": "NUMACell", - | "nova_object.data": { - | "cpu_usage": 0, - | "memory_usage": 0, - | "cpuset": [0, 1, 2, 3, 4, 5, 6, 7], - | "pinned_cpus": [], - | "siblings": [], - | "memory": 7793, - | "mempages": [ - | { - | "nova_object.name": "NUMAPagesTopology", - | "nova_object.data": { - | "used": 0, - | "total": 987430, - | "reserved":0, - | "size_kb": 4 - | }, - | }, - | { - | "nova_object.name": "NUMAPagesTopology", - | "nova_object.data": { - | "used": 0, - | "total": 0, - | "reserved":0, - | "size_kb": 2048 - | }, - | }, - | { - | "nova_object.name": "NUMAPagesTopology", - | "nova_object.data": { - | "used": 0, - | "total": 0, - | "reserved": 0, - | "size_kb": 1048576 - | }, - | } - | ], - | "id": 0 - | }, + | "nova_object.name": "NUMACell", + | "nova_object.namespace": "nova", + | "nova_object.version": "1.4", + | "nova_object.data": { + | "id": 0, + | "cpuset": [0, 1, 2, 3, 4, 5, 6, 7], + | "pcpuset": [0, 1, 2, 3, 4, 5, 6, 7], + | "memory": 7975, + | "cpu_usage": 0, + | "memory_usage": 0, + | "pinned_cpus": [], + | "siblings": [ + | [0], + | [1], + | [2], + | [3], + | [4], + | [5], + | [6], + | [7] + | ], + | "mempages": [{ + | "nova_object.name": "NUMAPagesTopology", + | "nova_object.namespace": "nova", + | "nova_object.version": "1.1", + | "nova_object.data": { + | "size_kb": 4, + | "total": 2041795, + | "used": 0, + | "reserved": 0 + | }, + | "nova_object.changes": ["size_kb", "total", "reserved", "used"] + | }, { + | "nova_object.name": "NUMAPagesTopology", + | "nova_object.namespace": "nova", + | "nova_object.version": "1.1", + | "nova_object.data": { + | "size_kb": 2048, + | "total": 0, + | "used": 0, + | "reserved": 0 + | }, + | "nova_object.changes": ["size_kb", "total", "reserved", "used"] + | }, { + | "nova_object.name": "NUMAPagesTopology", + | "nova_object.namespace": "nova", + | "nova_object.version": "1.1", + | "nova_object.data": { + | "size_kb": 1048576, + | "total": 0, + | "used": 0, + | "reserved": 0 + | }, + | "nova_object.changes": ["size_kb", "total", "reserved", "used"] + | }], + | "network_metadata": { + | "nova_object.name": "NetworkMetadata", + | "nova_object.namespace": "nova", + | "nova_object.version": "1.0", + | "nova_object.data": { + | "physnets": [], + | "tunneled": false + | }, + | "nova_object.changes": ["tunneled", "physnets"] + | } | }, - | ] + | "nova_object.changes": ["pinned_cpus", "memory_usage", "siblings", "mempages", "memory", "id", "network_metadata", "cpuset", "cpu_usage", "pcpuset"] + | }] | }, + | "nova_object.changes": ["cells"] | } +----------------------------------------------------------------------------+ @@ -255,7 +282,7 @@ And now back on the physical host edit the guest config as root: .. code-block:: bash - $ sudo virsh edit f21x86_64 + $ sudo virsh edit f29x86_64 The first thing is to change the `` block to do passthrough of the host CPU. In particular this exposes the "SVM" or "VMX" feature bits to the guest so @@ -279,7 +306,7 @@ Now start the guest again: .. 
code-block:: bash - # virsh start f24x86_64 + # virsh start f29x86_64 ...and login back in: @@ -307,141 +334,207 @@ topology setup for the guest: .. code-block:: bash - $ mysql -u root -p123456 nova + $ mysql -u root -p123456 nova_cell1 MariaDB [nova]> select numa_topology from compute_nodes; +----------------------------------------------------------------------------+ | numa_topology | +----------------------------------------------------------------------------+ | { | "nova_object.name": "NUMATopology", + | "nova_object.namespace": "nova", + | "nova_object.version": "1.2", | "nova_object.data": { - | "cells": [ - | { - | "nova_object.name": "NUMACell", - | "nova_object.data": { - | "cpu_usage": 0, - | "memory_usage": 0, - | "cpuset": [0, 1, 2, 3], - | "pinned_cpus": [], - | "siblings": [], - | "memory": 3856, - | "mempages": [ - | { - | "nova_object.name": "NUMAPagesTopology", - | "nova_object.data": { - | "used": 0, - | "total": 987231, - | "reserved": 0, - | "size_kb": 4 - | }, - | }, - | { - | "nova_object.name": "NUMAPagesTopology", - | "nova_object.data": { - | "used": 0, - | "total": 0, - | "reserved": 0, - | "size_kb": 2048 - | }, - | }, - | { - | "nova_object.name": "NUMAPagesTopology", - | "nova_object.data": { - | "used": 0, - | "total": 0, - | "reserved": 0, - | "size_kb": 1048576 - | }, - | } - | ], - | "id": 0 - | }, + | "cells": [{ + | "nova_object.name": "NUMACell", + | "nova_object.namespace": "nova", + | "nova_object.version": "1.4", + | "nova_object.data": { + | "id": 0, + | "cpuset": [0, 1, 2, 3], + | "pcpuset": [0, 1, 2, 3], + | "memory": 3966, + | "cpu_usage": 0, + | "memory_usage": 0, + | "pinned_cpus": [], + | "siblings": [ + | [2], + | [0], + | [3], + | [1] + | ], + | "mempages": [{ + | "nova_object.name": "NUMAPagesTopology", + | "nova_object.namespace": "nova", + | "nova_object.version": "1.1", + | "nova_object.data": { + | "size_kb": 4, + | "total": 1015418, + | "used": 0, + | "reserved": 0 + | }, + | "nova_object.changes": ["total", "size_kb", "used", "reserved"] + | }, { + | "nova_object.name": "NUMAPagesTopology", + | "nova_object.namespace": "nova", + | "nova_object.version": "1.1", + | "nova_object.data": { + | "size_kb": 2048, + | "total": 0, + | "used": 0, + | "reserved": 0 + | }, + | "nova_object.changes": ["total", "size_kb", "used", "reserved"] + | }, { + | "nova_object.name": "NUMAPagesTopology", + | "nova_object.namespace": "nova", + | "nova_object.version": "1.1", + | "nova_object.data": { + | "size_kb": 1048576, + | "total": 0, + | "used": 0, + | "reserved": 0 + | }, + | "nova_object.changes": ["total", "size_kb", "used", "reserved"] + | }], + | "network_metadata": { + | "nova_object.name": "NetworkMetadata", + | "nova_object.namespace": "nova", + | "nova_object.version": "1.0", + | "nova_object.data": { + | "physnets": [], + | "tunneled": false + | }, + | "nova_object.changes": ["physnets", "tunneled"] + | } | }, - | { - | "nova_object.name": "NUMACell", - | "nova_object.data": { - | "cpu_usage": 0, - | "memory_usage": 0, - | "cpuset": [4, 5], - | "pinned_cpus": [], - | "siblings": [], - | "memory": 1969, - | "mempages": [ - | { - | "nova_object.name": "NUMAPagesTopology", - | "nova_object.data": { - | "used": 0, - | "total": 504202, - | "reserved": 0, - | "size_kb": 4 - | }, - | }, - | { - | "nova_object.name": "NUMAPagesTopology", - | "nova_object.data": { - | "used": 0, - | "total": 0, - | "reserved": 0, - | "size_kb": 2048 - | }, - | }, - | { - | "nova_object.name": "NUMAPagesTopology", - | "nova_object.data": { - | "used": 0, - | "total": 
0, - | "reserved": 0, - | "size_kb": 1048576 - | }, - | } - | ], - | "id": 1 - | }, + | "nova_object.changes": ["pinned_cpus", "siblings", "memory", "id", "cpuset", "network_metadata", "pcpuset", "mempages", "cpu_usage", "memory_usage"] + | }, { + | "nova_object.name": "NUMACell", + | "nova_object.namespace": "nova", + | "nova_object.version": "1.4", + | "nova_object.data": { + | "id": 1, + | "cpuset": [4, 5], + | "pcpuset": [4, 5], + | "memory": 1994, + | "cpu_usage": 0, + | "memory_usage": 0, + | "pinned_cpus": [], + | "siblings": [ + | [5], + | [4] + | ], + | "mempages": [{ + | "nova_object.name": "NUMAPagesTopology", + | "nova_object.namespace": "nova", + | "nova_object.version": "1.1", + | "nova_object.data": { + | "size_kb": 4, + | "total": 510562, + | "used": 0, + | "reserved": 0 + | }, + | "nova_object.changes": ["total", "size_kb", "used", "reserved"] + | }, { + | "nova_object.name": "NUMAPagesTopology", + | "nova_object.namespace": "nova", + | "nova_object.version": "1.1", + | "nova_object.data": { + | "size_kb": 2048, + | "total": 0, + | "used": 0, + | "reserved": 0 + | }, + | "nova_object.changes": ["total", "size_kb", "used", "reserved"] + | }, { + | "nova_object.name": "NUMAPagesTopology", + | "nova_object.namespace": "nova", + | "nova_object.version": "1.1", + | "nova_object.data": { + | "size_kb": 1048576, + | "total": 0, + | "used": 0, + | "reserved": 0 + | }, + | "nova_object.changes": ["total", "size_kb", "used", "reserved"] + | }], + | "network_metadata": { + | "nova_object.name": "NetworkMetadata", + | "nova_object.namespace": "nova", + | "nova_object.version": "1.0", + | "nova_object.data": { + | "physnets": [], + | "tunneled": false + | }, + | "nova_object.changes": ["physnets", "tunneled"] + | } | }, - | { - | "nova_object.name": "NUMACell", - | "nova_object.data": { - | "cpu_usage": 0, - | "memory_usage": 0, - | "cpuset": [6, 7], - | "pinned_cpus": [], - | "siblings": [], - | "memory": 1967, - | "mempages": [ - | { - | "nova_object.name": "NUMAPagesTopology", - | "nova_object.data": { - | "used": 0, - | "total": 503565, - | "reserved": 0, - | "size_kb": 4 - | }, - | }, - | { - | "nova_object.name": "NUMAPagesTopology", - | "nova_object.data": { - | "used": 0, - | "total": 0, - | "reserved": 0, - | "size_kb": 2048 - | }, - | }, - | { - | "nova_object.name": "NUMAPagesTopology", - | "nova_object.data": { - | "used": 0, - | "total": 0, - | "reserved": 0, - | "size_kb": 1048576 - | }, - | } - | ], - | "id": 2 - | }, - | } - | ] + | "nova_object.changes": ["pinned_cpus", "siblings", "memory", "id", "cpuset", "network_metadata", "pcpuset", "mempages", "cpu_usage", "memory_usage"] + | }, { + | "nova_object.name": "NUMACell", + | "nova_object.namespace": "nova", + | "nova_object.version": "1.4", + | "nova_object.data": { + | "id": 2, + | "cpuset": [6, 7], + | "pcpuset": [6, 7], + | "memory": 2014, + | "cpu_usage": 0, + | "memory_usage": 0, + | "pinned_cpus": [], + | "siblings": [ + | [7], + | [6] + | ], + | "mempages": [{ + | "nova_object.name": "NUMAPagesTopology", + | "nova_object.namespace": "nova", + | "nova_object.version": "1.1", + | "nova_object.data": { + | "size_kb": 4, + | "total": 515727, + | "used": 0, + | "reserved": 0 + | }, + | "nova_object.changes": ["total", "size_kb", "used", "reserved"] + | }, { + | "nova_object.name": "NUMAPagesTopology", + | "nova_object.namespace": "nova", + | "nova_object.version": "1.1", + | "nova_object.data": { + | "size_kb": 2048, + | "total": 0, + | "used": 0, + | "reserved": 0 + | }, + | "nova_object.changes": ["total", 
"size_kb", "used", "reserved"] + | }, { + | "nova_object.name": "NUMAPagesTopology", + | "nova_object.namespace": "nova", + | "nova_object.version": "1.1", + | "nova_object.data": { + | "size_kb": 1048576, + | "total": 0, + | "used": 0, + | "reserved": 0 + | }, + | "nova_object.changes": ["total", "size_kb", "used", "reserved"] + | }], + | "network_metadata": { + | "nova_object.name": "NetworkMetadata", + | "nova_object.namespace": "nova", + | "nova_object.version": "1.0", + | "nova_object.data": { + | "physnets": [], + | "tunneled": false + | }, + | "nova_object.changes": ["physnets", "tunneled"] + | } + | }, + | "nova_object.changes": ["pinned_cpus", "siblings", "memory", "id", "cpuset", "network_metadata", "pcpuset", "mempages", "cpu_usage", "memory_usage"] + | }] | }, - | } + | "nova_object.changes": ["cells"] +----------------------------------------------------------------------------+ This indeed shows that there are now 3 NUMA nodes for the "host" machine, the @@ -460,7 +553,7 @@ condition: .. code-block:: bash $ . openrc admin admin - $ openstack server create --image cirros-0.3.4-x86_64-uec --flavor m1.tiny \ + $ openstack server create --image cirros-0.4.0-x86_64-disk --flavor m1.tiny \ cirros1 Now look at the libvirt guest XML: @@ -498,7 +591,7 @@ Now boot the guest using this new flavor: .. code-block:: bash - $ openstack server create --image cirros-0.3.4-x86_64-uec --flavor m1.numa \ + $ openstack server create --image cirros-0.4.0-x86_64-disk --flavor m1.numa \ cirros2 Looking at the resulting guest XML from libvirt: @@ -551,30 +644,35 @@ database. This should match the ```` information: .. code-block:: bash - $ mysql -u root -p123456 nova + $ mysql -u root -p123456 nova_cell1 MariaDB [nova]> select numa_topology from instance_extra; +----------------------------------------------------------------------------+ | numa_topology | +----------------------------------------------------------------------------+ | { | "nova_object.name": "InstanceNUMATopology", + | "nova_object.namespace": "nova", + | "nova_object.version": "1.3", | "nova_object.data": { - | "cells": [ - | { - | "nova_object.name": "InstanceNUMACell", - | "nova_object.data": { - | "pagesize": null, - | "cpu_topology": null, - | "cpuset": [0, 1, 2, 3], - | "cpu_policy": null, - | "memory": 1024, - | "cpu_pinning_raw": null, - | "id": 0, - | "cpu_thread_policy": null - | }, - | } - | ] + | "cells": [{ + | "nova_object.name": "InstanceNUMACell", + | "nova_object.namespace": "nova", + | "nova_object.version": "1.4", + | "nova_object.data": { + | "id": 0, + | "cpuset": [0, 1, 2, 3], + | "memory": 1024, + | "pagesize": null, + | "cpu_pinning_raw": null, + | "cpu_policy": null, + | "cpu_thread_policy": null, + | "cpuset_reserved": null + | }, + | "nova_object.changes": ["id"] + | }], + | "emulator_threads_policy": null | }, + | "nova_object.changes": ["cells", "emulator_threads_policy"] | } +----------------------------------------------------------------------------+ @@ -600,7 +698,7 @@ Now boot the guest using this changed flavor: .. code-block:: bash - $ openstack server create --image cirros-0.3.4-x86_64-uec --flavor m1.numa \ + $ openstack server create --image cirros-0.4.0-x86_64-disk --flavor m1.numa \ cirros2 Looking at the resulting guest XML from libvirt: @@ -661,35 +759,42 @@ database. 
This should match the ```` information: +----------------------------------------------------------------------------+ | { | "nova_object.name": "InstanceNUMATopology", + | "nova_object.namespace": "nova", + | "nova_object.version": "1.3", | "nova_object.data": { - | "cells": [ - | { - | "nova_object.name": "InstanceNUMACell", - | "nova_object.data": { - | "pagesize": null, - | "cpu_topology": null, - | "cpuset": [0, 1], - | "cpu_policy": null, - | "memory": 512, - | "cpu_pinning_raw": null, - | "id": 0, - | "cpu_thread_policy": null - | }, + | "cells": [{ + | "nova_object.name": "InstanceNUMACell", + | "nova_object.namespace": "nova", + | "nova_object.version": "1.4", + | "nova_object.data": { + | "id": 0, + | "cpuset": [0, 1], + | "memory": 512, + | "pagesize": null, + | "cpu_pinning_raw": null, + | "cpu_policy": null, + | "cpu_thread_policy": null, + | "cpuset_reserved": null + | }, + | "nova_object.changes": ["id"] + | }, { + | "nova_object.name": "InstanceNUMACell", + | "nova_object.namespace": "nova", + | "nova_object.version": "1.4", + | "nova_object.data": { + | "id": 1, + | "cpuset": [2, 3], + | "memory": 512, + | "pagesize": null, + | "cpu_pinning_raw": null, + | "cpu_policy": null, + | "cpu_thread_policy": null, + | "cpuset_reserved": null | }, - | { - | "nova_object.name": "InstanceNUMACell", - | "nova_object.data": { - | "pagesize": null, - | "cpu_topology": null, - | "cpuset": [2, 3], - | "cpu_policy": null, - | "memory": 512, - | "cpu_pinning_raw": null, - | "id": 1, - | "cpu_thread_policy": null - | }, - | } - | ] + | "nova_object.changes": ["id"] + | }], + | "emulator_threads_policy": null | }, + | "nova_object.changes": ["cells", "emulator_threads_policy"] | } +----------------------------------------------------------------------------+ diff --git a/doc/source/contributor/testing/serial-console.rst b/doc/source/contributor/testing/serial-console.rst index eb97efa0ca5..b555f32f296 100644 --- a/doc/source/contributor/testing/serial-console.rst +++ b/doc/source/contributor/testing/serial-console.rst @@ -13,7 +13,7 @@ Setting up a devstack environment For instructions on how to setup devstack with serial console support enabled see `this guide -`_. +`_. --------------- Testing the API diff --git a/doc/source/contributor/testing/zero-downtime-upgrade.rst b/doc/source/contributor/testing/zero-downtime-upgrade.rst index 6dab61c328f..300a7ce3dbf 100644 --- a/doc/source/contributor/testing/zero-downtime-upgrade.rst +++ b/doc/source/contributor/testing/zero-downtime-upgrade.rst @@ -158,7 +158,7 @@ claims the virtual IP. You can check which node claimed the virtual IP using: Zero Downtime upgrade process ------------------------------ -General rolling upgrade process: :ref:minimal_downtime_upgrade. +General rolling upgrade process: :ref:`minimal_downtime_upgrade`. 
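A side note on the database spot checks above: the ``numa_topology`` columns hold JSON-serialized versioned objects, so they can also be pretty-printed from the shell rather than read raw at the MariaDB prompt. The following one-liner is a minimal sketch, not part of this change, and assumes the same throw-away ``root``/``123456`` credentials used throughout this guide:

.. code-block:: bash

   $ mysql -u root -p123456 nova_cell1 -N -B \
       -e "select numa_topology from instance_extra limit 1" \
       | python3 -m json.tool

``-N -B`` suppresses the column header and the ASCII table borders so that only the raw JSON document reaches ``json.tool``; if the column is still ``NULL`` (for example, before a NUMA-aware guest has been booted) the pretty-printer will report invalid input rather than a topology.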
Before Upgrade

diff --git a/doc/source/figures/bb-cinder-fig1.png b/doc/source/figures/bb-cinder-fig1.png deleted file mode 100644 index 022d3652a17..00000000000 Binary files a/doc/source/figures/bb-cinder-fig1.png and /dev/null differ
diff --git a/doc/source/figures/ceph-architecture.png b/doc/source/figures/ceph-architecture.png deleted file mode 100644 index ec408118507..00000000000 Binary files a/doc/source/figures/ceph-architecture.png and /dev/null differ
diff --git a/doc/source/figures/emc-enabler.png b/doc/source/figures/emc-enabler.png deleted file mode 100644 index b969b817141..00000000000 Binary files a/doc/source/figures/emc-enabler.png and /dev/null differ
diff --git a/doc/source/figures/filteringWorkflow1.png b/doc/source/figures/filteringWorkflow1.png deleted file mode 100644 index 58da979d793..00000000000 Binary files a/doc/source/figures/filteringWorkflow1.png and /dev/null differ
diff --git a/doc/source/figures/filteringWorkflow2.png b/doc/source/figures/filteringWorkflow2.png deleted file mode 100644 index e0fe66acfe2..00000000000 Binary files a/doc/source/figures/filteringWorkflow2.png and /dev/null differ
diff --git a/doc/source/figures/hds_network.jpg b/doc/source/figures/hds_network.jpg deleted file mode 100644 index bfd9d2bb7c5..00000000000 Binary files a/doc/source/figures/hds_network.jpg and /dev/null differ
diff --git a/doc/source/figures/hsp_network.png b/doc/source/figures/hsp_network.png deleted file mode 100644 index 024ddd8c9e2..00000000000 Binary files a/doc/source/figures/hsp_network.png and /dev/null differ
diff --git a/doc/source/figures/nova-conf-kvm-flat.png b/doc/source/figures/nova-conf-kvm-flat.png deleted file mode 100644 index ac68d4d331b..00000000000 Binary files a/doc/source/figures/nova-conf-kvm-flat.png and /dev/null differ
diff --git a/doc/source/figures/nova-conf-kvm-flat.svg b/doc/source/figures/nova-conf-kvm-flat.svg deleted file mode 100644 index 079a56f92c0..00000000000 --- a/doc/source/figures/nova-conf-kvm-flat.svg +++ /dev/null @@ -1,1833 +0,0 @@ [1833 lines of deleted SVG markup omitted: a "Schéma Réseau" nova.conf diagram for a KVM flat-network deployment, wiring NOVA-API (--ec2_url), NOVA-SCHEDULER (--rabbit_host), NOVA-COMPUTE (--libvirt_type=kvm, --ajax_console_proxy_url), NOVA-NETWORK (FlatManager, br100, nova-dhcpbridge), CINDER-VOLUME/NOVA-VOLUME (--iscsi_ip_prefix), GLANCE (--glance_api_servers, --s3_host) and MySQL (--sql_connection) to a shared Nova.conf]
diff --git a/doc/source/figures/nova-conf-kvm-flat.vsd b/doc/source/figures/nova-conf-kvm-flat.vsd deleted file mode 100644 index f1cff06926d..00000000000 Binary files a/doc/source/figures/nova-conf-kvm-flat.vsd and /dev/null differ
diff --git a/doc/source/figures/nova-conf-xen-flat.png b/doc/source/figures/nova-conf-xen-flat.png deleted file mode 100644 index 3e2ad991291..00000000000 Binary files a/doc/source/figures/nova-conf-xen-flat.png and /dev/null differ
diff --git a/doc/source/figures/nova-conf-xen-flat.svg b/doc/source/figures/nova-conf-xen-flat.svg deleted file mode 100644 index 73c27a29290..00000000000 --- a/doc/source/figures/nova-conf-xen-flat.svg +++ /dev/null @@ -1,1453 +0,0 @@ [1453 lines of deleted SVG markup omitted: the XenAPI variant of the "Schéma Réseau" nova.conf diagram, wiring NOVA-API (--ec2_url, --allow_admin_api=true), NOVA-COMPUTE (--connection_type=xenapi, --xenapi_connection_url/_username/_password, --rescue_timeout), NOVA-NETWORK (FlatManager, xenbr0, --flat_injected=true, --ipv6_backend=account_identifier), GLANCE and MySQL to a shared Nova.conf]
diff --git a/doc/source/figures/nova-conf-xen-flat.vsd b/doc/source/figures/nova-conf-xen-flat.vsd deleted file mode 100644 index 9bbb05bd35c..00000000000 Binary files a/doc/source/figures/nova-conf-xen-flat.vsd and /dev/null differ
diff --git a/doc/source/figures/xenserver_architecture.svg b/doc/source/figures/xenserver_architecture.svg deleted file mode 100644 index 1aa43463896..00000000000 --- a/doc/source/figures/xenserver_architecture.svg +++ /dev/null @@ -1,1035 +0,0 @@ [1035 lines of deleted SVG markup omitted: a XenServer architecture diagram showing dom0 with xapi and the OpenStack xapi plug-ins, an "OpenStack VM" running nova-compute (nova.virt.xenapi), nova-network and dhcpd, tenant VMs, the management/public/tenant networks, and a storage repository of virtual block devices on local disk]
diff --git a/doc/source/index.rst b/doc/source/index.rst index 5a2dd2d69f6..01ed1d7a1c2 100644 --- a/doc/source/index.rst +++ b/doc/source/index.rst @@ -36,6 +36,9 @@ It requires the following additional OpenStack services for basic function: compute instances launch from glance images. * :neutron-doc:`Neutron <>`: This is responsible for provisioning the virtual or physical networks that compute instances connect to on boot. +* :placement-doc:`Placement <>`: This is responsible for tracking inventory of + resources available in a cloud and assisting in choosing which provider of + those resources will be used when creating a virtual machine. It can also integrate with other services to include: persistent block storage, encrypted disks, and baremetal compute instances. @@ -46,6 +49,13 @@ For End Users As an end user of nova, you'll use nova to create and manage servers with either tools or the API directly. +.. # NOTE(amotoki): toctree needs to be placed at the end of the section to + # keep the document structure in the PDF doc. +.. toctree:: + :hidden: + + user/index + Tools for using Nova -------------------- @@ -67,36 +77,30 @@ API, which can be used to build more complicated logic or automation with nova. This can be consumed directly, or via various SDKs. The following resources will help you get started with consuming the API directly. -* `Compute API Guide `_: The +* `Compute API Guide `_: The concept guide for the API. This helps lay out the concepts behind the API to make consuming the API reference easier. -* `Compute API Reference `_: +* `Compute API Reference `_: The complete reference for the compute API, including all methods and request / response parameters and their meaning.
* :doc:`Compute API Microversion History `: The compute API evolves over time through `Microversions - `_. This + `_. This provides the history of all those changes. Consider it a "what's new" in the compute API. -* `Placement API Reference `_: - The complete reference for the placement API, including all methods and - request / response parameters and their meaning. -* :ref:`Placement API Microversion History `: - The placement API evolves over time through `Microversions - `_. This - provides the history of all those changes. Consider it a "what's new" in the - placement API. * :doc:`Block Device Mapping `: One of the trickier parts to understand is the Block Device Mapping parameters used to connect specific block devices to computes. This deserves its own deep dive. -* :doc:`Configuration drive `: Provide information to the - guest instance when it is created. +* :doc:`Metadata `: Provide information to the guest instance + when it is created. Nova can be configured to emit notifications over RPC. * :ref:`Versioned Notifications `: This provides the list of existing versioned notifications with sample payloads. +Other end-user guides can be found under :doc:`/user/index`. + For Operators ============= @@ -106,6 +110,13 @@ Architecture Overview * :doc:`Nova architecture `: An overview of how all the parts in nova fit together. +.. # NOTE(amotoki): toctree needs to be placed at the end of the section to + # keep the document structure in the PDF doc. +.. toctree:: + :hidden: + + user/architecture + Installation ------------ @@ -113,9 +124,12 @@ The detailed install guide for nova. A functioning nova will also require having installed :keystone-doc:`keystone `, :glance-doc:`glance -`, and :neutron-doc:`neutron `. Ensure that you follow -their install guides first. +`, :neutron-doc:`neutron `, and +:placement-doc:`placement `. Ensure that you follow their install +guides first. +.. # NOTE(amotoki): toctree needs to be placed at the end of the section to + # keep the document structure in the PDF doc. .. toctree:: :maxdepth: 2 @@ -142,11 +156,19 @@ the defaults from the :doc:`install guide ` will be sufficient. * :doc:`Cells v2 Planning `: For large deployments, Cells v2 allows sharding of your compute environment. Upfront planning is key to a successful Cells v2 layout. -* :doc:`Placement service `: Overview of the placement - service, including how it fits in with the rest of nova. * :doc:`Running nova-api on wsgi `: Considerations for using a real WSGI container instead of the baked-in eventlet web server. +.. # NOTE(amotoki): toctree needs to be placed at the end of the section to + # keep the document structure in the PDF doc. +.. toctree:: + :hidden: + + user/feature-classification + user/support-matrix + user/cellsv2-layout + user/wsgi + Maintenance ----------- @@ -155,18 +177,30 @@ Once you are running nova, the following information is extremely useful. * :doc:`Admin Guide `: A collection of guides for administrating nova. * :doc:`Flavors `: What flavors are and why they are used. -* :doc:`Upgrades `: How nova is designed to be upgraded for minimal +* :doc:`Upgrades `: How nova is designed to be upgraded for minimal service impact, and the order you should do them in. * :doc:`Quotas `: Managing project quotas in nova. -* :doc:`Aggregates `: Aggregates are a useful way of grouping +* :doc:`Aggregates `: Aggregates are a useful way of grouping hosts together for scheduling purposes.
-* :doc:`Filter Scheduler `: How the filter scheduler is +* :doc:`Scheduling `: How the scheduler is configured, and how that will impact where compute instances land in your environment. If you are seeing unexpected distribution of compute instances in your hosts, you'll want to dive into this configuration. -* :doc:`Exposing custom metadata to compute instances `: How and - when you might want to extend the basic metadata exposed to compute instances - (either via metadata server or config drive) for your specific purposes. +* :doc:`Exposing custom metadata to compute instances `: How + and when you might want to extend the basic metadata exposed to compute + instances (either via metadata server or config drive) for your specific + purposes. + +.. # NOTE(amotoki): toctree needs to be placed at the end of the section to + # keep the document structure in the PDF doc. +.. toctree:: + :hidden: + + admin/index + user/flavors + admin/upgrades + user/quotas + admin/vendordata Reference Material ------------------ @@ -176,95 +210,39 @@ Reference Material * :doc:`Configuration Guide `: Information on configuring the system, including role-based access control policy rules. -For Contributors -================ - -If you are new to Nova, this should help you start to understand what Nova -actually does, and why. - +.. # NOTE(amotoki): toctree needs to be placed at the end of the section to + # keep the document structure in the PDF doc. .. toctree:: - :maxdepth: 1 - - contributor/index - -There are also a number of technical references on both current and future -looking parts of our architecture. These are collected below. + :hidden: -.. toctree:: - :maxdepth: 1 + cli/index + configuration/index - reference/index +For Contributors +================ +* :doc:`contributor/contributing`: If you are a new contributor this should + help you to start contributing to Nova. +* :doc:`contributor/index`: If you are new to Nova, this should help you start + to understand what Nova actually does, and why. +* :doc:`reference/index`: There are also a number of technical references on + both current and future looking parts of our architecture. + These are collected here. -.. # NOTE(mriedem): This is the section where we hide things that we don't - # actually want in the table of contents but sphinx build would fail if - # they aren't in the toctree somewhere. For example, we hide api/autoindex - # since that's already covered with modindex below. +.. # NOTE(amotoki): toctree needs to be placed at the end of the section to + # keep the document structure in the PDF doc. ..
toctree:: :hidden: - admin/index - admin/configuration/index - cli/index - configuration/index - contributor/development-environment - contributor/api - contributor/api-2 - contributor/api-ref-guideline - contributor/blueprints - contributor/code-review - contributor/documentation - contributor/microversions - contributor/placement.rst - contributor/policies.rst - contributor/releasenotes - contributor/testing - contributor/testing/libvirt-numa - contributor/testing/serial-console - contributor/testing/zero-downtime-upgrade - contributor/how-to-get-involved - contributor/process - contributor/project-scope - reference/api-microversion-history.rst - reference/gmr - reference/i18n - reference/live-migration - reference/notifications - reference/policy-enforcement - reference/rpc - reference/scheduling - reference/scheduler-evolution - reference/services - reference/stable-api - reference/threading - reference/update-provider-tree - reference/vm-states - user/index - user/aggregates - user/architecture - user/block-device-mapping - user/cells - user/cellsv2-layout - user/certificate-validation - user/conductor - user/config-drive - user/feature-classification - user/filter-scheduler - user/flavors - user/manage-ip-addresses - user/placement - user/quotas - user/support-matrix - user/upgrade - user/user-data - user/vendordata - user/wsgi - + contributor/index + contributor/contributing + reference/index -Search -====== +.. only:: html -* :ref:`Nova document search `: Search the contents of this document. -* `OpenStack wide search `_: Search the wider - set of OpenStack documentation, including forums. + Search + ====== + * :ref:`Nova document search `: Search the contents of this document. + * `OpenStack wide search `_: Search the wider + set of OpenStack documentation, including forums. diff --git a/doc/source/install/compute-install-obs.rst b/doc/source/install/compute-install-obs.rst index 75fbaac0703..c5c1d29fb3d 100644 --- a/doc/source/install/compute-install-obs.rst +++ b/doc/source/install/compute-install-obs.rst @@ -74,11 +74,12 @@ Install and configure components [keystone_authtoken] # ... - auth_url = http://controller:5000/v3 + www_authenticate_uri = http://controller:5000/ + auth_url = http://controller:5000/ memcached_servers = controller:11211 auth_type = password - project_domain_name = default - user_domain_name = default + project_domain_name = Default + user_domain_name = Default project_name = service username = nova password = NOVA_PASS @@ -105,23 +106,6 @@ Install and configure components for the first node in the :ref:`example architecture `. - * In the ``[DEFAULT]`` section, enable support for the Networking service: - - .. path /etc/nova/nova.conf - .. code-block:: ini - - [DEFAULT] - # ... - use_neutron = true - firewall_driver = nova.virt.firewall.NoopFirewallDriver - - .. note:: - - By default, Compute uses an internal firewall service. Since - Networking includes a firewall service, you must disable the Compute - firewall service by using the - ``nova.virt.firewall.NoopFirewallDriver`` firewall driver. - * Configure the ``[neutron]`` section of **/etc/nova/nova.conf**. Refer to the :neutron-doc:`Networking service install guide ` for more details. diff --git a/doc/source/install/compute-install-rdo.rst b/doc/source/install/compute-install-rdo.rst index 867ed9ff2a6..0a5ad685a62 100644 --- a/doc/source/install/compute-install-rdo.rst +++ b/doc/source/install/compute-install-rdo.rst @@ -66,11 +66,12 @@ Install and configure components [keystone_authtoken] # ... 
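# (illustrative aside, not part of the patch: www_authenticate_uri is the
# unversioned Identity endpoint advertised to unauthenticated clients in
# HTTP 401 responses, while auth_url is the endpoint keystonemiddleware
# itself uses to validate tokens; on a single-Keystone install like this
# one, both point at the same controller host.)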
- auth_url = http://controller:5000/v3 + www_authenticate_uri = http://controller:5000/ + auth_url = http://controller:5000/ memcached_servers = controller:11211 auth_type = password - project_domain_name = default - user_domain_name = default + project_domain_name = Default + user_domain_name = Default project_name = service username = nova password = NOVA_PASS @@ -97,23 +98,6 @@ Install and configure components the first node in the :ref:`example architecture `. - * In the ``[DEFAULT]`` section, enable support for the Networking service: - - .. path /etc/nova/nova.conf - .. code-block:: ini - - [DEFAULT] - # ... - use_neutron = true - firewall_driver = nova.virt.firewall.NoopFirewallDriver - - .. note:: - - By default, Compute uses an internal firewall service. Since Networking - includes a firewall service, you must disable the Compute firewall - service by using the ``nova.virt.firewall.NoopFirewallDriver`` firewall - driver. - * Configure the ``[neutron]`` section of **/etc/nova/nova.conf**. Refer to the :neutron-doc:`Networking service install guide ` diff --git a/doc/source/install/compute-install-ubuntu.rst b/doc/source/install/compute-install-ubuntu.rst index a00b6459356..8605c73316e 100644 --- a/doc/source/install/compute-install-ubuntu.rst +++ b/doc/source/install/compute-install-ubuntu.rst @@ -56,11 +56,12 @@ Install and configure components [keystone_authtoken] # ... - auth_url = http://controller:5000/v3 + www_authenticate_uri = http://controller:5000/ + auth_url = http://controller:5000/ memcached_servers = controller:11211 auth_type = password - project_domain_name = default - user_domain_name = default + project_domain_name = Default + user_domain_name = Default project_name = service username = nova password = NOVA_PASS @@ -87,23 +88,6 @@ Install and configure components the first node in the :ref:`example architecture `. - * In the ``[DEFAULT]`` section, enable support for the Networking service: - - .. path /etc/nova/nova.conf - .. code-block:: ini - - [DEFAULT] - # ... - use_neutron = true - firewall_driver = nova.virt.firewall.NoopFirewallDriver - - .. note:: - - By default, Compute uses an internal firewall service. Since Networking - includes a firewall service, you must disable the Compute firewall - service by using the ``nova.virt.firewall.NoopFirewallDriver`` firewall - driver. - * Configure the ``[neutron]`` section of **/etc/nova/nova.conf**. Refer to the :neutron-doc:`Networking service install guide ` @@ -152,13 +136,6 @@ Install and configure components # ... lock_path = /var/lib/nova/tmp -.. todo:: - - https://bugs.launchpad.net/ubuntu/+source/nova/+bug/1506667 - - * Due to a packaging bug, remove the ``log_dir`` option from the - ``[DEFAULT]`` section. - * In the ``[placement]`` section, configure the Placement API: .. path /etc/nova/nova.conf diff --git a/doc/source/install/compute-install.rst b/doc/source/install/compute-install.rst index c65eab74b70..2470ab786cd 100644 --- a/doc/source/install/compute-install.rst +++ b/doc/source/install/compute-install.rst @@ -25,6 +25,6 @@ environment with additional compute nodes. .. 
toctree:: :glob: - compute-install-ubuntu.rst - compute-install-rdo.rst - compute-install-obs.rst + compute-install-ubuntu + compute-install-rdo + compute-install-obs diff --git a/doc/source/install/controller-install-obs.rst b/doc/source/install/controller-install-obs.rst index f65942a81ea..18499612c3e 100644 --- a/doc/source/install/controller-install-obs.rst +++ b/doc/source/install/controller-install-obs.rst @@ -19,15 +19,13 @@ databases, service credentials, and API endpoints. $ mysql -u root -p - * Create the ``nova_api``, ``nova``, ``nova_cell0``, and ``placement`` - databases: + * Create the ``nova_api``, ``nova``, and ``nova_cell0`` databases: .. code-block:: console MariaDB [(none)]> CREATE DATABASE nova_api; MariaDB [(none)]> CREATE DATABASE nova; MariaDB [(none)]> CREATE DATABASE nova_cell0; - MariaDB [(none)]> CREATE DATABASE placement; * Grant proper access to the databases: @@ -48,12 +46,7 @@ databases, service credentials, and API endpoints. MariaDB [(none)]> GRANT ALL PRIVILEGES ON nova_cell0.* TO 'nova'@'%' \ IDENTIFIED BY 'NOVA_DBPASS'; - MariaDB [(none)]> GRANT ALL PRIVILEGES ON placement.* TO 'placement'@'localhost' \ - IDENTIFIED BY 'PLACEMENT_DBPASS'; - MariaDB [(none)]> GRANT ALL PRIVILEGES ON placement.* TO 'placement'@'%' \ - IDENTIFIED BY 'PLACEMENT_DBPASS'; - - Replace ``NOVA_DBPASS`` and ``PLACEMENT_DBPASS`` with a suitable password. + Replace ``NOVA_DBPASS`` with a suitable password. * Exit the database access client. @@ -166,106 +159,11 @@ databases, service credentials, and API endpoints. | url | http://controller:8774/v2.1 | +--------------+-------------------------------------------+ -#. Create a Placement service user using your chosen ``PLACEMENT_PASS``: - - .. code-block:: console - - $ openstack user create --domain default --password-prompt placement - - User Password: - Repeat User Password: - +---------------------+----------------------------------+ - | Field | Value | - +---------------------+----------------------------------+ - | domain_id | default | - | enabled | True | - | id | fa742015a6494a949f67629884fc7ec8 | - | name | placement | - | options | {} | - | password_expires_at | None | - +---------------------+----------------------------------+ - -#. Add the Placement user to the service project with the admin role: - - .. code-block:: console - - $ openstack role add --project service --user placement admin - - .. note:: +#. Install Placement service and configure user and endpoints: - This command provides no output. - -#. Create the Placement API entry in the service catalog: - - .. code-block:: console - - $ openstack service create --name placement \ - --description "Placement API" placement - - +-------------+----------------------------------+ - | Field | Value | - +-------------+----------------------------------+ - | description | Placement API | - | enabled | True | - | id | 2d1a27022e6e4185b86adac4444c495f | - | name | placement | - | type | placement | - +-------------+----------------------------------+ - -#. Create the Placement API service endpoints: - - .. 
code-block:: console - - $ openstack endpoint create --region RegionOne \ - placement public http://controller:8780 - - +--------------+----------------------------------+ - | Field | Value | - +--------------+----------------------------------+ - | enabled | True | - | id | 2b1b2637908b4137a9c2e0470487cbc0 | - | interface | public | - | region | RegionOne | - | region_id | RegionOne | - | service_id | 2d1a27022e6e4185b86adac4444c495f | - | service_name | placement | - | service_type | placement | - | url | http://controller:8780 | - +--------------+----------------------------------+ - - $ openstack endpoint create --region RegionOne \ - placement internal http://controller:8780 - - +--------------+----------------------------------+ - | Field | Value | - +--------------+----------------------------------+ - | enabled | True | - | id | 02bcda9a150a4bd7993ff4879df971ab | - | interface | internal | - | region | RegionOne | - | region_id | RegionOne | - | service_id | 2d1a27022e6e4185b86adac4444c495f | - | service_name | placement | - | service_type | placement | - | url | http://controller:8780 | - +--------------+----------------------------------+ - - $ openstack endpoint create --region RegionOne \ - placement admin http://controller:8780 - - +--------------+----------------------------------+ - | Field | Value | - +--------------+----------------------------------+ - | enabled | True | - | id | 3d71177b9e0f406f98cbff198d74b182 | - | interface | admin | - | region | RegionOne | - | region_id | RegionOne | - | service_id | 2d1a27022e6e4185b86adac4444c495f | - | service_name | placement | - | service_type | placement | - | url | http://controller:8780 | - +--------------+----------------------------------+ + * Refer to the :placement-doc:`Placement service install guide + ` + for more information. Install and configure components -------------------------------- @@ -284,9 +182,12 @@ Install and configure components .. code-block:: console - # zypper install openstack-nova-api openstack-nova-scheduler \ - openstack-nova-conductor openstack-nova-novncproxy \ - openstack-nova-placement-api iptables + # zypper install \ + openstack-nova-api \ + openstack-nova-scheduler \ + openstack-nova-conductor \ + openstack-nova-novncproxy \ + iptables #. Edit the ``/etc/nova/nova.conf`` file and complete the following actions: @@ -300,8 +201,8 @@ Install and configure components # ... enabled_apis = osapi_compute,metadata - * In the ``[api_database]``, ``[database]``, and ``[placement_database]`` - sections, configure database access: + * In the ``[api_database]`` and ``[database]`` sections, configure database + access: .. path /etc/nova/nova.conf .. code-block:: ini @@ -314,12 +215,8 @@ Install and configure components # ... connection = mysql+pymysql://nova:NOVA_DBPASS@controller/nova - [placement_database] - # ... - connection = mysql+pymysql://placement:PLACEMENT_DBPASS@controller/placement - Replace ``NOVA_DBPASS`` with the password you chose for the Compute - databases and ``PLACEMENT_DBPASS`` for Placement database. + databases. * In the ``[DEFAULT]`` section, configure ``RabbitMQ`` message queue access: @@ -328,7 +225,7 @@ Install and configure components [DEFAULT] # ... - transport_url = rabbit://openstack:RABBIT_PASS@controller + transport_url = rabbit://openstack:RABBIT_PASS@controller:5672/ Replace ``RABBIT_PASS`` with the password you chose for the ``openstack`` account in ``RabbitMQ``. @@ -345,11 +242,12 @@ Install and configure components [keystone_authtoken] # ... 
- auth_url = http://controller:5000/v3 + www_authenticate_uri = http://controller:5000/ + auth_url = http://controller:5000/ memcached_servers = controller:11211 auth_type = password - project_domain_name = default - user_domain_name = default + project_domain_name = Default + user_domain_name = Default project_name = service username = nova password = NOVA_PASS @@ -372,23 +270,6 @@ Install and configure components # ... my_ip = 10.0.0.11 - * In the ``[DEFAULT]`` section, enable support for the Networking service: - - .. path /etc/nova/nova.conf - .. code-block:: ini - - [DEFAULT] - # ... - use_neutron = true - firewall_driver = nova.virt.firewall.NoopFirewallDriver - - .. note:: - - By default, Compute uses an internal firewall driver. Since the - Networking service includes a firewall driver, you must disable the - Compute firewall driver by using the - ``nova.virt.firewall.NoopFirewallDriver`` firewall driver. - * Configure the ``[neutron]`` section of **/etc/nova/nova.conf**. Refer to the :neutron-doc:`Networking service install guide ` @@ -425,7 +306,8 @@ Install and configure components # ... lock_path = /var/run/nova - * In the ``[placement]`` section, configure the Placement API: + * In the ``[placement]`` section, configure access to the Placement + service: .. path /etc/nova/nova.conf .. code-block:: ini @@ -442,10 +324,11 @@ Install and configure components password = PLACEMENT_PASS Replace ``PLACEMENT_PASS`` with the password you choose for the - ``placement`` user in the Identity service. Comment out any other options - in the ``[placement]`` section. + ``placement`` service user created when installing + :placement-doc:`Placement `. Comment out or remove any other + options in the ``[placement]`` section. -#. Populate the ``nova-api`` and ``placement`` databases: +#. Populate the ``nova-api`` database: .. code-block:: console @@ -466,7 +349,6 @@ Install and configure components .. code-block:: console # su -s /bin/sh -c "nova-manage cell_v2 create_cell --name=cell1 --verbose" nova - 109e1d4b-536a-40d0-83c6-5f121b82b650 #. Populate the nova database: @@ -479,31 +361,27 @@ Install and configure components .. code-block:: console # su -s /bin/sh -c "nova-manage cell_v2 list_cells" nova - +-------+--------------------------------------+ - | Name | UUID | - +-------+--------------------------------------+ - | cell1 | 109e1d4b-536a-40d0-83c6-5f121b82b650 | - | cell0 | 00000000-0000-0000-0000-000000000000 | - +-------+--------------------------------------+ + +-------+--------------------------------------+----------------------------------------------------+--------------------------------------------------------------+----------+ + | Name | UUID | Transport URL | Database Connection | Disabled | + +-------+--------------------------------------+----------------------------------------------------+--------------------------------------------------------------+----------+ + | cell0 | 00000000-0000-0000-0000-000000000000 | none:/ | mysql+pymysql://nova:****@controller/nova_cell0?charset=utf8 | False | + | cell1 | f690f4fd-2bc5-4f15-8145-db561a7b9d3d | rabbit://openstack:****@controller:5672/nova_cell1 | mysql+pymysql://nova:****@controller/nova_cell1?charset=utf8 | False | + +-------+--------------------------------------+----------------------------------------------------+--------------------------------------------------------------+----------+ Finalize installation --------------------- -* Enable the placement API Apache vhost: - - .. 
code-block:: console - - # mv /etc/apache2/vhosts.d/nova-placement-api.conf.sample \ - /etc/apache2/vhosts.d/nova-placement-api.conf - # systemctl reload apache2.service - * Start the Compute services and configure them to start when the system boots: .. code-block:: console - # systemctl enable openstack-nova-api.service \ - openstack-nova-scheduler.service openstack-nova-conductor.service \ - openstack-nova-novncproxy.service - # systemctl start openstack-nova-api.service \ - openstack-nova-scheduler.service openstack-nova-conductor.service \ - openstack-nova-novncproxy.service + # systemctl enable \ + openstack-nova-api.service \ + openstack-nova-scheduler.service \ + openstack-nova-conductor.service \ + openstack-nova-novncproxy.service + # systemctl start \ + openstack-nova-api.service \ + openstack-nova-scheduler.service \ + openstack-nova-conductor.service \ + openstack-nova-novncproxy.service diff --git a/doc/source/install/controller-install-rdo.rst b/doc/source/install/controller-install-rdo.rst index 20fd0e60ee5..fd2419631ec 100644 --- a/doc/source/install/controller-install-rdo.rst +++ b/doc/source/install/controller-install-rdo.rst @@ -19,14 +19,13 @@ databases, service credentials, and API endpoints. $ mysql -u root -p - * Create the ``nova_api``, ``nova``, ``nova_cell0``, and ``placement`` databases: + * Create the ``nova_api``, ``nova``, and ``nova_cell0`` databases: .. code-block:: console MariaDB [(none)]> CREATE DATABASE nova_api; MariaDB [(none)]> CREATE DATABASE nova; MariaDB [(none)]> CREATE DATABASE nova_cell0; - MariaDB [(none)]> CREATE DATABASE placement; * Grant proper access to the databases: @@ -47,12 +46,7 @@ databases, service credentials, and API endpoints. MariaDB [(none)]> GRANT ALL PRIVILEGES ON nova_cell0.* TO 'nova'@'%' \ IDENTIFIED BY 'NOVA_DBPASS'; - MariaDB [(none)]> GRANT ALL PRIVILEGES ON placement.* TO 'placement'@'localhost' \ - IDENTIFIED BY 'PLACEMENT_DBPASS'; - MariaDB [(none)]> GRANT ALL PRIVILEGES ON placement.* TO 'placement'@'%' \ - IDENTIFIED BY 'PLACEMENT_DBPASS'; - - Replace ``NOVA_DBPASS`` and ``PLACEMENT_DBPASS`` with a suitable password. + Replace ``NOVA_DBPASS`` with a suitable password. * Exit the database access client. @@ -165,106 +159,11 @@ databases, service credentials, and API endpoints. | url | http://controller:8774/v2.1 | +--------------+-------------------------------------------+ -#. Create a Placement service user using your chosen ``PLACEMENT_PASS``: - - .. code-block:: console - - $ openstack user create --domain default --password-prompt placement - - User Password: - Repeat User Password: - +---------------------+----------------------------------+ - | Field | Value | - +---------------------+----------------------------------+ - | domain_id | default | - | enabled | True | - | id | fa742015a6494a949f67629884fc7ec8 | - | name | placement | - | options | {} | - | password_expires_at | None | - +---------------------+----------------------------------+ - -#. Add the Placement user to the service project with the admin role: - - .. code-block:: console - - $ openstack role add --project service --user placement admin - - .. note:: - - This command provides no output. +#. Install Placement service and configure user and endpoints: -#. Create the Placement API entry in the service catalog: - - .. 
code-block:: console - - $ openstack service create --name placement \ - --description "Placement API" placement - - +-------------+----------------------------------+ - | Field | Value | - +-------------+----------------------------------+ - | description | Placement API | - | enabled | True | - | id | 2d1a27022e6e4185b86adac4444c495f | - | name | placement | - | type | placement | - +-------------+----------------------------------+ - -#. Create the Placement API service endpoints: - - .. code-block:: console - - $ openstack endpoint create --region RegionOne \ - placement public http://controller:8778 - - +--------------+----------------------------------+ - | Field | Value | - +--------------+----------------------------------+ - | enabled | True | - | id | 2b1b2637908b4137a9c2e0470487cbc0 | - | interface | public | - | region | RegionOne | - | region_id | RegionOne | - | service_id | 2d1a27022e6e4185b86adac4444c495f | - | service_name | placement | - | service_type | placement | - | url | http://controller:8778 | - +--------------+----------------------------------+ - - $ openstack endpoint create --region RegionOne \ - placement internal http://controller:8778 - - +--------------+----------------------------------+ - | Field | Value | - +--------------+----------------------------------+ - | enabled | True | - | id | 02bcda9a150a4bd7993ff4879df971ab | - | interface | internal | - | region | RegionOne | - | region_id | RegionOne | - | service_id | 2d1a27022e6e4185b86adac4444c495f | - | service_name | placement | - | service_type | placement | - | url | http://controller:8778 | - +--------------+----------------------------------+ - - $ openstack endpoint create --region RegionOne \ - placement admin http://controller:8778 - - +--------------+----------------------------------+ - | Field | Value | - +--------------+----------------------------------+ - | enabled | True | - | id | 3d71177b9e0f406f98cbff198d74b182 | - | interface | admin | - | region | RegionOne | - | region_id | RegionOne | - | service_id | 2d1a27022e6e4185b86adac4444c495f | - | service_name | placement | - | service_type | placement | - | url | http://controller:8778 | - +--------------+----------------------------------+ + * Refer to the :placement-doc:`Placement service install guide + ` + for more information. Install and configure components -------------------------------- @@ -276,8 +175,7 @@ Install and configure components .. code-block:: console # yum install openstack-nova-api openstack-nova-conductor \ - openstack-nova-console openstack-nova-novncproxy \ - openstack-nova-scheduler openstack-nova-placement-api + openstack-nova-novncproxy openstack-nova-scheduler #. Edit the ``/etc/nova/nova.conf`` file and complete the following actions: @@ -290,8 +188,8 @@ Install and configure components # ... enabled_apis = osapi_compute,metadata - * In the ``[api_database]``, ``[database]``, and ``[placement_database]`` - sections, configure database access: + * In the ``[api_database]`` and ``[database]`` sections, configure database + access: .. path /etc/nova/nova.conf .. code-block:: ini @@ -304,12 +202,8 @@ Install and configure components # ... connection = mysql+pymysql://nova:NOVA_DBPASS@controller/nova - [placement_database] - # ... - connection = mysql+pymysql://placement:PLACEMENT_DBPASS@controller/placement - Replace ``NOVA_DBPASS`` with the password you chose for the Compute - databases and ``PLACEMENT_DBPASS`` for Placement database. + databases. 
* In the ``[DEFAULT]`` section, configure ``RabbitMQ`` message queue access: @@ -318,7 +212,7 @@ Install and configure components [DEFAULT] # ... - transport_url = rabbit://openstack:RABBIT_PASS@controller + transport_url = rabbit://openstack:RABBIT_PASS@controller:5672/ Replace ``RABBIT_PASS`` with the password you chose for the ``openstack`` account in ``RabbitMQ``. @@ -335,11 +229,12 @@ Install and configure components [keystone_authtoken] # ... - auth_url = http://controller:5000/v3 + www_authenticate_uri = http://controller:5000/ + auth_url = http://controller:5000/ memcached_servers = controller:11211 auth_type = password - project_domain_name = default - user_domain_name = default + project_domain_name = Default + user_domain_name = Default project_name = service username = nova password = NOVA_PASS @@ -362,23 +257,6 @@ Install and configure components # ... my_ip = 10.0.0.11 - * In the ``[DEFAULT]`` section, enable support for the Networking service: - - .. path /etc/nova/nova.conf - .. code-block:: ini - - [DEFAULT] - # ... - use_neutron = true - firewall_driver = nova.virt.firewall.NoopFirewallDriver - - .. note:: - - By default, Compute uses an internal firewall driver. Since the - Networking service includes a firewall driver, you must disable the - Compute firewall driver by using the - ``nova.virt.firewall.NoopFirewallDriver`` firewall driver. - * Configure the ``[neutron]`` section of **/etc/nova/nova.conf**. Refer to the :neutron-doc:`Networking service install guide ` for more details. @@ -414,7 +292,8 @@ Install and configure components # ... lock_path = /var/lib/nova/tmp - * In the ``[placement]`` section, configure the Placement API: + * In the ``[placement]`` section, configure access to the Placement + service: .. path /etc/nova/nova.conf .. code-block:: ini @@ -431,34 +310,11 @@ Install and configure components password = PLACEMENT_PASS Replace ``PLACEMENT_PASS`` with the password you choose for the - ``placement`` user in the Identity service. Comment out any other options - in the ``[placement]`` section. - - * Due to a `packaging bug - `_, you must enable - access to the Placement API by adding the following configuration to - ``/etc/httpd/conf.d/00-nova-placement-api.conf``: - - .. path /etc/httpd/conf.d/00-nova-placement-api.conf - .. code-block:: ini - - - = 2.4> - Require all granted - - - Order allow,deny - Allow from all - - - - * Restart the httpd service: - - .. code-block:: console - - # systemctl restart httpd + ``placement`` service user created when installing + :placement-doc:`Placement `. Comment out or remove any other + options in the ``[placement]`` section. -#. Populate the ``nova-api`` and ``placement`` databases: +#. Populate the ``nova-api`` database: .. code-block:: console @@ -479,7 +335,6 @@ Install and configure components .. code-block:: console # su -s /bin/sh -c "nova-manage cell_v2 create_cell --name=cell1 --verbose" nova - 109e1d4b-536a-40d0-83c6-5f121b82b650 #. Populate the nova database: @@ -492,12 +347,12 @@ Install and configure components .. 
code-block:: console # su -s /bin/sh -c "nova-manage cell_v2 list_cells" nova - +-------+--------------------------------------+ - | Name | UUID | - +-------+--------------------------------------+ - | cell1 | 109e1d4b-536a-40d0-83c6-5f121b82b650 | - | cell0 | 00000000-0000-0000-0000-000000000000 | - +-------+--------------------------------------+ + +-------+--------------------------------------+----------------------------------------------------+--------------------------------------------------------------+----------+ + | Name | UUID | Transport URL | Database Connection | Disabled | + +-------+--------------------------------------+----------------------------------------------------+--------------------------------------------------------------+----------+ + | cell0 | 00000000-0000-0000-0000-000000000000 | none:/ | mysql+pymysql://nova:****@controller/nova_cell0?charset=utf8 | False | + | cell1 | f690f4fd-2bc5-4f15-8145-db561a7b9d3d | rabbit://openstack:****@controller:5672/nova_cell1 | mysql+pymysql://nova:****@controller/nova_cell1?charset=utf8 | False | + +-------+--------------------------------------+----------------------------------------------------+--------------------------------------------------------------+----------+ Finalize installation --------------------- @@ -506,9 +361,13 @@ Finalize installation .. code-block:: console - # systemctl enable openstack-nova-api.service \ - openstack-nova-scheduler.service openstack-nova-conductor.service \ - openstack-nova-novncproxy.service - # systemctl start openstack-nova-api.service \ - openstack-nova-scheduler.service openstack-nova-conductor.service \ - openstack-nova-novncproxy.service + # systemctl enable \ + openstack-nova-api.service \ + openstack-nova-scheduler.service \ + openstack-nova-conductor.service \ + openstack-nova-novncproxy.service + # systemctl start \ + openstack-nova-api.service \ + openstack-nova-scheduler.service \ + openstack-nova-conductor.service \ + openstack-nova-novncproxy.service diff --git a/doc/source/install/controller-install-ubuntu.rst b/doc/source/install/controller-install-ubuntu.rst index be479ed1e40..7282b0b2e22 100644 --- a/doc/source/install/controller-install-ubuntu.rst +++ b/doc/source/install/controller-install-ubuntu.rst @@ -19,15 +19,13 @@ databases, service credentials, and API endpoints. # mysql - * Create the ``nova_api``, ``nova``, ``nova_cell0``, and ``placement`` - databases: + * Create the ``nova_api``, ``nova``, and ``nova_cell0`` databases: .. code-block:: console MariaDB [(none)]> CREATE DATABASE nova_api; MariaDB [(none)]> CREATE DATABASE nova; MariaDB [(none)]> CREATE DATABASE nova_cell0; - MariaDB [(none)]> CREATE DATABASE placement; * Grant proper access to the databases: @@ -48,11 +46,6 @@ databases, service credentials, and API endpoints. MariaDB [(none)]> GRANT ALL PRIVILEGES ON nova_cell0.* TO 'nova'@'%' \ IDENTIFIED BY 'NOVA_DBPASS'; - MariaDB [(none)]> GRANT ALL PRIVILEGES ON placement.* TO 'placement'@'localhost' \ - IDENTIFIED BY 'PLACEMENT_DBPASS'; - MariaDB [(none)]> GRANT ALL PRIVILEGES ON placement.* TO 'placement'@'%' \ - IDENTIFIED BY 'PLACEMENT_DBPASS'; - Replace ``NOVA_DBPASS`` with a suitable password. * Exit the database access client. @@ -166,106 +159,11 @@ databases, service credentials, and API endpoints. | url | http://controller:8774/v2.1 | +--------------+-------------------------------------------+ -#. Create a Placement service user using your chosen ``PLACEMENT_PASS``: - - .. 
code-block:: console - - $ openstack user create --domain default --password-prompt placement - - User Password: - Repeat User Password: - +---------------------+----------------------------------+ - | Field | Value | - +---------------------+----------------------------------+ - | domain_id | default | - | enabled | True | - | id | fa742015a6494a949f67629884fc7ec8 | - | name | placement | - | options | {} | - | password_expires_at | None | - +---------------------+----------------------------------+ - -#. Add the Placement user to the service project with the admin role: - - .. code-block:: console - - $ openstack role add --project service --user placement admin - - .. note:: - - This command provides no output. - -#. Create the Placement API entry in the service catalog: - - .. code-block:: console - - $ openstack service create --name placement \ - --description "Placement API" placement - - +-------------+----------------------------------+ - | Field | Value | - +-------------+----------------------------------+ - | description | Placement API | - | enabled | True | - | id | 2d1a27022e6e4185b86adac4444c495f | - | name | placement | - | type | placement | - +-------------+----------------------------------+ - -#. Create the Placement API service endpoints: - - .. code-block:: console - - $ openstack endpoint create --region RegionOne \ - placement public http://controller:8778 - - +--------------+----------------------------------+ - | Field | Value | - +--------------+----------------------------------+ - | enabled | True | - | id | 2b1b2637908b4137a9c2e0470487cbc0 | - | interface | public | - | region | RegionOne | - | region_id | RegionOne | - | service_id | 2d1a27022e6e4185b86adac4444c495f | - | service_name | placement | - | service_type | placement | - | url | http://controller:8778 | - +--------------+----------------------------------+ - - $ openstack endpoint create --region RegionOne \ - placement internal http://controller:8778 - - +--------------+----------------------------------+ - | Field | Value | - +--------------+----------------------------------+ - | enabled | True | - | id | 02bcda9a150a4bd7993ff4879df971ab | - | interface | internal | - | region | RegionOne | - | region_id | RegionOne | - | service_id | 2d1a27022e6e4185b86adac4444c495f | - | service_name | placement | - | service_type | placement | - | url | http://controller:8778 | - +--------------+----------------------------------+ +#. Install Placement service and configure user and endpoints: - $ openstack endpoint create --region RegionOne \ - placement admin http://controller:8778 - - +--------------+----------------------------------+ - | Field | Value | - +--------------+----------------------------------+ - | enabled | True | - | id | 3d71177b9e0f406f98cbff198d74b182 | - | interface | admin | - | region | RegionOne | - | region_id | RegionOne | - | service_id | 2d1a27022e6e4185b86adac4444c495f | - | service_name | placement | - | service_type | placement | - | url | http://controller:8778 | - +--------------+----------------------------------+ + * Refer to the :placement-doc:`Placement service install guide + ` + for more information. Install and configure components -------------------------------- @@ -276,13 +174,12 @@ Install and configure components .. code-block:: console - # apt install nova-api nova-conductor nova-novncproxy nova-scheduler \ - nova-placement-api + # apt install nova-api nova-conductor nova-novncproxy nova-scheduler #. 
Edit the ``/etc/nova/nova.conf`` file and complete the following actions: - * In the ``[api_database]``, ``[database]``, and ``[placement_database]`` - sections, configure database access: + * In the ``[api_database]`` and ``[database]`` sections, configure database + access: .. path /etc/nova/nova.conf .. code-block:: ini @@ -295,12 +192,8 @@ Install and configure components # ... connection = mysql+pymysql://nova:NOVA_DBPASS@controller/nova - [placement_database] - # ... - connection = mysql+pymysql://placement:PLACEMENT_DBPASS@controller/placement - Replace ``NOVA_DBPASS`` with the password you chose for the Compute - databases and ``PLACEMENT_DBPASS`` for Placement database. + databases. * In the ``[DEFAULT]`` section, configure ``RabbitMQ`` message queue access: @@ -309,7 +202,7 @@ Install and configure components [DEFAULT] # ... - transport_url = rabbit://openstack:RABBIT_PASS@controller + transport_url = rabbit://openstack:RABBIT_PASS@controller:5672/ Replace ``RABBIT_PASS`` with the password you chose for the ``openstack`` account in ``RabbitMQ``. @@ -326,11 +219,12 @@ Install and configure components [keystone_authtoken] # ... - auth_url = http://controller:5000/v3 + www_authenticate_uri = http://controller:5000/ + auth_url = http://controller:5000/ memcached_servers = controller:11211 auth_type = password - project_domain_name = default - user_domain_name = default + project_domain_name = Default + user_domain_name = Default project_name = service username = nova password = NOVA_PASS @@ -353,23 +247,6 @@ Install and configure components # ... my_ip = 10.0.0.11 - * In the ``[DEFAULT]`` section, enable support for the Networking service: - - .. path /etc/nova/nova.conf - .. code-block:: ini - - [DEFAULT] - # ... - use_neutron = true - firewall_driver = nova.virt.firewall.NoopFirewallDriver - - .. note:: - - By default, Compute uses an internal firewall driver. Since the - Networking service includes a firewall driver, you must disable the - Compute firewall driver by using the - ``nova.virt.firewall.NoopFirewallDriver`` firewall driver. - * Configure the ``[neutron]`` section of **/etc/nova/nova.conf**. Refer to the :neutron-doc:`Networking service install guide ` @@ -409,7 +286,8 @@ Install and configure components * Due to a packaging bug, remove the ``log_dir`` option from the ``[DEFAULT]`` section. - * In the ``[placement]`` section, configure the Placement API: + * In the ``[placement]`` section, configure access to the Placement + service: .. path /etc/nova/nova.conf .. code-block:: ini @@ -426,10 +304,11 @@ Install and configure components password = PLACEMENT_PASS Replace ``PLACEMENT_PASS`` with the password you choose for the - ``placement`` user in the Identity service. Comment out any other options - in the ``[placement]`` section. + ``placement`` service user created when installing + :placement-doc:`Placement `. Comment out or remove any other + options in the ``[placement]`` section. -#. Populate the ``nova-api`` and ``placement`` databases: +#. Populate the ``nova-api`` database: .. code-block:: console @@ -450,7 +329,6 @@ Install and configure components .. code-block:: console # su -s /bin/sh -c "nova-manage cell_v2 create_cell --name=cell1 --verbose" nova - 109e1d4b-536a-40d0-83c6-5f121b82b650 #. Populate the nova database: @@ -463,12 +341,12 @@ Install and configure components .. 
code-block:: console # su -s /bin/sh -c "nova-manage cell_v2 list_cells" nova - +-------+--------------------------------------+ - | Name | UUID | - +-------+--------------------------------------+ - | cell1 | 109e1d4b-536a-40d0-83c6-5f121b82b650 | - | cell0 | 00000000-0000-0000-0000-000000000000 | - +-------+--------------------------------------+ + +-------+--------------------------------------+----------------------------------------------------+--------------------------------------------------------------+----------+ + | Name | UUID | Transport URL | Database Connection | Disabled | + +-------+--------------------------------------+----------------------------------------------------+--------------------------------------------------------------+----------+ + | cell0 | 00000000-0000-0000-0000-000000000000 | none:/ | mysql+pymysql://nova:****@controller/nova_cell0?charset=utf8 | False | + | cell1 | f690f4fd-2bc5-4f15-8145-db561a7b9d3d | rabbit://openstack:****@controller:5672/nova_cell1 | mysql+pymysql://nova:****@controller/nova_cell1?charset=utf8 | False | + +-------+--------------------------------------+----------------------------------------------------+--------------------------------------------------------------+----------+ Finalize installation --------------------- diff --git a/doc/source/install/controller-install.rst b/doc/source/install/controller-install.rst index 4f09ea38427..fd97c985747 100644 --- a/doc/source/install/controller-install.rst +++ b/doc/source/install/controller-install.rst @@ -7,6 +7,6 @@ and Red Hat Enterprise Linux and CentOS. .. toctree:: - controller-install-ubuntu.rst - controller-install-obs.rst - controller-install-rdo.rst + controller-install-ubuntu + controller-install-obs + controller-install-rdo diff --git a/doc/source/install/get-started-compute.rst b/doc/source/install/get-started-compute.rst index 2a332367362..e3e81033cd0 100644 --- a/doc/source/install/get-started-compute.rst +++ b/doc/source/install/get-started-compute.rst @@ -8,12 +8,13 @@ Use OpenStack Compute to host and manage cloud computing systems. OpenStack Compute is a major part of an Infrastructure-as-a-Service (IaaS) system. The main modules are implemented in Python. -OpenStack Compute interacts with OpenStack Identity for authentication; -OpenStack Image service for disk and server images; and OpenStack Dashboard for -the user and administrative interface. Image access is limited by projects, and -by users; quotas are limited per project (the number of instances, for -example). OpenStack Compute can scale horizontally on standard hardware, and -download images to launch instances. +OpenStack Compute interacts with OpenStack Identity for authentication, +OpenStack Placement for resource inventory tracking and selection, OpenStack +Image service for disk and server images, and OpenStack Dashboard for the user +and administrative interface. Image access is limited by projects, and by +users; quotas are limited per project (the number of instances, for example). +OpenStack Compute can scale horizontally on standard hardware, and download +images to launch instances. OpenStack Compute consists of the following areas and their components: @@ -23,17 +24,13 @@ OpenStack Compute consists of the following areas and their components: orchestration activities, such as running an instance. ``nova-api-metadata`` service - Accepts metadata requests from instances. The ``nova-api-metadata`` service - is generally used when you run in multi-host mode with ``nova-network`` - installations. 
For details, see :ref:`metadata-service-deploy` - in the Compute Administrator Guide. + Accepts metadata requests from instances. For more information, refer to + :doc:`/admin/metadata-service`. ``nova-compute`` service A worker daemon that creates and terminates virtual machine instances through hypervisor APIs. For example: - - XenAPI for XenServer/XCP - - libvirt for KVM or QEMU - VMwareAPI for VMware @@ -42,10 +39,6 @@ OpenStack Compute consists of the following areas and their components: queue and performs a series of system commands such as launching a KVM instance and updating its state in the database. -``nova-placement-api`` service - Tracks the inventory and usage of each provider. For details, see - :doc:`/user/placement`. - ``nova-scheduler`` service Takes a virtual machine instance request from the queue and determines on which compute server host it runs. @@ -58,18 +51,6 @@ OpenStack Compute consists of the following areas and their components: For more information, see the ``conductor`` section in the :doc:`/configuration/config`. -``nova-consoleauth`` daemon - Authorizes tokens for users that console proxies provide. See - ``nova-novncproxy`` and ``nova-xvpvncproxy``. This service must be running - for console proxies to work. You can run proxies of either type against a - single nova-consoleauth service in a cluster configuration. For information, - see :ref:`about-nova-consoleauth`. - - .. deprecated:: 18.0.0 - - ``nova-consoleauth`` is deprecated since 18.0.0 (Rocky) and will be removed - in an upcoming release. - ``nova-novncproxy`` daemon Provides a proxy for accessing running instances through a VNC connection. Supports browser-based novnc clients. @@ -78,14 +59,10 @@ OpenStack Compute consists of the following areas and their components: Provides a proxy for accessing running instances through a SPICE connection. Supports browser-based HTML5 client. -``nova-xvpvncproxy`` daemon - Provides a proxy for accessing running instances through a VNC connection. - Supports an OpenStack-specific Java client. - The queue A central hub for passing messages between daemons. Usually implemented with - `RabbitMQ `__, also can be implemented with - another AMQP message queue, such as `ZeroMQ `__. + `RabbitMQ `__ but + :oslo.messaging-doc:`other options are available `. SQL database Stores most build-time and run-time states for a cloud infrastructure, diff --git a/doc/source/install/index.rst b/doc/source/install/index.rst index ee6c0a7c42a..aa368fa9689 100644 --- a/doc/source/install/index.rst +++ b/doc/source/install/index.rst @@ -4,8 +4,8 @@ Compute service .. toctree:: - overview.rst - get-started-compute.rst - controller-install.rst - compute-install.rst - verify.rst + overview + get-started-compute + controller-install + compute-install + verify diff --git a/doc/source/install/overview.rst b/doc/source/install/overview.rst index 25a3b56f6ff..9781973d9f3 100644 --- a/doc/source/install/overview.rst +++ b/doc/source/install/overview.rst @@ -48,6 +48,14 @@ Object Storage require additional nodes. or how to determine which architecture is required, see the `Architecture Design Guide `_. +.. warning:: + + Once a cloud has been deployed, changing the host name of *any* node in the + deployment is not supported. In some cases, it may be possible to remove a + node from the deployment, and add it again under a different host name. + Renaming a node in situ will result in problems that will require multiple + manual fixes. 
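+   To see why renaming is problematic, consider a simplified, hypothetical
+   sketch of how nova tracks services: records are keyed by host name, so a
+   renamed node registers as a brand-new service while the old records
+   linger. The real schema is more involved, but the failure mode is the
+   same:
+
+   .. code-block:: python
+
+      # Hypothetical, simplified model of nova's service records, which
+      # are keyed by host name.
+      services = {
+          ('nova-compute', 'compute1'): {'state': 'up'},
+      }
+
+      def service_heartbeat(binary, host):
+          # After renaming compute1 -> compute-a, the old record is
+          # orphaned and a duplicate appears under the new name.
+          services.setdefault((binary, host), {'state': 'up'})
+
+      service_heartbeat('nova-compute', 'compute-a')
+      print(services)  # two entries now describe one physical machine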
+ This example architecture differs from a minimal production architecture as follows: diff --git a/doc/source/install/verify.rst b/doc/source/install/verify.rst index 43abb5d9474..99936c1d9c5 100644 --- a/doc/source/install/verify.rst +++ b/doc/source/install/verify.rst @@ -23,15 +23,14 @@ Verify operation of the Compute service. +----+--------------------+------------+----------+---------+-------+----------------------------+ | Id | Binary | Host | Zone | Status | State | Updated At | +----+--------------------+------------+----------+---------+-------+----------------------------+ - | 1 | nova-consoleauth | controller | internal | enabled | up | 2016-02-09T23:11:15.000000 | - | 2 | nova-scheduler | controller | internal | enabled | up | 2016-02-09T23:11:15.000000 | - | 3 | nova-conductor | controller | internal | enabled | up | 2016-02-09T23:11:16.000000 | - | 4 | nova-compute | compute1 | nova | enabled | up | 2016-02-09T23:11:20.000000 | + | 1 | nova-scheduler | controller | internal | enabled | up | 2016-02-09T23:11:15.000000 | + | 2 | nova-conductor | controller | internal | enabled | up | 2016-02-09T23:11:16.000000 | + | 3 | nova-compute | compute1 | nova | enabled | up | 2016-02-09T23:11:20.000000 | +----+--------------------+------------+----------+---------+-------+----------------------------+ .. note:: - This output should indicate three service components enabled on the + This output should indicate two service components enabled on the controller node and one service component enabled on the compute node. #. List API endpoints in the Identity service to verify connectivity with the @@ -96,24 +95,39 @@ Verify operation of the Compute service. | 9a76d9f9-9620-4f2e-8c69-6c5691fae163 | cirros | active | +--------------------------------------+-------------+-------------+ -#. Check the cells and placement API are working successfully: +#. Check the cells and placement API are working successfully and that other + necessary prerequisites are in place: + + .. _verify-install-nova-status: .. 
code-block:: console # nova-status upgrade check
- +---------------------------+
- | Upgrade Check Results |
- +---------------------------+
- | Check: Cells v2 |
- | Result: Success |
- | Details: None |
- +---------------------------+
- | Check: Placement API |
- | Result: Success |
- | Details: None |
- +---------------------------+
- | Check: Resource Providers |
- | Result: Success |
- | Details: None |
- +---------------------------+
+ +--------------------------------------------------------------------+
+ | Upgrade Check Results |
+ +--------------------------------------------------------------------+
+ | Check: Cells v2 |
+ | Result: Success |
+ | Details: None |
+ +--------------------------------------------------------------------+
+ | Check: Placement API |
+ | Result: Success |
+ | Details: None |
+ +--------------------------------------------------------------------+
+ | Check: Cinder API |
+ | Result: Success |
+ | Details: None |
+ +--------------------------------------------------------------------+
+ | Check: Policy Scope-based Defaults |
+ | Result: Success |
+ | Details: None |
+ +--------------------------------------------------------------------+
+ | Check: Policy File JSON to YAML Migration |
+ | Result: Success |
+ | Details: None |
+ +--------------------------------------------------------------------+
+ | Check: Older than N-1 computes |
+ | Result: Success |
+ | Details: None |
+ +--------------------------------------------------------------------+
diff --git a/doc/source/reference/attach-volume.rst b/doc/source/reference/attach-volume.rst new file mode 100644 index 00000000000..a38a32e533f --- /dev/null +++ b/doc/source/reference/attach-volume.rst @@ -0,0 +1,34 @@
+..
+ Licensed under the Apache License, Version 2.0 (the "License"); you may
+ not use this file except in compliance with the License. You may obtain
+ a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ License for the specific language governing permissions and limitations
+ under the License.
+
+=================
+Attaching Volumes
+=================
+
+The following sequence diagram outlines the current flow when attaching a
+volume to an instance using the ``os-volume_attachments`` API. This diagram
+uses the ``libvirt`` driver as an example virt driver to additionally document
+the optional interactions with the ``os-brick`` library on the compute hosts
+during the request.
+
+.. note:: ``os-brick`` is not always used to connect volumes to the host, most
+ notably when connecting an instance natively to ceph ``rbd`` volumes.
+
+The diagram also outlines the various locks taken on the compute during the
+attach volume flow. In this example these include locks against
+``instance.uuid``, the ``cinder_backend.uuid`` lock orchestrated for
+``nova-compute`` by ``os-brick``, and the generic ``connect_volume`` lock
+taken within ``os-brick`` itself. This final ``connect_volume`` lock is also
+held when ``os-brick`` detaches and disconnects a volume from the host.
+
+.. 
seqdiag:: attach_volume.diag diff --git a/doc/source/reference/attach_volume.diag b/doc/source/reference/attach_volume.diag new file mode 100644 index 00000000000..63386f5c644 --- /dev/null +++ b/doc/source/reference/attach_volume.diag @@ -0,0 +1,34 @@ +seqdiag { + user; nova-api; nova-conductor; nova-compute; libvirt-driver; os-brick; cinder-api; + edge_length = 300; + span_height = 16; + activation = none; + default_note_color = white; + user -> nova-api [label = "POST /servers/{server_id}/os-volume_attachments"]; + nova-api -> nova-compute [label = "RPC call reserve_block_device_name"]; + nova-compute -> nova-compute [label = "instance.uuid lock"]; + nova-compute ->> nova-conductor [label = "bdm.create"]; + nova-compute <<- nova-conductor [label = "return BlockDeviceMapping"]; + nova-compute -> libvirt-driver [label = "get_device_name_for_instance"]; + nova-compute <- libvirt-driver [label = "Return get_device_name_for_instance"]; + nova-api <- nova-compute [label = "Return reserve_block_device_name"]; + nova-api -> cinder-api [label = "POST /v3/{project_id}/attachments"]; + nova-api <- cinder-api [label = "Return HTTP 200 (without connection_info)"]; + nova-api ->> nova-compute [label = "RPC cast attach_volume"]; + user <- nova-api [label = "Return HTTP 200 (includes device_name)"]; + nova-compute -> nova-compute [label = "instance.uuid lock"]; + nova-compute -> os-brick [label = "cinder_backend.uuid lock"]; + nova-compute -> cinder-api [label = "PUT /v3/{project_id}/attachments/{attachment_id}"]; + nova-compute <- cinder-api [label = "Return HTTP 200 (includes connection_info)"]; + nova-compute -> libvirt-driver [label = "attach_volume"]; + libvirt-driver -> os-brick [label = "connect_volume"]; + os-brick -> os-brick [label = "connect_volume lock"]; + libvirt-driver <- os-brick; + libvirt-driver -> libvirt-driver [label = "guest.attach_device"]; + libvirt-driver -> libvirt-driver [label = "_build_device_metadata"]; + libvirt-driver ->> nova-conductor [label = "instance.save"]; + nova-compute <- libvirt-driver [label = "Return attach_volume"]; + nova-compute ->> nova-conductor [label = "bdm.save"]; + nova-compute -> cinder-api [label = "POST /v3/{project_id}/attachments/{attachment_id}/action (os-complete)"]; + nova-compute <- cinder-api [label = "Return HTTP 200"]; +} diff --git a/doc/source/reference/block-device-structs.rst b/doc/source/reference/block-device-structs.rst new file mode 100644 index 00000000000..1b8636c5378 --- /dev/null +++ b/doc/source/reference/block-device-structs.rst @@ -0,0 +1,227 @@ +========================== +Driver BDM Data Structures +========================== + +In addition to the :doc:`API BDM data format ` +there are also several internal data structures within Nova that map out how +block devices are attached to instances. This document aims to outline the two +general data structures and two additional specific data structures used by the +libvirt virt driver. + +.. note:: + + This document is based on an email to the openstack-dev mailing + list by Matthew Booth below provided as a primer for developers working on + virt drivers and interacting with these data structures. + + http://lists.openstack.org/pipermail/openstack-dev/2016-June/097529.html + +.. note:: + + References to local disks in the following document refer to any + disk directly managed by nova compute. If nova is configured to use RBD or + NFS for instance disks then these disks won't actually be local, but they + are still managed locally and referred to as local disks. 
This is in contrast to RBD
+ volumes provided by Cinder, which are not considered local.
+
+Generic BDM data structures
+===========================
+
+``BlockDeviceMapping``
+----------------------
+
+The 'top level' data structure is the ``BlockDeviceMapping`` (BDM) object. It
+is a ``NovaObject``, persisted in the DB. Current code creates a BDM object for
+every disk associated with an instance, whether it is a volume or not.
+
+The BDM object describes properties of each disk as specified by the user. It
+is initially populated from a user request; for more details on the format of
+these requests, please see the :doc:`Block Device Mapping in Nova
+<../user/block-device-mapping>` document.
+
+The Compute API transforms and consolidates all BDMs to ensure that all disks,
+explicit or implicit, have a BDM, and then persists them. Look in
+``nova.objects.block_device`` for all BDM fields, but in essence they contain
+information like (source_type='image', destination_type='local',
+image_id='<image uuid>'), or equivalents describing ephemeral disks, swap disks
+or volumes, and some associated data.
+
+.. note::
+
+ BDM objects are typically stored in variables called ``bdm`` with lists
+ in ``bdms``, although this is obviously not guaranteed (and unfortunately
+ not always true: ``bdm`` in ``libvirt.block_device`` is usually a
+ ``DriverBlockDevice`` object). This is a useful reading aid (except when
+ it's proactively confounding), as there is also something else typically
+ called ``block_device_mapping`` which is not a ``BlockDeviceMapping``
+ object.
+
+``block_device_info``
+---------------------
+
+.. versionchanged:: 24.0.0 (Xena)
+
+ The legacy block_device_info format is no longer supported.
+
+Drivers do not directly use BDM objects. Instead, they are transformed into a
+different driver-specific representation. This representation is normally
+called ``block_device_info``, and is generated by
+``virt.driver.get_block_device_info()``. Its output is based on data in BDMs.
+``block_device_info`` is a dict containing:
+
+``root_device_name``
+ Hypervisor's notion of the root device's name
+``ephemerals``
+ A list of all ephemeral disks
+``block_device_mapping``
+ A list of all cinder volumes
+``swap``
+ A swap disk, or None if there is no swap disk
+
+.. note::
+
+ The disks were previously represented in one of two ways, depending on the
+ specific driver in use: a legacy plain dict format or the currently used
+ ``DriverBlockDevice`` format discussed below. Support for the legacy format
+ was removed in Xena.
+
+Disks are represented by subclasses of ``nova.block_device.DriverBlockDevice``.
+These subclasses retain a reference to the underlying BDM object. This means
+that by manipulating the ``DriverBlockDevice`` object, the driver is able to
+persist data to the BDM object in the DB.
+
+.. note::
+
+ Common usage is to pull ``block_device_mapping`` out of this
+ dict into a variable called ``block_device_mapping``. This is not a
+ ``BlockDeviceMapping`` object, or a list of them.
+
+.. note::
+
+ If ``block_device_info`` was passed to the driver by compute manager, it
+ was probably generated by ``_get_instance_block_device_info()``.
+ By default, this function filters out all cinder volumes from
+ ``block_device_mapping`` which don't currently have ``connection_info``.
+ In other contexts this filtering will not have happened, and
+ ``block_device_mapping`` will contain all volumes.
+
+.. note::
+
+ Unlike BDMs, ``block_device_info`` does not currently represent all
+ disks that an instance might have. 
Significantly, it will not contain any + representation of an image-backed local disk, i.e. the root disk of a + typical instance which isn't boot-from-volume. Other representations used + by the libvirt driver explicitly reconstruct this missing disk. + +libvirt driver specific BDM data structures +=========================================== + +``instance_disk_info`` +---------------------- + +The virt driver API defines a method ``get_instance_disk_info``, which returns +a JSON blob. The compute manager calls this and passes the data over RPC +between calls without ever looking at it. This is driver-specific opaque data. +It is also only used by the libvirt driver, despite being part of the API for +all drivers. Other drivers do not return any data. The most interesting aspect +of ``instance_disk_info`` is that it is generated from the libvirt XML, not +from nova's state. + +.. note:: + + ``instance_disk_info`` is often named ``disk_info`` in code, which + is unfortunate as this clashes with the normal naming of the next + structure. Occasionally the two are used in the same block of code. + +.. note:: + + RBD disks (including non-volume disks) and cinder volumes + are not included in ``instance_disk_info``. + +``instance_disk_info`` is a list of dicts for some of an instance's disks. Each +dict contains the following: + +``type`` + libvirt's notion of the disk's type +``path`` + libvirt's notion of the disk's path +``virt_disk_size`` + The disk's virtual size in bytes (the size the guest OS sees) +``backing_file`` + libvirt's notion of the backing file path +``disk_size`` + The file size of path, in bytes. +``over_committed_disk_size`` + As-yet-unallocated disk size, in bytes. + +``disk_info`` +------------- + +.. note:: + + As opposed to ``instance_disk_info``, which is frequently called + ``disk_info``. + +This data structure is actually described pretty well in the comment block at +the top of ``nova.virt.libvirt.blockinfo``. It is internal to the libvirt +driver. It contains: + +``disk_bus`` + The default bus used by disks +``cdrom_bus`` + The default bus used by cdrom drives +``mapping`` + Defined below + +``mapping`` is a dict which maps disk names to a dict describing how that disk +should be passed to libvirt. This mapping contains every disk connected to the +instance, both local and volumes. + +First, a note on disk naming. Local disk names used by the libvirt driver are +well defined. They are: + +``disk`` + The root disk +``disk.local`` + The flavor-defined ephemeral disk +``disk.ephX`` + Where X is a zero-based index for BDM defined ephemeral disks +``disk.swap`` + The swap disk +``disk.config`` + The config disk + +These names are hardcoded, reliable, and used in lots of places. + +In ``disk_info``, volumes are keyed by device name, eg 'vda', 'vdb'. Different +buses will be named differently, approximately according to legacy Linux +device naming. + +Additionally, ``disk_info`` will contain a mapping for 'root', which is the +root disk. This will duplicate one of the other entries, either 'disk' or a +volume mapping. 
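+To make the naming scheme and the duplicated 'root' entry concrete, below is
+a purely illustrative sketch of what a ``disk_info`` value might look like
+for an image-backed instance with one flavor ephemeral disk; real values are
+computed by ``nova.virt.libvirt.blockinfo`` and will differ per instance and
+configuration. The semantics of the per-disk fields are described next.
+
+.. code-block:: python
+
+   # Purely illustrative disk_info value; not produced by this exact code.
+   disk_info = {
+       'disk_bus': 'virtio',
+       'cdrom_bus': 'ide',
+       'mapping': {
+           'disk': {'bus': 'virtio', 'dev': 'vda', 'type': 'disk',
+                    'boot_index': '1'},
+           # 'root' duplicates the entry for the root disk, here 'disk'.
+           'root': {'bus': 'virtio', 'dev': 'vda', 'type': 'disk',
+                    'boot_index': '1'},
+           # The flavor-defined ephemeral disk uses the well-known name.
+           'disk.local': {'bus': 'virtio', 'dev': 'vdb', 'type': 'disk'},
+       },
+   }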
+
+Each dict within the ``mapping`` dict contains three required fields
+(``bus``, ``dev`` and ``type``) and two optional fields (``format`` and
+``boot_index``):
+
+``bus``
+ The guest bus type ('ide', 'virtio', 'scsi', etc)
+``dev``
+ The device name 'vda', 'hdc', 'sdf', 'xvde' etc
+``type``
+ Type of device eg 'disk', 'cdrom', 'floppy'
+``format``
+ Which format to apply to the device if applicable
+``boot_index``
+ Number designating the boot order of the device
+
+.. note::
+
+ ``BlockDeviceMapping`` and ``DriverBlockDevice`` store boot index
+ zero-based. However, libvirt's boot index is 1-based, so the value stored
+ here is 1-based.
+
+.. todo::
+
+ Add a section for the per disk ``disk.info`` file within instance
+ directory when using the libvirt driver.
diff --git a/doc/source/user/conductor.rst b/doc/source/reference/conductor.rst similarity index 94% rename from doc/source/user/conductor.rst rename to doc/source/reference/conductor.rst index 3619835dd64..ae4cc9c1b9f 100644 --- a/doc/source/user/conductor.rst +++ b/doc/source/reference/conductor.rst @@ -66,7 +66,3 @@ this was to change to process to be the following:
 This new process means the scheduler only deals with scheduling, the compute
 only deals with building an instance, and the conductor manages the workflow.
 The code is now cleaner in the scheduler and computes.
-
-The resize/migrate process has not yet been fully converted to a style to take
-advantage of what conductor can provide so expect that this will change over
-time.
diff --git a/doc/source/reference/database-migrations.rst b/doc/source/reference/database-migrations.rst new file mode 100644 index 00000000000..add7597e93b --- /dev/null +++ b/doc/source/reference/database-migrations.rst @@ -0,0 +1,187 @@
+===================
+Database migrations
+===================
+
+.. note::
+
+ This document details how to generate database migrations as part of a new
+ feature or bugfix. For info on how to apply existing database migrations,
+ refer to the documentation for the :program:`nova-manage db sync` and
+ :program:`nova-manage api_db sync` commands in :doc:`/cli/nova-manage`.
+ For info on the general upgrade process for a nova deployment, refer to
+ :doc:`/admin/upgrades`.
+
+A typical nova deployment consists of an "API" database and one or more
+cell-specific "main" databases. Occasionally these databases will require
+schema or data migrations.
+
+
+Schema migrations
+-----------------
+
+.. versionchanged:: 24.0.0 (Xena)
+
+ The database migration engine was changed from ``sqlalchemy-migrate`` to
+ ``alembic``.
+
+The `alembic`__ database migration tool is used to manage schema migrations in
+nova. The migration files and related metadata can be found in
+``nova/db/api/migrations`` (for the API database) and
+``nova/db/main/migrations`` (for the main database(s)). As discussed in
+:doc:`/admin/upgrades`, these can be run by end users using the
+:program:`nova-manage api_db sync` and :program:`nova-manage db sync` commands,
+respectively.
+
+.. __: https://alembic.sqlalchemy.org/en/latest/
+
+.. note::
+
+ There are also legacy migrations provided in the ``legacy_migrations``
+ subdirectory for both the API and main databases. These are provided to
+ facilitate upgrades from pre-Xena (24.0.0) deployments and will be removed
+ in a future release. They should not be modified or extended.
+
+The best reference for alembic is the `alembic documentation`__, but a small
+example is provided here. You can create the migration either manually or
+automatically. 
Manual generation might be necessary for some corner cases, such
+as renamed tables, but auto-generation will typically handle your issues.
+Examples of both are provided below. In both examples, we're going to
+demonstrate how you could add a new model, ``Foo``, to the main database.
+
+.. __: https://alembic.sqlalchemy.org/en/latest/
+
+.. code-block:: diff
+
+ diff --git nova/db/main/models.py nova/db/main/models.py
+ index 7eab643e14..8f70bcdaca 100644
+ --- nova/db/main/models.py
+ +++ nova/db/main/models.py
+ @@ -73,6 +73,16 @@ def MediumText():
+ sqlalchemy.dialects.mysql.MEDIUMTEXT(), 'mysql')
+
+
+ +class Foo(BASE, models.SoftDeleteMixin):
+ + """A test-only model."""
+ +
+ + __tablename__ = 'foo'
+ +
+ + id = sa.Column(sa.Integer, primary_key=True)
+ + uuid = sa.Column(sa.String(36), nullable=True)
+ + bar = sa.Column(sa.String(255))
+ +
+ +
+ class Service(BASE, models.SoftDeleteMixin):
+ """Represents a running service on a host."""
+
+(you might not be able to apply the diff above cleanly - this is just a demo).
+
+.. rubric:: Auto-generating migration scripts
+
+In order for alembic to compare the migrations with the underlying models, it
+requires a database that it can inspect and compare the models against. As
+such, we first need to create a working database. We'll bypass ``nova-manage``
+for this and go straight to the :program:`alembic` CLI. The ``alembic.ini``
+file provided in the ``migrations`` directories for both databases is helpfully
+configured to use an SQLite database by default (``nova.db`` for the main
+database and ``nova_api.db`` for the API database). Create this database and
+apply the current schema, as dictated by the current migration scripts:
+
+.. code-block:: bash
+
+ $ tox -e venv -- alembic -c nova/db/main/alembic.ini \
+ upgrade head
+
+Once done, you should notice the new ``nova.db`` file in the root of the repo.
+Now, let's generate the new revision:
+
+.. code-block:: bash
+
+ $ tox -e venv -- alembic -c nova/db/main/alembic.ini \
+ revision -m "Add foo model" --autogenerate
+
+This will create a new file in ``nova/db/main/migrations`` with
+``add_foo_model`` in the name, including (hopefully!) the necessary changes to
+add the new ``Foo`` model. You **must** inspect this file once created, since
+there's a chance you'll be missing imports or something else which will need to
+be manually corrected. Once you've inspected this file and made any required
+changes, you can apply the migration and make sure it works:
+
+.. code-block:: bash
+
+ $ tox -e venv -- alembic -c nova/db/main/alembic.ini \
+ upgrade head
+
+.. rubric:: Manually generating migration scripts
+
+For trickier migrations or things that alembic doesn't understand, you may need
+to manually create a migration script. This is very similar to the
+auto-generation step, with the exception being that you don't need to have a
+database in place beforehand. As such, you can simply run:
+
+.. code-block:: bash
+
+ $ tox -e venv -- alembic -c nova/db/main/alembic.ini \
+ revision -m "Add foo model"
+
+As before, this will create a new file in ``nova/db/main/migrations`` with
+``add_foo_model`` in the name. You can simply modify this to make whatever
+changes are necessary. Once done, you can apply the migration and make sure it
+works:
+
+.. 
code-block:: bash
+
+ $ tox -e venv -- alembic -c nova/db/main/alembic.ini \
+ upgrade head
+
+
+Data migrations
+---------------
+
+As discussed in :doc:`/admin/upgrades`, online data migrations occur in two
+places:
+
+- Inline migrations that occur as part of normal run-time activity as data is
+ read in the old format and written in the new format.
+
+- Background online migrations that are performed using ``nova-manage`` to
+ complete transformations that will not occur incidentally due to normal
+ runtime activity.
+
+.. rubric:: Inline data migrations
+
+Inline data migrations are arguably the easier of the two to implement. Almost
+all of nova's database models correspond to an oslo.versionedobject (o.vo) or
+part of one. These o.vos load their data from the underlying database by
+implementing the ``obj_load_attr`` method. By modifying this method, it's
+possible to detect missing changes to the data - for example, a missing field -
+modify the data, save it back to the database, and finally return an object
+with the newly updated data. Change I6cd206542fdd28f3ef551dcc727f4cb35a53f6a3
+provides a fully worked example of this approach.
+
+The main advantage of these is that they are completely transparent to the
+operator, who does not have to take any additional steps to upgrade their
+deployment: the database updates should happen at runtime as data is pulled
+from the database. The main disadvantage of this approach is that some
+records may not be frequently pulled from the database, meaning they never have
+a chance to get updated. This can prevent the eventual removal of the inline
+migration in a future release. To avoid this issue, you should inspect the
+object to see if it's something that will be loaded as part of a standard
+runtime operation - for example, on startup or as part of a background task -
+and if necessary add a blocking online migration in a later release to catch
+and migrate the laggards.
+
+.. rubric:: Online data migrations
+
+Unlike inline data migrations, online data migrations require operator
+involvement. They are run using the ``nova-manage db online_data_migrations``
+command which, as noted in :doc:`/cli/nova-manage`, should be run straight
+after upgrading to a new release once the database schema migrations have been
+applied and the code updated. Online migrations can be blocking, in that it
+will be necessary to apply given migrations while running N code before
+upgrading to N+1. Change I44919422c48570f2647f2325ff895255fc2adf27 provides a
+fully worked example of this approach.
+
+The advantages and disadvantages of this approach are the inverse of those of
+the inline data migrations approach. While they can be used to ensure a data
+migration is actually applied, they require operator involvement and can
+prevent upgrades until fully applied.
diff --git a/doc/source/reference/glossary.rst b/doc/source/reference/glossary.rst new file mode 100644 index 00000000000..304467e908d --- /dev/null +++ b/doc/source/reference/glossary.rst @@ -0,0 +1,52 @@
+========
+Glossary
+========
+
+.. glossary::
+
+ Availability Zone
+ Availability zones are a logical subdivision of cloud block storage,
+ compute and network services. They provide a way for cloud operators to
+ logically segment their compute based on arbitrary factors like
+ location (country, datacenter, rack), network layout and/or power
+ source.
+
+ For more information, refer to :doc:`/admin/aggregates`. 
+ + Boot From Volume + A server that is created with a + :doc:`Block Device Mapping ` with + ``boot_index=0`` and ``destination_type=volume``. The root volume can + already exist when the server is created or be created by the compute + service as part of the server creation. Note that a server can have + volumes attached and not be boot-from-volume. A boot from volume server + has an empty ("") ``image`` parameter in ``GET /servers/{server_id}`` + responses. + + Cross-Cell Resize + A resize (or cold migrate) operation where the source and destination + compute hosts are mapped to different cells. By default, resize and + cold migrate operations occur within the same cell. + + For more information, refer to + :doc:`/admin/configuration/cross-cell-resize`. + + Host Aggregate + Host aggregates can be regarded as a mechanism to further partition an + :term:`Availability Zone`; while availability zones are visible to + users, host aggregates are only visible to administrators. Host + aggregates provide a mechanism to allow administrators to assign + key-value pairs to groups of machines. Each node can have multiple + aggregates, each aggregate can have multiple key-value pairs, and the + same key-value pair can be assigned to multiple aggregates. + + For more information, refer to :doc:`/admin/aggregates`. + + Same-Cell Resize + A resize (or cold migrate) operation where the source and destination + compute hosts are mapped to the same cell. Also commonly referred to + as "standard resize" or simply "resize". By default, resize and + cold migrate operations occur within the same cell. + + For more information, refer to + :doc:`/contributor/resize-and-cold-migrate`. diff --git a/doc/source/reference/i18n.rst b/doc/source/reference/i18n.rst index 799049b91fc..7795fbe3517 100644 --- a/doc/source/reference/i18n.rst +++ b/doc/source/reference/i18n.rst @@ -15,9 +15,7 @@ network). One upon a time there was an effort to translate log messages in OpenStack projects. But starting with the Ocata release these are no -longer being supported. Log messages **should not** be translated. Any -use of ``_LI()``, ``_LW()``, ``_LE()``, ``_LC()`` are vestigial and -will be removed over time. No new uses of these should be added. +longer being supported. Log messages **should not** be translated. You should use the basic wrapper ``_()`` for strings which are not log messages that are expected to get to an end user:: @@ -25,6 +23,7 @@ messages that are expected to get to an end user:: raise nova.SomeException(_('Invalid service catalogue')) Do not use ``locals()`` for formatting messages because: + 1. It is not as clear as using explicit dicts. 2. It could produce hidden errors during refactoring. 3. Changing the name of a variable causes a change in the message. diff --git a/doc/source/reference/index.rst b/doc/source/reference/index.rst index b763b3732c1..6b397f5f5cf 100644 --- a/doc/source/reference/index.rst +++ b/doc/source/reference/index.rst @@ -6,6 +6,8 @@ The nova project is large, and there are lots of complicated parts in it where it helps to have an overview to understand how the internals of a particular part work. +.. _reference-internals: + Internals ========= @@ -13,6 +15,8 @@ The following is a dive into some of the internals in nova. 
* :doc:`/reference/rpc`: How nova uses AMQP as an RPC transport * :doc:`/reference/scheduling`: The workflow through the scheduling process
+* :doc:`/reference/scheduler-hints-vs-flavor-extra-specs`: The similarities
+ and differences between flavor extra specs and scheduler hints.
 * :doc:`/reference/live-migration`: The live migration flow * :doc:`/reference/services`: Module descriptions for some of the key modules used in starting / running services @@ -24,6 +28,45 @@ The following is a dive into some of the internals in nova. nova, and considerations when adding notifications. * :doc:`/reference/update-provider-tree`: A detailed explanation of the ``ComputeDriver.update_provider_tree`` method.
+* :doc:`/reference/upgrade-checks`: A guide to writing automated upgrade
+ checks.
+* :doc:`/reference/database-migrations`: A guide to writing database
+ migrations, be they online or offline.
+* :doc:`/reference/conductor`
+
+.. todo:: Need something about versioned objects and how they fit in with
+ conductor as an object backporter during upgrades.
+
+* :doc:`/reference/isolate-aggregates`: Describes how the placement filter
+ works in nova to isolate groups of hosts.
+* :doc:`/reference/attach-volume`: Describes the attach volume flow, using the
+ libvirt virt driver as an example.
+* :doc:`/reference/block-device-structs`: Block Device Data Structures
+* :doc:`/reference/libvirt-distro-support-matrix`: Libvirt virt driver OS
+ distribution support matrix
+
+.. # NOTE(amotoki): toctree needs to be placed at the end of the section to
+ # keep the document structure in the PDF doc.
+.. toctree::
+ :hidden:
+
+ rpc
+ scheduling
+ scheduler-hints-vs-flavor-extra-specs
+ live-migration
+ services
+ vm-states
+ threading
+ notifications
+ database-migrations
+ update-provider-tree
+ upgrade-checks
+ conductor
+ isolate-aggregates
+ api-microversion-history
+ attach-volume
+ block-device-structs
+ libvirt-distro-support-matrix
 Debugging ========= @@ -31,6 +74,13 @@ Debugging * :doc:`/reference/gmr`: Inspired by Amiga, a way to trigger a very comprehensive dump of a running service for deep debugging.
+.. # NOTE(amotoki): toctree needs to be placed at the end of the section to
+ # keep the document structure in the PDF doc.
+.. toctree::
+ :hidden:
+
+ gmr
+
 Forward Looking Plans ===================== @@ -42,9 +92,32 @@ these documents will move into the "Internals" section. If you want to get involved in shaping the future of nova's architecture, these are a great place to start reading up on the current plans.
-* :doc:`/user/cells`: Comparison of Cells v1 and v2, and how v2 is evolving
+* :doc:`/user/cells`: How cells v2 is evolving
 * :doc:`/reference/policy-enforcement`: How we want policy checks on API actions to work in the future * :doc:`/reference/stable-api`: What stable api means to nova * :doc:`/reference/scheduler-evolution`: Motivation behind the scheduler / placement evolution
+
+.. # NOTE(amotoki): toctree needs to be placed at the end of the section to
+ # keep the document structure in the PDF doc.
+.. toctree::
+ :hidden:
+
+ /user/cells
+ policy-enforcement
+ stable-api
+ scheduler-evolution
+
+Additional Information
+======================
+
+* :doc:`/reference/glossary`: A quick reference guide to some of the terms you
+ might encounter working on or using nova.
+
+.. # NOTE(amotoki): toctree needs to be placed at the end of the section to
+ # keep the document structure in the PDF doc.
+.. 
toctree::
+ :hidden:
+
+ glossary
diff --git a/doc/source/reference/isolate-aggregates.rst b/doc/source/reference/isolate-aggregates.rst new file mode 100644 index 00000000000..f5487df9129 --- /dev/null +++ b/doc/source/reference/isolate-aggregates.rst @@ -0,0 +1,99 @@
+..
+ Copyright 2019 NTT DATA
+
+ Licensed under the Apache License, Version 2.0 (the "License"); you may
+ not use this file except in compliance with the License. You may obtain
+ a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ License for the specific language governing permissions and limitations
+ under the License.
+
+Filtering hosts by isolating aggregates
+=======================================
+
+Background
+----------
+
+I want to set up an aggregate ``ABC`` with hosts that allow you to run only
+certain licensed images. I could tag the aggregate with metadata such as
+``<LICENSED=WINDOWS>``. Then if I boot an instance with an image containing the
+property ``<LICENSED=WINDOWS>``, it will land on one of the hosts in aggregate
+``ABC``. But if the user creates a new image which does not include
+``<LICENSED=WINDOWS>`` metadata, an instance booted with that image could still
+land on a host in aggregate ``ABC`` as reported in launchpad bug `1677217`_.
+The :ref:`AggregateImagePropertiesIsolation` scheduler filter passes even
+though the aggregate metadata ``<LICENSED=WINDOWS>`` is not present in the
+image properties.
+
+.. _1677217: https://bugs.launchpad.net/nova/+bug/1677217
+
+Solution
+--------
+
+The above problem is addressed by blueprint
+`placement-req-filter-forbidden-aggregates`_ which was implemented in the
+20.0.0 Train release.
+
+The following example assumes you have configured aggregate ``ABC`` and added
+hosts ``HOST1`` and ``HOST2`` to it in Nova, and that you want to isolate those
+hosts to run only instances requiring Windows licensing.
+
+#. Set the :oslo.config:option:`scheduler.enable_isolated_aggregate_filtering`
+ config option to ``true`` in nova.conf and restart the nova-scheduler
+ service.
+
+#. Add trait ``CUSTOM_LICENSED_WINDOWS`` to the resource providers for
+ ``HOST1`` and ``HOST2`` in the Placement service.
+
+ First create the ``CUSTOM_LICENSED_WINDOWS`` trait
+
+ .. code-block:: console
+
+ # openstack --os-placement-api-version 1.6 trait create CUSTOM_LICENSED_WINDOWS
+
+ Assume ``<HOST1_UUID>`` is the UUID of ``HOST1``, which is the same as its resource provider UUID.
+
+ Start to build the command line by first collecting existing traits for ``HOST1``
+
+ .. code-block:: console
+
+ # traits=$(openstack --os-placement-api-version 1.6 resource provider trait list -f value <HOST1_UUID> | sed 's/^/--trait /')
+
+ Replace ``HOST1``\ 's traits, adding ``CUSTOM_LICENSED_WINDOWS``
+
+ .. code-block:: console
+
+ # openstack --os-placement-api-version 1.6 resource provider trait set $traits --trait CUSTOM_LICENSED_WINDOWS <HOST1_UUID>
+
+ Repeat the above steps for ``HOST2``.
+
+#. Add the ``trait:CUSTOM_LICENSED_WINDOWS=required`` metadata property to
+ aggregate ``ABC``.
+
+ .. code-block:: console
+
+ # openstack --os-compute-api-version 2.53 aggregate set --property trait:CUSTOM_LICENSED_WINDOWS=required ABC
+
+As before, any instance spawned with a flavor or image containing
+``trait:CUSTOM_LICENSED_WINDOWS=required`` will land on ``HOST1`` or ``HOST2``
+because those hosts expose that trait. 
+
+However, now that the ``isolate_aggregates`` request filter is configured,
+any instance whose flavor or image **does not** contain
+``trait:CUSTOM_LICENSED_WINDOWS=required`` will **not** land on ``HOST1`` or
+``HOST2`` because aggregate ``ABC`` requires that trait.
+
+The above example uses a ``CUSTOM_LICENSED_WINDOWS`` trait, but you can use any
+custom or `standard trait`_ in a similar fashion.
+
+The filter supports the use of multiple traits across multiple aggregates. The
+combination of flavor and image metadata must require **all** of the traits
+configured on the aggregate in order to pass.
+
+.. _placement-req-filter-forbidden-aggregates: https://specs.openstack.org/openstack/nova-specs/specs/train/approved/placement-req-filter-forbidden-aggregates.html
+.. _standard trait: https://docs.openstack.org/os-traits/latest/
diff --git a/doc/source/reference/libvirt-distro-support-matrix.rst b/doc/source/reference/libvirt-distro-support-matrix.rst new file mode 100644 index 00000000000..ec5ab4e35e2 --- /dev/null +++ b/doc/source/reference/libvirt-distro-support-matrix.rst @@ -0,0 +1,323 @@
+Libvirt virt driver OS distribution support matrix
+==================================================
+
+This page documents the libvirt versions present in the various distro versions
+that OpenStack Nova aims to be deployable with.
+
+.. note::
+
+ This document was previously hosted on the OpenStack wiki:
+
+ https://wiki.openstack.org/wiki/LibvirtDistroSupportMatrix
+
+Libvirt minimum version change policy
+-------------------------------------
+
+At the start of each Nova development cycle this matrix will be consulted to
+determine if it is viable to drop support for any end-of-life or otherwise
+undesired distro versions. Based on this distro evaluation, it may be possible
+to increase the minimum required version of libvirt in Nova, and thus drop some
+compatibility code for older versions.
+
+When a decision to update the minimum required libvirt version is made, there
+must be a warning issued for one cycle. This is achieved by editing
+``nova/virt/libvirt/driver.py`` to set ``NEXT_MIN_LIBVIRT_VERSION``.
+For example:
+
+.. code::
+
+ NEXT_MIN_LIBVIRT_VERSION = (X, Y, Z)
+
+This causes a deprecation warning to be emitted when Nova starts up, warning
+the admin that the version of libvirt in use on the host will no longer be
+supported in the subsequent release.
+
+After a version has been listed in ``NEXT_MIN_LIBVIRT_VERSION`` for one release
+cycle, the corresponding actual minimum required libvirt can be updated by
+setting:
+
+.. code::
+
+ MIN_LIBVIRT_VERSION = (X, Y, Z)
+
+At this point, of course, an even newer version might be set in
+``NEXT_MIN_LIBVIRT_VERSION``, and the process repeats.
+
+An email should also be sent at this point to the
+``openstack-discuss@lists.openstack.org`` mailing list as a courtesy raising
+awareness of the change in minimum version requirements in the upcoming
+release, for example:
+
+http://lists.openstack.org/pipermail/openstack-discuss/2021-January/019849.html
+
+There is more background on the rationale used for picking minimum versions in
+the operators mailing list thread here:
+
+http://lists.openstack.org/pipermail/openstack-operators/2015-May/007012.html
+
+QEMU minimum version change policy
+----------------------------------
+
+After choosing a minimum libvirt version, the minimum QEMU version is
+determined by looking for the lowest QEMU version from all the distros that
+support the decided libvirt version. 
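+As a purely illustrative aside, the version gating described above reduces to
+simple tuple comparisons. The sketch below is not Nova's actual startup code
+and the version numbers are arbitrary; see the table below for real values.
+
+.. code-block:: python
+
+   import warnings
+
+   # Arbitrary example values, not taken from any actual release.
+   MIN_LIBVIRT_VERSION = (6, 0, 0)
+   NEXT_MIN_LIBVIRT_VERSION = (7, 0, 0)
+
+   def check_libvirt(host_version):
+       """Fail on too-old libvirt; warn when below the next minimum."""
+       def fmt(v):
+           return '.'.join(map(str, v))
+       if host_version < MIN_LIBVIRT_VERSION:
+           raise RuntimeError(
+               'libvirt %s is below the required minimum %s'
+               % (fmt(host_version), fmt(MIN_LIBVIRT_VERSION)))
+       if host_version < NEXT_MIN_LIBVIRT_VERSION:
+           warnings.warn(
+               'libvirt %s will be unsupported in the next release; '
+               'the minimum will become %s'
+               % (fmt(host_version), fmt(NEXT_MIN_LIBVIRT_VERSION)))
+
+   check_libvirt((6, 6, 0))  # warns, but passes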
+
+``MIN_{LIBVIRT,QEMU}_VERSION`` and ``NEXT_MIN_{LIBVIRT,QEMU}_VERSION`` table
+----------------------------------------------------------------------------
+
+.. list-table:: OpenStack Nova libvirt/QEMU Support Matrix
+
+   * - OpenStack Release
+     - Nova Release
+     - ``MIN_LIBVIRT_VERSION``
+     - ``NEXT_MIN_LIBVIRT_VERSION``
+     - ``MIN_QEMU_VERSION``
+     - ``NEXT_MIN_QEMU_VERSION``
+   * - Havana
+     - 2013.2.0
+     - 0.9.6
+     - 0.9.6
+     -
+     -
+   * - Icehouse
+     - 2014.1
+     - 0.9.6
+     - 0.9.11
+     -
+     -
+   * - Juno
+     - 2014.2.0
+     - 0.9.11
+     - 0.9.11
+     -
+     -
+   * - Kilo
+     - 2015.1.0
+     - 0.9.11
+     - 0.9.11
+     -
+     -
+   * - Liberty
+     - 12.0.0
+     - 0.9.11
+     - 0.10.2
+     -
+     -
+   * - Mitaka
+     - 13.0.0
+     - 0.10.2
+     - 1.2.1
+     -
+     -
+   * - Newton
+     - 14.0.0
+     - 1.2.1
+     - 1.2.1
+     - 1.5.3
+     - 1.5.3
+   * - Ocata
+     - 15.0.0
+     - 1.2.1
+     - 1.2.9
+     - 1.5.3
+     - 2.1.0
+   * - Pike
+     - 16.0.0
+     - 1.2.9
+     - 1.3.1
+     - 2.1.0
+     - 2.5.0
+   * - Queens
+     - 17.0.0
+     - 1.2.9
+     - 1.3.1
+     - 2.1.0
+     - 2.5.0
+   * - Rocky
+     - 18.0.0
+     - 1.3.1
+     - 3.0.0
+     - 2.5.0
+     - 2.8.0
+   * - Stein
+     - 19.0.0
+     - 3.0.0
+     - 4.0.0
+     - 2.8.0
+     - 2.11.0
+   * - Train
+     - 20.0.0
+     - 3.0.0
+     - 4.0.0
+     - 2.8.0
+     - 2.11.0
+   * - Ussuri
+     - 21.0.0
+     - 4.0.0
+     - 5.0.0
+     - 2.11.0
+     - 4.0.0
+   * - Victoria
+     - 22.0.0
+     - 5.0.0
+     - 6.0.0
+     - 4.0.0
+     - 4.2.0
+   * - Wallaby
+     - 23.0.0
+     - 6.0.0
+     - 7.0.0
+     - 4.2.0
+     - 5.2.0
+
+OS distribution versions
+------------------------
+
+This table provides information on a representative sample of OS distros and
+the version of libvirt/QEMU/libguestfs that they ship. This is **NOT** intended
+to be an exhaustive list of distros where OpenStack Nova can run - Nova is
+intended to run on any Linux distro that can satisfy the minimum required
+software versions. This table merely aims to help identify when minimum
+required versions can be reasonably updated without losing support for
+important OS distros.
+
+.. list-table:: Distro libvirt/QEMU Support Table
+
+   * - OS Distro
+     - GA date
+     - Libvirt
+     - QEMU/KVM
+     - libguestfs
+   * - **Debian**
+     -
+     -
+     -
+     -
+   * - 10.x (Buster) ("stable")
+     - as of 2020-05-15
+     - 5.0.0
+     - 3.1
+     - 1.40
+   * - 11.x (Bullseye) ("sid" - unstable)
+     - No GA date as of 2020-05-15
+     - 6.0.0
+     - 5.0
+     - 1.42.0
+   * - **Fedora**
+     -
+     -
+     -
+     -
+   * - 32
+     - 2020-04-28
+     - 6.1.0
+     - 4.2.0
+     - 1.42.0
+   * - 33
+     - 2020-10-27
+     - 6.6.0
+     - 5.1.0
+     - 1.43.0
+   * - 34
+     - 2021-04-27
+     - 7.0.0
+     - 5.2.0
+     - 1.45.4
+   * - **SUSE**
+     -
+     -
+     -
+     -
+   * - Leap 15.0
+     - 2018-05
+     - 4.0.0
+     - 2.11.1
+     - 1.38.0
+   * - Leap 15.1
+     - 2019-05-22
+     - 5.1.0
+     - 3.1.1
+     - 1.38.0
+   * - Leap 15.2
+     - 2020-07-02
+     - 6.0.0
+     - 4.2.0
+     - 1.38.0
+   * - Leap 15.3
+     - 2021-06-02 (scheduled)
+     - 7.2.0
+     - 6.0.0
+     - 1.44.1
+   * - **RHEL**
+     -
+     -
+     -
+     -
+   * - 7.7
+     - 2019-08-06
+     - 4.5.0-23
+     - 2.12.0-33
+     - 1.40.2-5
+   * - 7.8
+     - 2020-03-31
+     - 4.5.0-36
+     - 2.12.0-48
+     - 1.40.2-10
+   * - 8.2
+     - 2020-04-28
+     - 6.0.0-17.2
+     - 4.2.0-19
+     - 1.40.2-22
+   * - 8.3
+     - 2020-10-29
+     - 6.0.0-25.5
+     - 4.2.0-29
+     - 1.40.2-24
+   * - 8.4
+     - 2021-05-18
+     - 7.0.0-8
+     - 5.2.0-10
+     - 1.44.0-2
+   * - **SLES**
+     -
+     -
+     -
+     -
+   * - 15
+     - 2018-07
+     - 4.0.0
+     - 2.11.1
+     - 1.38.0
+   * - 15.1
+     - 2019
+     - 5.1.0
+     - 3.1.1
+     - 1.38.0
+   * - 15.2
+     - 2020
+     - 6.0.0
+     - 4.2.1
+     - 1.38.0
+   * - **Ubuntu**
+     -
+     -
+     -
+     -
+   * - 18.04 (Bionic LTS - Cloud Archive)
+     - as of 2019-11-18
+     - 5.4.0
+     - 4.0
+     - 1.36
+   * - 20.04 (Focal Fossa LTS)
+     - 2020-04-23
+     - 6.0.0
+     - 4.2
+     - 1.40.2
+   * - 21.04 (Hirsute Hippo)
+     - 2021-04-22
+     - 7.0.0
+     - 5.2
+     - 1.44.1
+
+..
NB: maintain alphabetical ordering of distros, followed by oldest released + versions first diff --git a/doc/source/reference/notifications.rst b/doc/source/reference/notifications.rst index dda515a7ea4..788b3bccde2 100644 --- a/doc/source/reference/notifications.rst +++ b/doc/source/reference/notifications.rst @@ -47,10 +47,11 @@ Nova code uses the nova.rpc.get_notifier call to get a configured oslo.messaging Notifier object and it uses the oslo provided functions on the Notifier object to emit notifications. The configuration of the returned Notifier object depends on the parameters of the get_notifier call and the -value of the oslo.messaging configuration options `driver` and `topics`. +value of the oslo.messaging configuration options ``driver`` and ``topics``. There are notification configuration options in Nova which are specific for -certain notification types like `notifications.notify_on_state_change`, -`notifications.default_level`, etc. +certain notification types like +:oslo.config:option:`notifications.notify_on_state_change`, +:oslo.config:option:`notifications.default_level`, etc. The structure of the payload of the unversioned notifications is defined in the code that emits the notification and no documentation or enforced backward @@ -67,8 +68,8 @@ serialized :oslo.versionedobjects-doc:`oslo versionedobjects object <>`. .. _service.update: -For example the wire format of the `service.update` notification looks like the -following:: +For example the wire format of the ``service.update`` notification looks like +the following:: { "priority":"INFO", @@ -97,9 +98,9 @@ the consumer so the consumer can detect if the structure of the payload is changed. Nova provides the following contract regarding the versioned notification payload: -* the payload version defined by the `the nova_object.version` field of the +* the payload version defined by the ``nova_object.version`` field of the payload will be increased if and only if the syntax or the semantics of the - `nova_object.data` field of the payload is changed. + ``nova_object.data`` field of the payload is changed. * a minor version bump indicates a backward compatible change which means that only new fields are added to the payload so a well written consumer can still consume the new payload without any change. @@ -110,27 +111,32 @@ notification payload: the nova internal representation of the payload type. Client code should not depend on this name. -There is a Nova configuration parameter `notifications.notification_format` -that can be used to specify which notifications are emitted by Nova. The -possible values are `unversioned`, `versioned`, `both` and the default value -is `both`. +There is a Nova configuration parameter +:oslo.config:option:`notifications.notification_format` +that can be used to specify which notifications are emitted by Nova. The versioned notifications are emitted to a different topic than the legacy notifications. By default they are emitted to 'versioned_notifications' but it -is configurable in the nova.conf with the `versioned_notifications_topic` +is configurable in the nova.conf with the +:oslo.config:option:`notifications.versioned_notifications_topics` config option. +A `presentation from the Train summit`_ goes over the background and usage of +versioned notifications, and provides a demo. + +.. 
_presentation from the Train summit: https://www.openstack.org/videos/summits/denver-2019/nova-versioned-notifications-the-result-of-a-3-year-journey
+
 How to add a new versioned notification
 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 
 To support the above contract from the Nova code every versioned notification
 is modeled with oslo versionedobjects. Every versioned notification class
-shall inherit from the `nova.notifications.objects.base.NotificationBase` which
-already defines three mandatory fields of the notification `event_type`,
-`publisher_id` and `priority`. The new notification class shall add a new field
-`payload` with an appropriate payload type. The payload object of the
-notifications shall inherit from the
-`nova.objects.notifications.base.NotificationPayloadBase` class and shall
+shall inherit from the ``nova.notifications.objects.base.NotificationBase``
+which already defines three mandatory fields of the notification
+``event_type``, ``publisher`` and ``priority``. The new notification class
+shall add a new field ``payload`` with an appropriate payload type. The payload
+object of the notifications shall inherit from the
+``nova.notifications.objects.base.NotificationPayloadBase`` class and shall
 define the fields of the payload as versionedobject fields. The base classes
 are described in the following section.
@@ -147,7 +153,7 @@ objects. Instead of that use the register_notification decorator on every
 concrete notification object.
 
 The following code example defines the necessary model classes for a new
-notification `myobject.update`::
+notification ``myobject.update``::
 
     @notification.notification_sample('myobject-update.json')
     @object_base.NovaObjectRegistry.register.register_notification
@@ -202,10 +208,11 @@ The above code will generate the following notification on the wire::
 
 There is a possibility to reuse an existing versionedobject as notification
-payload by adding a `SCHEMA` field for the payload class that defines a mapping
-between the fields of existing objects and the fields of the new payload
-object. For example the service.status notification reuses the existing
-`nova.objects.service.Service` object when defines the notification's payload::
+payload by adding a ``SCHEMA`` field for the payload class that defines a
+mapping between the fields of existing objects and the fields of the new
+payload object. For example the service.status notification reuses the existing
+``nova.objects.service.Service`` object when defining the notification's
+payload::
 
     @notification.notification_sample('service-update.json')
     @object_base.NovaObjectRegistry.register.register_notification
@@ -249,8 +256,8 @@ object. For example the service.status notification reuses the existing
         def populate_schema(self, service):
             super(ServiceStatusPayload, self).populate_schema(service=service)
 
-If the `SCHEMA` field is defined then the payload object needs to be populated
-with the `populate_schema` call before it can be emitted::
+If the ``SCHEMA`` field is defined then the payload object needs to be
+populated with the ``populate_schema`` call before it can be emitted::
 
     payload = ServiceStatusPayload()
     payload.populate_schema(service=<the service object>)
@@ -266,33 +273,35 @@ with the `populate_schema` call before it can be emitted::
 
 The above code will emit the :ref:`already shown notification <service.update>`
 on the wire.
 
-Every item in the `SCHEMA` has the syntax of::
+Every item in the ``SCHEMA`` has the syntax of::
 
     <payload field name>: (<parameter name>, <the parameter object's field name>)
 
-The mapping defined in the `SCHEMA` field has the following semantics.
When -the `populate_schema` function is called the content of the `SCHEMA` field is -enumerated and the value of the field of the pointed parameter object is copied -to the requested payload field. So in the above example the `host` field of -the payload object is populated from the value of the `host` field of the -`service` object that is passed as a parameter to the `populate_schema` call. +The mapping defined in the ``SCHEMA`` field has the following semantics. When +the ``populate_schema`` function is called the content of the ``SCHEMA`` field +is enumerated and the value of the field of the pointed parameter object is +copied to the requested payload field. So in the above example the ``host`` +field of the payload object is populated from the value of the ``host`` field +of the ``service`` object that is passed as a parameter to the +``populate_schema`` call. A notification payload object can reuse fields from multiple existing objects. Also a notification can have both new and reused fields in its payload. Note that the notification's publisher instance can be created two different -ways. It can be created by instantiating the `NotificationPublisher` object -with a `host` and a `binary` string parameter or it can be generated from a -`Service` object by calling `NotificationPublisher.from_service_obj` function. +ways. It can be created by instantiating the ``NotificationPublisher`` object +with a ``host`` and a ``source`` string parameter or it can be generated from a +``Service`` object by calling ``NotificationPublisher.from_service_obj`` +function. Versioned notifications shall have a sample file stored under -`doc/sample_notifications` directory and the notification object shall be -decorated with the `notification_sample` decorator. For example the -`service.update` notification has a sample file stored in -`doc/sample_notifications/service-update.json` and the +``doc/sample_notifications`` directory and the notification object shall be +decorated with the ``notification_sample`` decorator. For example the +``service.update`` notification has a sample file stored in +``doc/sample_notifications/service-update.json`` and the ServiceUpdateNotification class is decorated accordingly. Notification payload classes can use inheritance to avoid duplicating common @@ -327,11 +336,28 @@ requires the notification. object and use the SCHEMA field to map the internal object to the notification payload. This way the evolution of the internal object model can be decoupled from the evolution of the notification payload. + + .. important:: This does not mean that every field from internal objects + should be mirrored in the notification payload objects. + Think about what is actually needed by a consumer before + adding it to a payload. When in doubt, if no one is requesting + specific information in notifications, then leave it out until + someone asks for it. + * The delete notification should contain the same information as the create or update notifications. This makes it possible for the consumer to listen only to the delete notifications but still filter on some fields of the entity (e.g. project_id). +What should **NOT** be in the notification payload +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +* Generally anything that contains sensitive information about the internals + of the nova deployment, for example fields that contain access credentials + to a cell database or message queue (see `bug 1823104`_). + +.. 
_bug 1823104: https://bugs.launchpad.net/nova/+bug/1823104 + Existing versioned notifications ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ @@ -345,5 +371,5 @@ Existing versioned notifications .. versioned_notifications:: -.. [1] https://review.openstack.org/#/c/463001/ -.. [2] https://review.openstack.org/#/c/453077/ +.. [1] https://review.opendev.org/#/c/463001/ +.. [2] https://review.opendev.org/#/c/453077/ diff --git a/doc/source/reference/policy-enforcement.rst b/doc/source/reference/policy-enforcement.rst index 5a008822274..22d305e8c93 100644 --- a/doc/source/reference/policy-enforcement.rst +++ b/doc/source/reference/policy-enforcement.rst @@ -15,156 +15,164 @@ under the License. -Rest API Policy Enforcement +REST API Policy Enforcement =========================== -Here is a vision of how we want policy to be enforced in nova. +The following describes some of the shortcomings in how policy is used and +enforced in nova, along with some benefits of fixing those issues. Each issue +has a section dedicated to describing the underlying cause and historical +context in greater detail. Problems with current system ---------------------------- -There are several problems for current API policy. - -* The permission checking is spread through the various levels of the nova - code, also there are some hard-coded permission checks that make some - policies not enforceable. - -* API policy rules need better granularity. Some of extensions just use one - rule for all the APIs. Deployer can't get better granularity control for - the APIs. - -* More easy way to override default policy settings for deployer. And - Currently all the API(EC2, V2, V2.1) rules mix in one policy.json file. - -These are the kinds of things we need to make easier: - -1. Operator wants to enable a specific role to access the service API which -is not possible because there is currently a hard coded admin check. - -2. One policy rule per API action. Having a check in the REST API and a -redundant check in the compute API can confuse developers and deployers. - -3. Operator can specify different rules for APIs that in same extension. - -4. Operator can override the default policy rule easily without mixing his own -config and default config in one policy.json file. - -Future of policy enforcement ----------------------------- - -The generic rule for all the improvement is keep V2 API back-compatible. -Because V2 API may be deprecated after V2.1 parity with V2. This can reduce -the risk we take. The improvement just for EC2 and V2.1 API. There isn't -any user for V2.1, as it isn't ready yet. We have to do change for EC2 API. -EC2 API won't be removed like v2 API. If we keep back-compatible for EC2 API -also, the old compute api layer checks won't be removed forever. EC2 API is -really small than Nova API. It's about 29 APIs without volume and image -related(those policy check done by cinder and glance). So it will affect user -less. - -Enforcement policy at REST API layer -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -The policy should be only enforced at REST API layer. This is clear for user -to know where the policy will be enforced. If the policy spread into multiple -layer of nova code, user won't know when and where the policy will be enforced -if they didn't have knowledge about nova code. - -Remove all the permission checking under REST API layer. Policy will only be -enforced at REST API layer. 
- -This will affect the EC2 API and V2.1 API, there are some API just have policy -enforcement at Compute/Network API layer, those policy will be move to API -layer and renamed. - -Removes hard-code permission checks -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -Hard-coded permission checks make it impossible to supply a configurable -policy. They should be removed in order to make nova auth completely -configurable. - -This will affect EC2 API and Nova V2.1 API. User need update their policy -rule to match the old hard-code permission. - -For Nova V2 API, the hard-code permission checks will be moved to REST API -layer to guarantee it won't break the back-compatibility. That may ugly -some hard-code permission check in API layer, but V2 API will be removed -once V2.1 API ready, so our choice will reduce the risk. - -Port policy.d into nova -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -This feature make deployer can override default policy rule easily. And -When nova default policy config changed, deployer only need replace default -policy config files with new one. It won't affect his own policy config in -other files. - -Use different prefix in policy rule name for EC2/V2/V2.1 API -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -Currently all the APIs(Nova v2/v2.1 API, EC2 API) use same set of policy -rules. Especially there isn't obvious mapping between those policy rules -and EC2 API. User can know clearly which policy should be configured for -specific API. - -Nova should provide different prefix for policy rule name that used to -group them, and put them in different policy configure file in policy.d - -* EC2 API: Use prefix "ec2_api". The rule looks like "ec2_api:[action]" - -* Nova V2 API: After we move to V2.1, we needn't spend time to change V2 - api rule, and needn't to bother deployer upgrade their policy config. So - just keep V2 API policy rule named as before. - -* Nova V2.1 API: We name the policy rule as - "os_compute_api:[extension]:[action]". The core API may be changed in - the future, so we needn't name them as "compute" or "compute_extension" - to distinguish the core or extension API. - -This will affect EC2 API and V2.1 API. For EC2 API, it need deployer update -their policy config. For V2.1 API, there isn't any user yet, so there won't -any effect. - - -Group the policy rules into different policy files -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -After group the policy rules for different API, we can separate them into -different files. Then deployer will more clear for which rule he can set for -specific API. The rules can be grouped as below: - -* policy.json: It only contains the generic rule, like: :: - - "context_is_admin": "role:admin", - "admin_or_owner": "is_admin:True or project_id:%(project_id)s", - "default": "rule:admin_or_owner", - -* policy.d/00-ec2-api.conf: It contains all the policy rules for EC2 API. - -* policy.d/00-v2-api.conf: It contains all the policy rules for nova V2 API. - -* policy.d/00-v2.1-api.conf: It contains all the policy rules for nova v2.1 - API. - -The prefix '00-' is used to order the configure file. All the files in -policy.d will be loaded by alphabetical order. '00-' means those files will -be loaded very early. - -Existed Nova API being restricted ---------------------------------- - -Nova provide default policy rules for all the APIs. Operator should only make -the policy rule more permissive. If the Operator make the API to be restricted -that make break the existed API user or application. 
That's kind of -back-incompatible. SO Operator can free to add additional permission to the -existed API. - -Policy Enforcement by user_id ------------------------------ - -In the legacy v2 API, the policy enforces with target object, and some operators -implement user-based authorization based on that. Actually only project-based -authorization is well tested, the user based authorization is untested and -isn't supported by Nova. In the future, the nova will remove all the supports -for user-based authorization. +The following is a list of issues with the existing policy enforcement system: + +* `Testing default policies`_ +* `Mismatched authorization`_ +* `Inconsistent naming`_ +* `Incorporating default roles`_ +* `Compartmentalized policy enforcement`_ +* `Refactoring hard-coded permission checks`_ +* `Granular policy checks`_ + +Addressing the list above helps operators by: + +1. Providing them with flexible and useful defaults +2. Reducing the likelihood of writing and maintaining custom policies +3. Improving interoperability between deployments +4. Increasing RBAC confidence through first-class testing and verification +5. Reducing complexity by using consistent policy naming conventions +6. Exposing more functionality to end-users, safely, making the entire nova API + more self-serviceable resulting in less operational overhead for operators + to do things on behalf of users + +Additionally, the following is a list of benefits to contributors: + +1. Reduce developer maintenance and cost by isolating policy enforcement into a + single layer +2. Reduce complexity by using consistent policy naming conventions +3. Increased confidence in RBAC refactoring through exhaustive testing that + prevents regressions before they merge + +Testing default policies +------------------------ + +Testing default policies is important in protecting against authoritative +regression. Authoritative regression is when a change accidentally allows +someone to do something or see something they shouldn't. It can also be when a +change accidentally restricts a user from doing something they used to have the +authorization to perform. This testing is especially useful prior to +refactoring large parts of the policy system. For example, this level of +testing would be invaluable prior to pulling policy enforcement logic from the +database layer up to the API layer. + +`Testing documentation`_ exists that describes the process for developing these +types of tests. + +.. _Testing documentation: https://docs.openstack.org/keystone/latest/contributor/services.html#ruthless-testing + +Mismatched authorization +------------------------ + +The compute API is rich in functionality and has grown to manage both physical +and virtual hardware. Some APIs were meant to assist operators while others +were specific to end users. Historically, nova used project-scoped tokens to +protect almost every API, regardless of the intended user. Using project-scoped +tokens to authorize requests for system-level APIs makes for undesirable +user-experience and is prone to overloading roles. For example, to prevent +every user from accessing hardware level APIs that would otherwise violate +tenancy requires operators to create a ``system-admin`` or ``super-admin`` +role, then rewrite those system-level policies to incorporate that role. 
This +means users with that special role on a project could access system-level +resources that aren't even tracked against projects (hypervisor information is +an example of system-specific information.) + +As of the Queens release, keystone supports a scope type dedicated to easing +this problem, called system scope. Consuming system scope across the compute +API results in fewer overloaded roles, less specialized authorization logic in +code, and simpler policies that expose more functionality to users without +violating tenancy. Please refer to keystone's `authorization scopes +documentation`_ to learn more about scopes and how to use them effectively. + +.. _authorization scopes documentation: https://docs.openstack.org/keystone/latest/contributor/services.html#authorization-scopes + +Inconsistent naming +------------------- + +Inconsistent conventions for policy names are scattered across most OpenStack +services, nova included. Recently, there was an effort that introduced a +convention that factored in service names, resources, and use cases. This new +convention is applicable to nova policy names. The convention is formally +`documented`_ in oslo.policy and we can use policy `deprecation tooling`_ to +gracefully rename policies. + +.. _documented: https://docs.openstack.org/oslo.policy/latest/user/usage.html#naming-policies +.. _deprecation tooling: https://docs.openstack.org/oslo.policy/latest/reference/api/oslo_policy.policy.html#oslo_policy.policy.DeprecatedRule + +Incorporating default roles +--------------------------- + +Up until the Rocky release, keystone only ensured a single role called +``admin`` +was available to the deployment upon installation. In Rocky, this support was +expanded to include ``member`` and ``reader`` roles as first-class citizens during +keystone's installation. This allows service developers to rely on these roles +and include them in their default policy definitions. Standardizing on a set of +role names for default policies increases interoperability between deployments +and decreases operator overhead. + +You can find more information on default roles in the keystone `specification`_ +or `developer documentation`_. + +.. _specification: http://specs.openstack.org/openstack/keystone-specs/specs/keystone/rocky/define-default-roles.html +.. _developer documentation: https://docs.openstack.org/keystone/latest/contributor/services.html#reusable-default-roles + +Compartmentalized policy enforcement +------------------------------------ + +Policy logic and processing is inherently sensitive and often complicated. It +is sensitive in that coding mistakes can lead to security vulnerabilities. It +is complicated in the resources and APIs it needs to protect and the vast +number of use cases it needs to support. These reasons make a case for +isolating policy enforcement and processing into a compartmentalized space, as +opposed to policy logic bleeding through to different layers of nova. Not +having all policy logic in a single place makes evolving the policy enforcement +system arduous and makes the policy system itself fragile. + +Currently, the database and API components of nova contain policy logic. At +some point, we should refactor these systems into a single component that is +easier to maintain. Before we do this, we should consider approaches for +bolstering testing coverage, which ensures we are aware of or prevent policy +regressions. There are examples and documentation in API protection `testing +guides`_. + +.. 
_testing guides: https://docs.openstack.org/keystone/latest/contributor/services.html#ruthless-testing
+
+Refactoring hard-coded permission checks
+----------------------------------------
+
+The policy system in nova is designed to be configurable. Despite this design,
+there are some APIs that have hard-coded checks for specific roles. This makes
+configuration impossible, and it is misleading and frustrating for operators.
+Instead, we can remove hard-coded policies and move to a configuration-driven
+approach, which reduces technical debt, increases consistency, and provides
+better user-experience for operators. Additionally, moving hard-coded checks
+into first-class policy rules lets us use existing policy tooling to deprecate,
+document, and evolve policies.
+
+Granular policy checks
+----------------------
+
+Policies should be as granular as possible to ensure consistency and reasonable
+defaults. Using a single policy to protect CRUD for an entire API is
+restrictive because it prevents us from using default roles to make delegation
+to that API flexible. For example, a policy for ``compute:foobar`` could be
+broken into ``compute:foobar:create``, ``compute:foobar:update``,
+``compute:foobar:list``, ``compute:foobar:get``, and ``compute:foobar:delete``.
+Breaking policies down this way allows us to set read-only policies for
+readable operations or use another default role for creation and management of
+`foobar` resources. The oslo.policy library has `examples`_ that show how to do
+this using deprecated policy rules.
+
+.. _examples: https://docs.openstack.org/oslo.policy/latest/reference/api/oslo_policy.policy.html#oslo_policy.policy.DeprecatedRule
diff --git a/doc/source/reference/scheduler-evolution.rst b/doc/source/reference/scheduler-evolution.rst
index 43f1ffd5ba3..5eb003da850 100644
--- a/doc/source/reference/scheduler-evolution.rst
+++ b/doc/source/reference/scheduler-evolution.rst
@@ -43,14 +43,6 @@ that is close to the shared storage where that volume is. Similarly, for the
 sake of performance, it can be desirable to use a compute node that is in a
 particular location in relation to a pre-created port.
 
-Accessing Aggregates in Filters and Weights
--------------------------------------------
-
-Any DB access in a filter or weight slows down the scheduler. Until the
-end of kilo, there was no way to deal with the scheduler accessing
-information about aggregates without querying the DB in every call to
-host_passes() in a filter.
-
 Filter Scheduler Alternatives
 ------------------------------
@@ -80,21 +72,6 @@ Key areas we are evolving
 Here we discuss, at a high level, areas that are being addressed as part of
 the scheduler evolution work.
 
-Fixing the Scheduler DB model
------------------------------
-
-We need the nova and scheduler data models to be independent of each other.
-
-The first step is breaking the link between the ComputeNode and Service
-DB tables. In theory where the Service information is stored should be
-pluggable through the service group API, and should be independent of the
-scheduler service. For example, it could be managed via zookeeper rather
-than polling the nova DB.
-
-There are also places where filters and weights call into the nova DB to
-find out information about aggregates. This needs to be sent to the
-scheduler, rather than reading directly from the nova database.
-
 Versioning Scheduler Placement Interfaces
 ------------------------------------------
@@ -124,9 +101,6 @@ This is linked to the work on the resource tracker.
Updating the Scheduler about other data
----------------------------------------
 
-For things like host aggregates, we need the scheduler to cache information
-about those, and know when there are changes so it can update its cache.
-
 Over time, its possible that we need to send cinder and neutron data, so the
 scheduler can use that data to help pick a nova-compute host.
diff --git a/doc/source/reference/scheduler-hints-vs-flavor-extra-specs.rst b/doc/source/reference/scheduler-hints-vs-flavor-extra-specs.rst
new file mode 100644
index 00000000000..28f89a9f03f
--- /dev/null
+++ b/doc/source/reference/scheduler-hints-vs-flavor-extra-specs.rst
@@ -0,0 +1,161 @@
+..
+    Licensed under the Apache License, Version 2.0 (the "License"); you may
+    not use this file except in compliance with the License. You may obtain
+    a copy of the License at
+
+        http://www.apache.org/licenses/LICENSE-2.0
+
+    Unless required by applicable law or agreed to in writing, software
+    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+    License for the specific language governing permissions and limitations
+    under the License.
+
+=========================================
+Scheduler hints versus flavor extra specs
+=========================================
+
+People deploying and working on Nova often have questions about flavor extra
+specs and scheduler hints: what role do they play in scheduling decisions, and
+which is the better choice for exposing a capability to an end user of the
+cloud? There are several things to consider and it can get complicated. This
+document attempts to explain at a high level some of the major differences and
+drawbacks of both flavor extra specs and scheduler hints.
+
+Extra Specs
+-----------
+
+In general flavor extra specs are specific to the cloud and how it is
+organized for capabilities, and should be abstracted from the end user.
+Extra specs are tied to :doc:`host aggregates ` and a lot
+of them also define how a guest is created in the hypervisor, for example
+what the watchdog action is for a VM. Extra specs are also generally
+interchangeable with `image properties`_ when it comes to VM behavior, like
+the watchdog example. How this is presented to the user is via the name of
+the flavor, or via documentation specific to that deployment, e.g.
+instructions telling a user how to set up a baremetal instance.
+
+.. _image properties: https://docs.openstack.org/glance/latest/admin/useful-image-properties.html
+
+Scheduler Hints
+---------------
+
+Scheduler hints, also known simply as "hints", can be specified during server
+creation to influence the placement of the server by the scheduler depending
+on which scheduler filters are enabled. Hints are mapped to specific filters.
+For example, the ``ServerGroupAntiAffinityFilter`` scheduler filter is used
+with the ``group`` scheduler hint to indicate that the server being created
+should be a member of the specified anti-affinity group and the filter should
+place that server on a compute host which is different from all other current
+members of the group.
+
+Hints are not more "dynamic" than flavor extra specs. The end user
+specifies a flavor and optionally a hint when creating a server, but
+ultimately what they can specify is static and defined by the deployment.
+
+Similarities
+------------
+
+* Both scheduler hints and flavor extra specs can be used by
+  :doc:`scheduler filters `.
+
+* Both are totally customizable, meaning there is no whitelist within Nova of
+  acceptable hints or extra specs, unlike image properties [1]_.
+
+* An end user cannot achieve a new behavior without deployer consent, i.e.
+  even if the end user specifies the ``group`` hint, if the deployer did not
+  configure the ``ServerGroupAntiAffinityFilter`` the end user cannot have the
+  ``anti-affinity`` behavior.
+
+Differences
+-----------
+
+* A server's host location and/or behavior can change when resized with a
+  flavor that has different extra specs from those used to create the server.
+  Scheduler hints can only be specified during server creation, not during
+  resize or any other "move" operation, but the original hints are still
+  applied during the move operation.
+
+* The flavor extra specs used to create (or resize) a server can be retrieved
+  from the compute API using the `2.47 microversion`_. As of the 19.0.0 Stein
+  release, there is currently no way from the compute API to retrieve the
+  scheduler hints used to create a server.
+
+  .. note:: Exposing the hints used to create a server has been proposed [2]_.
+            Without this, it is possible to work around the limitation by
+            doing things such as including the scheduler hint in the server
+            metadata so it can be retrieved later.
+
+* In the case of hints the end user can decide not to include a hint. On the
+  other hand the end user cannot create a new flavor (by default policy) to
+  avoid passing a flavor with an extra spec - the deployer controls the
+  flavors.
+
+.. _2.47 microversion: https://docs.openstack.org/nova/latest/reference/api-microversion-history.html#id42
+
+Discoverability
+---------------
+
+When it comes to discoverability, flavor extra specs are more "discoverable"
+by the end user, since the default
+``os_compute_api:os-flavor-extra-specs:index`` policy rule allows them to be
+listed for a flavor. However, one should not expect an average end user to
+understand what different extra specs mean as they are just key/value pairs.
+There is documentation for some "standard" extra specs, though [3]_. However,
+that is not an exhaustive list and it does not include anything that different
+deployments would define for things like linking a flavor to a set of
+:doc:`host aggregates `, for example, when creating flavors
+for baremetal instances, or what the chosen
+:doc:`hypervisor driver ` might support for
+flavor extra specs.
+
+Scheduler hints are less discoverable from an end user perspective than
+extra specs. There are some standard hints defined in the API request
+schema [4]_. However:
+
+1. Those hints are tied to scheduler filters and the scheduler filters are
+   configurable per deployment, so for example the ``JsonFilter`` might not be
+   enabled (it is not enabled by default), so the ``query`` hint would not do
+   anything.
+2. Scheduler hints are not restricted to just what is in that schema in the
+   upstream nova code because of the ``additionalProperties: True`` entry in
+   the schema. This allows deployments to define their own hints outside of
+   that API request schema for their own
+   :ref:`custom scheduler filters ` which are not
+   part of the upstream nova code.
+
+Interoperability
+----------------
+
+The only way an end user can really use scheduler hints is based
+on documentation (or GUIs/SDKs) that a specific cloud deployment provides for
+their setup.
So if **CloudA** defines a custom scheduler filter X and a hint
+for that filter in their documentation, an end user application can only run
+with that hint on that cloud and expect it to work as documented. If the user
+moves their application to **CloudB**, which does not have that scheduler
+filter or hint, they will get different behavior.
+
+So, clearly, neither flavor extra specs nor scheduler hints are interoperable
+between clouds.
+
+Which to use?
+-------------
+
+When it comes to defining a custom scheduler filter, you could use a hint or
+an extra spec. If you need a flavor extra spec anyway for some behavior in the
+hypervisor when creating the guest, or to be able to retrieve the original
+flavor extra specs used to create a guest later, then you might as well just
+use the extra spec. If you do not need that, then a scheduler hint may be an
+obvious choice, from an end user perspective, for exposing a certain scheduling
+behavior. However, the hint must be well documented, and the end user should
+realize that the hint might not be available in other clouds and that they
+have no good way of finding that out. Long-term, flavor extra specs are likely
+to be more standardized than hints, so ultimately extra specs are the
+recommended choice.
+
+Footnotes
+---------
+
+.. [1] https://opendev.org/openstack/nova/src/commit/fbe6f77bc1cb41f5d6cfc24ece54d3413f997aab/nova/objects/image_meta.py#L225
+.. [2] https://review.opendev.org/#/c/440580/
+.. [3] https://docs.openstack.org/nova/latest/user/flavors.html#extra-specs
+.. [4] https://opendev.org/openstack/nova/src/commit/fbe6f77bc1cb41f5d6cfc24ece54d3413f997aab/nova/api/openstack/compute/schemas/scheduler_hints.py
diff --git a/doc/source/reference/scheduling.rst b/doc/source/reference/scheduling.rst
index 1dea07ab303..a73710ce7a6 100644
--- a/doc/source/reference/scheduling.rst
+++ b/doc/source/reference/scheduling.rst
@@ -16,7 +16,7 @@
 ============
 
 This is an overview of how scheduling works in nova from Pike onwards. For
-information on the scheduler itself, refer to :doc:`/user/filter-scheduler`.
+information on the scheduler itself, refer to :doc:`/admin/scheduling`.
 For an overview of why we've changed how the scheduler works, refer to
 :doc:`/reference/scheduler-evolution`.
diff --git a/doc/source/reference/stable-api.rst b/doc/source/reference/stable-api.rst
index c627c344112..462e8e3febf 100644
--- a/doc/source/reference/stable-api.rst
+++ b/doc/source/reference/stable-api.rst
@@ -36,7 +36,7 @@ Newer code is called the "v2.1 API" and exists in the
 The v2 API is the old Nova REST API. It is mostly replaced by v2.1 API.
 
 The v2.1 API is the new Nova REST API with a set of improvements which
-includes `Microversion `_
+includes `Microversion `_
 and standardized validation of inputs using JSON-Schema. Also the v2.1 API
 is totally backwards compatible with the v2 API (That is the reason we call
 it as v2.1 API).
@@ -124,7 +124,7 @@ for more information.
 The '/extensions' API exposed the list of enabled API functions to users by
 GET method. However as the above, new API extensions should not be added
 to the list of this API. The '/extensions' API is frozen in Nova V2.1 API and
-is `deprecated `_.
+is `deprecated `_.
 
 Things which are History now
 ****************************
diff --git a/doc/source/reference/threading.rst b/doc/source/reference/threading.rst
index cc463e9ba53..5b33e2dd770 100644
--- a/doc/source/reference/threading.rst
+++ b/doc/source/reference/threading.rst
@@ -28,9 +28,9 @@ in the long-running code path.
The sleep call will trigger a context switch if there are pending threads, and using an argument of 0 will avoid introducing delays in the case that there is only a single green thread:: - from eventlet import greenthread - ... - greenthread.sleep(0) + from eventlet import greenthread + ... + greenthread.sleep(0) In current code, time.sleep(0) does the same thing as greenthread.sleep(0) if time module is patched through eventlet.monkey_patch(). To be explicit, we recommend diff --git a/doc/source/reference/update-provider-tree.rst b/doc/source/reference/update-provider-tree.rst index e165f61104d..bc204c5a9ee 100644 --- a/doc/source/reference/update-provider-tree.rst +++ b/doc/source/reference/update-provider-tree.rst @@ -23,13 +23,13 @@ Background ---------- In the movement towards using placement for scheduling and resource management, the virt driver method ``get_available_resource`` was initially superseded by -``get_inventory``, whereby the driver could specify its inventory in terms -understood by placement. In Queens, a ``get_traits`` driver method was added. -But ``get_inventory`` is limited to expressing only inventory (not traits or -aggregates). And both of these methods are limited to the resource provider -corresponding to the compute node. +``get_inventory`` (now gone), whereby the driver could specify its inventory in +terms understood by placement. In Queens, a ``get_traits`` driver method was +added. But ``get_inventory`` was limited to expressing only inventory (not +traits or aggregates). And both of these methods were limited to the resource +provider corresponding to the compute node. -Recent developments such as Nested Resource Providers necessitate the ability +Developments such as Nested Resource Providers necessitate the ability for the virt driver to have deeper control over what the resource tracker configures in placement on behalf of the compute node. This need is filled by the virt driver method ``update_provider_tree`` and its consumption by the @@ -38,7 +38,7 @@ the compute node and its associated providers. The Method ---------- -``update_provider_tree`` accepts two parameters: +``update_provider_tree`` accepts the following parameters: * A ``nova.compute.provider_tree.ProviderTree`` object representing all the providers in the tree associated with the compute node, and any sharing @@ -59,6 +59,38 @@ The Method use this to help identify the compute node provider in the ProviderTree. Drivers managing more than one node (e.g. ironic) may also use it as a cue to indicate which node is being processed by the caller. +* Dictionary of ``allocations`` data of the form: + + .. code:: + + { $CONSUMER_UUID: { + # The shape of each "allocations" dict below is identical + # to the return from GET /allocations/{consumer_uuid} + "allocations": { + $RP_UUID: { + "generation": $RP_GEN, + "resources": { + $RESOURCE_CLASS: $AMOUNT, + ... + }, + }, + ... + }, + "project_id": $PROJ_ID, + "user_id": $USER_ID, + "consumer_generation": $CONSUMER_GEN, + }, + ... + } + + If ``None``, and the method determines that any inventory needs to be moved + (from one provider to another and/or to a different resource class), the + ``ReshapeNeeded`` exception must be raised. Otherwise, this dict must be + edited in place to indicate the desired final state of allocations. Drivers + should *only* edit allocation records for providers whose inventories are + being affected by the reshape operation. For more information about the + reshape operation, refer to the `spec `_. 
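+As a rough sketch of the expected control flow (the helper methods here are
+hypothetical, not part of the driver interface):
+
+.. code::
+
+    def update_provider_tree(self, provider_tree, nodename, allocations=None):
+        if self._inventory_needs_move(provider_tree, nodename):
+            if allocations is None:
+                # Ask the resource tracker to call back with allocation data.
+                raise exception.ReshapeNeeded()
+            # Edit ``allocations`` in place so that consumers of any moved
+            # inventory point at the new providers/resource classes.
+            self._move_allocations(provider_tree, nodename, allocations)
+        # ...then update inventory, traits and aggregates as usual.
+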
The virt driver is expected to update the ProviderTree object with current resource provider and inventory information. When the method returns, the @@ -98,9 +130,6 @@ aggregates, and traits associated with those resource providers. PF1 PF2 PF3 PF4------BW1 (root) agg2 -This method supersedes ``get_inventory`` and ``get_traits``: if this method is -implemented, neither ``get_inventory`` nor ``get_traits`` is used. - Driver implementations of ``update_provider_tree`` are expected to use public ``ProviderTree`` methods to effect changes to the provider tree passed in. Some of the methods which may be useful are as follows: @@ -143,7 +172,7 @@ would become: .. code:: - def update_provider_tree(self, provider_tree, nodename): + def update_provider_tree(self, provider_tree, nodename, allocations=None): inv_data = { 'VCPU': { ... }, 'MEMORY_MB': { ... }, @@ -151,6 +180,13 @@ would become: } provider_tree.update_inventory(nodename, inv_data) +When reporting inventory for the standard resource classes ``VCPU``, +``MEMORY_MB`` and ``DISK_GB``, implementors of ``update_provider_tree`` may +need to set the ``allocation_ratio`` and ``reserved`` values in the +``inv_data`` dict based on configuration to reflect changes on the compute +for allocation ratios and reserved resource amounts back to the placement +service. + Porting from get_traits ~~~~~~~~~~~~~~~~~~~~~~~ To replace ``get_traits``, developers should use the @@ -167,6 +203,33 @@ would become: .. code:: - def update_provider_tree(self, provider_tree, nodename): + def update_provider_tree(self, provider_tree, nodename, allocations=None): provider_tree.add_traits( nodename, 'HW_CPU_X86_AVX', 'HW_CPU_X86_AVX2', 'CUSTOM_GOLD') + +.. _taxonomy_of_traits_and_capabilities: + +Taxonomy of traits and capabilities +----------------------------------- + +There are various types of traits: + +- Some are standard (registered in + `os-traits `_); others + are custom. + +- Some are owned by the compute service; others can be managed by + operators. + +- Some come from driver-supported capabilities, via a mechanism which + was `introduced `_ to convert + them to standard traits on the compute node resource provider. This + mechanism is :ref:`documented in the configuration guide + `. + +This diagram may shed further light on how these traits relate to each +other and how they are managed. + +.. figure:: /_static/images/traits-taxonomy.svg + :width: 800 + :alt: Venn diagram showing taxonomy of traits and capabilities diff --git a/doc/source/reference/upgrade-checks.rst b/doc/source/reference/upgrade-checks.rst new file mode 100644 index 00000000000..c84ab26337f --- /dev/null +++ b/doc/source/reference/upgrade-checks.rst @@ -0,0 +1,266 @@ +============== +Upgrade checks +============== + +.. note:: + + This document details how to generate upgrade checks as part of a new + feature or bugfix. For info on how to apply existing upgrade checks, refer + to the documentation for the :program:`nova-status` command in + :doc:`/cli/nova-status`. For info on the general upgrade process for a nova + deployment, refer to :doc:`/admin/upgrades`. + +Nova provides automated :ref:`upgrade check tooling ` to +assist deployment tools in verifying critical parts of the deployment, +especially when it comes to major changes during upgrades that require operator +intervention. + +This guide covers the background on nova's upgrade check tooling, how it is +used, and what to look for in writing new checks. 
+ + +Background +---------- + +Nova has historically supported offline database schema migrations +(:program:`nova-manage db sync` and :program:`nova-manage api_db sync`) and +online data migrations (:program:`nova-manage db online_data_migrations`) +during upgrades, as discussed in :doc:`/reference/database-migrations`. +The :program:`nova-status upgrade check` command was introduced in the 15.0.0 +(Ocata) release to aid in the verification of two major required changes in that +release, namely Placement and Cells v2. + +Integration with the Placement service and deploying Cells v2 was optional +starting in the 14.0.0 Newton release and made required in the Ocata release. +The nova team working on these changes knew that there were required deployment +changes to successfully upgrade to Ocata. In addition, the required deployment +changes were not things that could simply be verified in a database migration +script, e.g. a migration script should not make REST API calls to Placement. + +So ``nova-status upgrade check`` was written to provide an automated +"pre-flight" check to verify that required deployment steps were performed +prior to upgrading to Ocata. + +Reference the `Ocata changes`_ for implementation details. + +.. _Ocata changes: https://review.opendev.org/#/q/topic:bp/resource-providers-scheduler-db-filters+status:merged+file:%255Enova/cmd/status.py + + +Guidelines +---------- + +* The checks should be able to run within a virtual environment or container. + All that is required is a full configuration file, similar to running other + ``nova-manage`` type administration commands. In the case of nova, this + means having :oslo.config:group:`api_database`, + :oslo.config:group:`placement`, etc sections configured. + +* Candidates for automated upgrade checks are things in a project's upgrade + release notes which can be verified via the database. For example, when + upgrading to Cells v2 in Ocata, one required step was creating + "cell mappings" for ``cell0`` and ``cell1``. This can easily be verified by + checking the contents of the ``cell_mappings`` table in the ``nova_api`` + database. + +* Checks will query the database(s) and potentially REST APIs (depending on the + check) but should not expect to run RPC calls. For example, a check should + not require that the ``nova-compute`` service is running on a particular + host. + +* Checks are typically meant to be run before re-starting and upgrading to new + service code, which is how `grenade uses them`__, but they can also be run + as a :ref:`post-install verify step ` which is + how `openstack-ansible`__ also uses them. The high-level set of upgrade steps + for upgrading nova in grenade is: + + * Install new code + * Sync the database schema for new models + (``nova-manage api_db sync``; ``nova-manage db sync``) + * Run the online data migrations (``nova-manage db online_data_migrations``) + * Run the upgrade check (``nova-status upgrade check``) + * Restart services with new code + + .. __: https://github.com/openstack-dev/grenade/blob/dc7f4a4ba/projects/60_nova/upgrade.sh#L96 + .. __: https://review.opendev.org/#/c/575125/ + +* Checks must be idempotent so they can be run repeatedly and the results are + always based on the latest data. This allows an operator to run the checks, + fix any issues reported, and then iterate until the status check no longer + reports any issues. + +* Checks which cannot easily, or should not, be run within offline database + migrations are a good candidate for these CLI-driven checks. 
For example,
+  ``instances`` records are in the cell database and for each instance there
+  should be a corresponding ``request_specs`` table entry in the ``nova_api``
+  database. A ``nova-manage db online_data_migrations`` routine was added in
+  the Newton release to back-fill request specs for existing instances, and
+  `in Rocky`__ an upgrade check was added to make sure all non-deleted
+  instances have a request spec so compatibility code can be removed in Stein.
+  In older releases of nova we would have added a `blocker migration`__ as part
+  of the database schema migrations to make sure the online data migrations had
+  been completed before the upgrade could proceed.
+
+  .. note::
+
+     Usage of ``nova-status upgrade check`` does not preclude the need
+     for blocker migrations within a given database, but in the case of
+     request specs the check spans multiple databases and was a better
+     fit for the ``nova-status`` tooling.
+
+  .. __: https://review.opendev.org/#/c/581813/
+  .. __: https://review.opendev.org/#/c/289450/
+
+* All checks should have an accompanying upgrade release note.
+
+
+Structure
+---------
+
+There is no graph logic for checks, meaning each check is meant to be run
+independently of other checks in the same set. For example, a project could
+have five checks which run serially but that does not mean the second check
+in the set depends on the results of the first check in the set, or the
+third check depends on the second, and so on.
+
+The base framework is fairly simple as can be seen from the `initial change`_.
+Each check is registered in the ``_upgrade_checks`` variable and the ``check``
+method executes each check and records the result. The most severe result is
+recorded for the final return code.
+
+Each check yields one of three possible results:
+
+* ``Success``: All upgrade readiness checks passed successfully and there is
+  nothing to do.
+* ``Warning``: At least one check encountered an issue and requires further
+  investigation. This is considered a warning but the upgrade may be OK.
+* ``Failure``: There was an upgrade status check failure that needs to be
+  investigated. This should be considered something that stops an upgrade.
+
+The ``UpgradeCheckResult`` object provides for adding details when there is a
+warning or failure result; the details should generally describe how to
+resolve the failure, e.g. maybe ``nova-manage db online_data_migrations`` is
+incomplete and needs to be run again.
+
+Using the `cells v2 check`_ as an example, there are really two checks
+involved:
+
+1. Do the cell0 and cell1 mappings exist?
+2. Do host mappings exist in the API database if there are compute node
+   records in the cell database?
+
+Failing either check results in a ``Failure`` status for that check and return
+code of ``2`` for the overall run.
+
+The initial `placement check`_ provides an example of a warning response. In
+that check, if there are fewer resource providers in Placement than there are
+compute nodes in the cell database(s), the deployment may be underutilized
+because the ``nova-scheduler`` is using the Placement service to determine
+candidate hosts for scheduling.
+
+Warning results are good for scenarios that are known to occur during a
+rolling upgrade process, e.g. ``nova-compute`` being configured to report
+resource provider information into the Placement service. These are things
+that should be investigated and completed at some point, but might not cause
+any immediate failures.
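+To make this concrete, the following minimal sketch registers a single check
+(the check name and helper are invented for illustration) using the
+oslo.upgradecheck library that later generalized this framework:
+
+.. code-block:: python
+
+    from oslo_upgradecheck import upgradecheck
+
+    class Checks(upgradecheck.UpgradeCommands):
+
+        def _check_something(self):
+            # Query the database(s) here; do not rely on RPC calls.
+            if self._all_records_migrated():  # hypothetical helper
+                return upgradecheck.Result(upgradecheck.Code.SUCCESS)
+            return upgradecheck.Result(
+                upgradecheck.Code.FAILURE,
+                'Run "nova-manage db online_data_migrations" again.')
+
+        # Each entry pairs a user-facing check name with its method.
+        _upgrade_checks = (('Something', _check_something),)
+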
+ +The results feed into a standard output for the checks: + +.. code-block:: console + + $ nova-status upgrade check + +----------------------------------------------------+ + | Upgrade Check Results | + +----------------------------------------------------+ + | Check: Cells v2 | + | Result: Success | + | Details: None | + +----------------------------------------------------+ + | Check: Placement API | + | Result: Failure | + | Details: There is no placement-api endpoint in the | + | service catalog. | + +----------------------------------------------------+ + +.. _initial change: https://review.opendev.org/#/c/411517/ +.. _cells v2 check: https://review.opendev.org/#/c/411525/ +.. _placement check: https://review.opendev.org/#/c/413250/ + + +FAQs +---- + +- How is the ``nova-status`` upgrade script packaged and deployed? + + There is a ``console_scripts`` entry for ``nova-status`` in the ``setup.cfg`` + file. + +- Why are there multiple parts to the command structure, i.e. "upgrade" and + "check"? + + This is an artifact of how the ``nova-manage`` command is structured which + has categories of sub-commands, like ``nova-manage db`` is a sub-category + made up of other sub-commands like ``nova-manage db sync``. The + ``nova-status upgrade check`` command was written in the same way for + consistency and extensibility if other sub-commands need to be added later. + +- Why is the upgrade check command not part of the standard python-\*client + CLIs? + + The ``nova-status`` command was modeled after the ``nova-manage`` command + which is meant to be admin-only and has direct access to the database, + unlike other CLI packages like python-novaclient which requires a token + and communicates with nova over the REST API. Because of this, it is also + possible to write commands in ``nova-manage`` and ``nova-status`` that can + work while the API service is down for maintenance. + +- How should the checks be documented? + + Each check should be documented in the :ref:`history section + ` of the CLI guide and have a release note. This is + important since the checks can be run in an isolated environment apart from + the actual deployed version of the code and since the checks should be + idempotent, the history / change log is good for knowing what is being + validated. + +- Do other projects support upgrade checks? + + A community-wide `goal for the Stein release`__ is adding the same type of + ``$PROJECT-status upgrade check`` tooling to other projects to ease in + upgrading OpenStack across the board. So while the guidelines in this + document are primarily specific to nova, they should apply generically to + other projects wishing to incorporate the same tooling. + + .. __: https://governance.openstack.org/tc/goals/stein/upgrade-checkers.html + +- Where should the documentation live for projects other than nova? + + As part of the standard OpenStack project `documentation guidelines`__ the + command should be documented under ``doc/source/cli`` in each project repo. + + .. __: https://docs.openstack.org/doc-contrib-guide/project-guides.html + +- Can upgrade checks be backported? + + Sometimes upgrade checks can be backported to aid in pre-empting bugs on + stable branches. For example, a check was added for `bug 1759316`__ in Rocky + which was also backported to stable/queens in case anyone upgrading from Pike + to Queens would hit the same issue. 
Backportable checks are generally only
+  made for latent bugs, since someone who has already passed checks and
+  upgraded to a given stable branch should not start failing after a patch
+  release on that same branch. For this reason, any check being backported
+  should have a release note with it.
+
+  .. __: https://bugs.launchpad.net/nova/+bug/1759316
+
+- Can upgrade checks only be for N-1 to N version upgrades?
+
+  No, not necessarily. The upgrade checks are also an essential part of
+  `fast-forward upgrades`__ to make sure that as you roll through each
+  release, performing schema (data model) updates and data migrations, you
+  are also completing all of the necessary changes. For example, if you are
+  fast-forward upgrading from Ocata to Rocky, something could have been
+  added, deprecated or removed in Pike or Queens, and a pre-upgrade check is
+  a way to make sure the necessary steps were taken while upgrading through
+  those releases before restarting the Rocky code at the end.
+
+  .. __: https://wiki.openstack.org/wiki/Fast_forward_upgrades
diff --git a/doc/source/reference/vm-states.rst b/doc/source/reference/vm-states.rst
index f9d6ae554ef..5a0dc5f6627 100644
--- a/doc/source/reference/vm-states.rst
+++ b/doc/source/reference/vm-states.rst
@@ -87,7 +87,7 @@ resume Suspended N/A Active
 rescue Active, Shutoff Resize Verify, unset Rescued
 unrescue Rescued N/A Active
 set admin password Active N/A Active
-rebuild Active, Shutoff Resize Verify, unset Active
+rebuild Active, Shutoff Resize Verify, unset Active, Shutoff
 force delete Soft Deleted N/A Deleted
 restore Soft Deleted N/A Active
 soft delete Active, Shutoff, N/A Soft Deleted
diff --git a/doc/source/user/aggregates.rst b/doc/source/user/aggregates.rst
deleted file mode 100644
index c76807d5df8..00000000000
--- a/doc/source/user/aggregates.rst
+++ /dev/null
@@ -1,139 +0,0 @@
-..
-      Copyright 2012 OpenStack Foundation
-      Copyright 2012 Citrix Systems, Inc.
-      Copyright 2012, The Cloudscaling Group, Inc.
-      All Rights Reserved.
-
-      Licensed under the Apache License, Version 2.0 (the "License"); you may
-      not use this file except in compliance with the License. You may obtain
-      a copy of the License at
-
-          http://www.apache.org/licenses/LICENSE-2.0
-
-      Unless required by applicable law or agreed to in writing, software
-      distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-      WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-      License for the specific language governing permissions and limitations
-      under the License.
-
-Host Aggregates
-===============
-
-Host aggregates can be regarded as a mechanism to further partition an
-availability zone; while availability zones are visible to users, host
-aggregates are only visible to administrators. Host aggregates started out as
-a way to use Xen hypervisor resource pools, but have been generalized to provide
-a mechanism to allow administrators to assign key-value pairs to groups of
-machines. Each node can have multiple aggregates, each aggregate can have
-multiple key-value pairs, and the same key-value pair can be assigned to
-multiple aggregates. This information can be used in the scheduler to enable
-advanced scheduling, to set up Xen hypervisor resource pools or to define
-logical groups for migration. For more information, including an example of
-associating a group of hosts to a flavor, see :ref:`host-aggregates`.
- - -Availability Zones (AZs) ------------------------- - -Availability Zones are the end-user visible logical abstraction for -partitioning a cloud without knowing the physical infrastructure. -That abstraction doesn't come up in Nova with an actual database model since -the availability zone is actually a specific metadata information attached to -an aggregate. Adding that specific metadata to an aggregate makes the aggregate -visible from an end-user perspective and consequently allows to schedule upon a -specific set of hosts (the ones belonging to the aggregate). - -That said, there are a few rules to know that diverge from an API perspective -between aggregates and availability zones: - -- one host can be in multiple aggregates, but it can only be in one - availability zone -- by default a host is part of a default availability zone even if it doesn't - belong to an aggregate (the configuration option is named - ``default_availability_zone``) - -.. warning:: That last rule can be very error-prone. Since the user can see the - list of availability zones, they have no way to know whether the default - availability zone name (currently *nova*) is provided because an host - belongs to an aggregate whose AZ metadata key is set to *nova*, or because - there is at least one host not belonging to any aggregate. Consequently, it is - highly recommended for users to never ever ask for booting an instance by - specifying an explicit AZ named *nova* and for operators to never set the - AZ metadata for an aggregate to *nova*. That leads to some problems - due to the fact that the instance AZ information is explicitly attached to - *nova* which could break further move operations when either the host is - moved to another aggregate or when the user would like to migrate the - instance. - -.. note:: Availability zone name must NOT contain ':' since it is used by admin - users to specify hosts where instances are launched in server creation. - See :doc:`Select hosts where instances are launched ` for more detail. - -There is a nice educational video about availability zones from the Rocky -summit which can be found here: https://www.openstack.org/videos/vancouver-2018/curse-your-bones-availability-zones-1 - -Design ------- - -The OSAPI Admin API is extended to support the following operations: - -* Aggregates - - * list aggregates: returns a list of all the host-aggregates - * create aggregate: creates an aggregate, takes a friendly name, etc. returns an id - * show aggregate: shows the details of an aggregate (id, name, availability_zone, hosts and metadata) - * update aggregate: updates the name and availability zone of an aggregate - * set metadata: sets the metadata on an aggregate to the values supplied - * delete aggregate: deletes an aggregate, it fails if the aggregate is not empty - * add host: adds a host to the aggregate - * remove host: removes a host from the aggregate -* Hosts - - * list all hosts by service - - * It has been deprecated since microversion 2.43. Use `list hypervisors` instead. - * start host maintenance (or evacuate-host): disallow a host to serve API requests and migrate instances to other hosts of the aggregate - - * It has been deprecated since microversion 2.43. Use `disable service` instead. - * stop host maintenance (or rebalance-host): put the host back into operational mode, migrating instances back onto that host - - * It has been deprecated since microversion 2.43. Use `enable service` instead. 
- -* Hypervisors - - * list hypervisors: list hypervisors with hypervisor hostname - -* Compute services - - * enable service - * disable service - -Using the Nova CLI ------------------- - -Using the nova command you can create, delete and manage aggregates. The following section outlines the list of available commands. - -Usage -~~~~~ - -:: - - * aggregate-list Print a list of all aggregates. - * aggregate-create [] Create a new aggregate with the specified details. - * aggregate-delete Delete the aggregate by its ID or name. - * aggregate-show Show details of the aggregate specified by its ID or name. - * aggregate-add-host Add the host to the aggregate specified by its ID or name. - * aggregate-remove-host Remove the specified host from the aggregate specified by its ID or name. - * aggregate-set-metadata [ ...] - Update the metadata associated with the aggregate specified by its ID or name. - * aggregate-update [--name ] [--availability-zone ] - Update the aggregate's name or availability zone. - - * host-list List all hosts by service. - * hypervisor-list [--matching ] [--marker ] [--limit ] - List hypervisors. - - * host-update [--status ] [--maintenance ] - Put/resume host into/from maintenance. - * service-enable Enable the service. - * service-disable [--reason ] Disable the service. diff --git a/doc/source/user/architecture.rst b/doc/source/user/architecture.rst index 39871af24f1..c4d597b86fe 100644 --- a/doc/source/user/architecture.rst +++ b/doc/source/user/architecture.rst @@ -18,7 +18,7 @@ Nova System Architecture ======================== -Nova is comprised of multiple server processes, each performing different +Nova comprises multiple server processes, each performing different functions. The user-facing interface is a REST API, while internally Nova components communicate via an RPC message passing mechanism. @@ -59,6 +59,6 @@ of a typical Nova deployment. * Compute: manages communication with hypervisor and virtual machines. * Conductor: handles requests that need coordination (build/resize), acts as a database proxy, or handles object conversions. -* `Placement `__: tracks resource provider inventories and usages. +* :placement-doc:`Placement <>`: tracks resource provider inventories and usages. While all services are designed to be horizontally scalable, you should have significantly more computes than anything else. diff --git a/doc/source/user/availability-zones.rst b/doc/source/user/availability-zones.rst new file mode 100644 index 00000000000..889da64968d --- /dev/null +++ b/doc/source/user/availability-zones.rst @@ -0,0 +1,31 @@ +================== +Availability zones +================== + +Availability Zones are an end-user visible logical abstraction for partitioning +a cloud without knowing the physical infrastructure. Availability zones can be +used to partition a cloud on arbitrary factors, such as location (country, +datacenter, rack), network layout and/or power source. Because of the +flexibility, the names and purposes of availability zones can vary massively +between clouds. + +In addition, other services, such as the :neutron-doc:`networking service <>` +and the :cinder-doc:`block storage service <>`, also provide an availability +zone feature. However, the implementation of these features differs vastly +between these different services. Consult the documentation for these other +services for more information on their implementation of this feature. 
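+
+Since the zone names are operator-defined, a user typically starts by
+discovering what a particular cloud exposes. As an illustrative sketch only
+(assuming the openstacksdk is installed and a ``clouds.yaml`` entry named
+``mycloud`` exists; both are assumptions of this example, not part of nova):
+
+.. code-block:: python
+
+    import openstack
+
+    # Assumes a configured cloud named 'mycloud' in clouds.yaml.
+    conn = openstack.connect(cloud='mycloud')
+
+    # The zone names returned are whatever the operators chose; they
+    # vary between clouds.
+    for az in conn.compute.availability_zones():
+        print(az.name)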
+
+
+Usage
+-----
+
+Availability zones can only be created and configured by an admin but they can
+be used by an end-user when creating an instance. For example:
+
+.. code-block:: console
+
+    $ openstack server create --availability-zone ZONE ... SERVER
+
+It is also possible to specify a destination host and/or node using this
+command; however, this is an admin-only operation by default. For more
+information, see :ref:`using-availability-zones-to-select-hosts`.
diff --git a/doc/source/user/block-device-mapping.rst b/doc/source/user/block-device-mapping.rst
index 4517a5bf9a1..b43f01de8b4 100644
--- a/doc/source/user/block-device-mapping.rst
+++ b/doc/source/user/block-device-mapping.rst
@@ -48,9 +48,18 @@ When we talk about block device mapping, we usually refer to one of two things
 
   virt driver code). We will refer to this format as 'Driver BDMs' from now
   on.
 
+  For more details on this, please refer to the :doc:`Driver BDM Data
+  Structures <../reference/block-device-structs>` reference document.
 
-Data format and its history
-----------------------------
+.. note::
+
+   The maximum limit on the number of disk devices allowed to attach to
+   a single server is configurable with the option
+   :oslo.config:option:`compute.max_disk_devices_to_attach`.
+
+
+API BDM data format and its history
+-----------------------------------
 
 In the early days of Nova, block device mapping general structure closely
 mirrored that of the EC2 API. During the Havana release of Nova, block device
@@ -127,7 +136,7 @@ fields (in addition to the ones that were already there):
 
   * `snapshot`
  * `blank`
 
-* dest_type - this can have one of the following values:
+* destination_type - this can have one of the following values:
 
   * `local`
   * `volume`
@@ -161,10 +170,18 @@ fields (in addition to the ones that were already there):
   usage is to set it to 0 for the boot device and leave it as None for any
   other devices.
 
-Valid source / dest combinations
-^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+* volume_type - Added in microversion 2.67 to the servers create API to
+  support specifying volume type when booting instances. When we snapshot a
+  volume-backed server, the block_device_mapping_v2 image metadata will
+  include the volume_type from the BDM record, so if the user then creates
+  another server from that snapshot, the volume that nova creates from that
+  snapshot will use the same volume_type. If a user wishes to change that
+  volume type in the image metadata, they can do so via the image API.
 
-Combination of the ``source_type`` and ``dest_type`` will define the
+Valid source / destination combinations
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+Combination of the ``source_type`` and ``destination_type`` will define the
 kind of block device the entry is referring to. The following
 combinations are supported:
 
@@ -206,3 +223,21 @@ mapping is valid before accepting a boot request.
 
 .. [3] This is a feature that the EC2 API offers as well and has been in Nova
        for a long time, although it has been broken in several releases. More
       info can be found on `this bug `
+
+
+FAQs
+----
+
+1. Is it possible to configure nova to automatically use cinder to back all
+   root disks with volumes?
+
+   No, there is nothing automatic within nova that converts a
+   non-:term:`boot-from-volume ` request to one where the
+   image is converted to a root volume. Several ideas have been discussed
+   over time and are captured in the spec for `volume-backed flavors`_.
However, if you wish
+   to force users to always create volume-backed servers, you can configure
+   the API service by setting :oslo.config:option:`max_local_block_devices`
+   to 0. This will result in any non-boot-from-volume server create request
+   failing with a 400 response.
+
+.. _volume-backed flavors: https://review.opendev.org/511965/
diff --git a/doc/source/user/cells.rst b/doc/source/user/cells.rst
index 37c830c08be..74d6fc1a3c0 100644
--- a/doc/source/user/cells.rst
+++ b/doc/source/user/cells.rst
@@ -20,54 +20,7 @@
 Before reading further, there is a nice overview presentation_ that
 Andrew Laski gave at the Austin (Newton) summit which is worth watching.
 
-.. _presentation: https://www.openstack.org/videos/video/nova-cells-v2-whats-going-on
-
-Cells V1
-========
-
-Historically, Nova has depended on a single logical database and message queue
-that all nodes depend on for communication and data persistence. This becomes
-an issue for deployers as scaling and providing fault tolerance for these
-systems is difficult.
-
-We have an experimental feature in Nova called "cells", hereafter referred to
-as "cells v1", which is used by some large deployments to partition compute
-nodes into smaller groups, coupled with a database and queue. This seems to be
-a well-liked and easy-to-understand arrangement of resources, but the
-implementation of it has issues for maintenance and correctness.
-See `Comparison with Cells V1`_ for more detail.
-
-Status
-~~~~~~
-
-.. deprecated:: 16.0.0
-   Cells v1 is deprecated in favor of Cells v2 as of the 16.0.0 Pike release.
-
-Cells v1 is considered experimental and receives much less testing than the
-rest of Nova. For example, there is no job for testing cells v1 with Neutron.
-
-The priority for the core team is implementation of and migration to cells v2.
-Because of this, there are a few restrictions placed on cells v1:
-
-#. Cells v1 is in feature freeze. This means no new feature proposals for cells
-   v1 will be accepted by the core team, which includes but is not limited to
-   API parity, e.g. supporting virtual interface attach/detach with Neutron.
-#. Latent bugs caused by the cells v1 design will not be fixed, e.g.
-   `bug 1489581 `_. So if new
-   tests are added to Tempest which trigger a latent bug in cells v1 it may not
-   be fixed. However, regressions in working function should be tracked with
-   bugs and fixed.
-
-**Suffice it to say, new deployments of cells v1 are not encouraged.**
-
-The restrictions above are basically meant to prioritize effort and focus on
-getting cells v2 completed, and feature requests and hard to fix latent bugs
-detract from that effort. Further discussion on this can be found in the
-`2015/11/12 Nova meeting minutes
-`_.
-
-There are no plans to remove Cells V1 until V2 is usable by existing
-deployments and there is a migration path.
+.. _presentation: https://www.openstack.org/videos/summits/austin-2016/nova-cells-v2-whats-going-on
 
 .. _cells-v2:
 
@@ -77,6 +30,10 @@ Cells V2
 
 * `Newton Summit Video - Nova Cells V2: What's Going On? `_
 * `Pike Summit Video - Scaling Nova: How CellsV2 Affects Your Deployment `_
 * `Queens Summit Video - Add Cellsv2 to your existing Nova deployment `_
+* `Rocky Summit Video - Moving from CellsV1 to CellsV2 at CERN
+  `_
+* `Stein Summit Video - Scaling Nova with CellsV2: The Nova Developer and the CERN Operator perspective
+  `_
 
 Manifesto
 ~~~~~~~~~
@@ -111,10 +68,11 @@ always be much smaller than the number of instances.
There are availability implications with this change since something like a
 'nova list' which might query multiple cells could end up with a partial result
-if there is a database failure in a cell. A database failure within a cell
-would cause larger issues than a partial list result so the expectation is that
-it would be addressed quickly and cellsv2 will handle it by indicating in the
-response that the data may not be complete.
+if there is a database failure in a cell. See :doc:`/admin/cells` for more
+information on the recommended practices in such situations. A database failure
+within a cell would cause larger issues than a partial list result so the
+expectation is that it would be addressed quickly and cellsv2 will handle it by
+indicating in the response that the data may not be complete.
 
 Since this is very similar to what we have with current cells, in terms of
 organization of resources, we have decided to call this "cellsv2" for
@@ -155,23 +113,6 @@ The benefits of this new organization are:
 
 * Adding new sets of hosts as a new "cell" allows them to be plugged into a
   deployment and tested before allowing builds to be scheduled to them.
 
-Comparison with Cells V1
-------------------------
-
-In reality, the proposed organization is nearly the same as what we currently
-have in cells today. A cell mostly consists of a database, queue, and set of
-compute nodes. The primary difference is that current cells require a
-nova-cells service that synchronizes information up and down from the top level
-to the child cell. Additionally, there are alternate code paths in
-compute/api.py which handle routing messages to cells instead of directly down
-to a compute host. Both of these differences are relevant to why we have a hard
-time achieving feature and test parity with regular nova (because many things
-take an alternate path with cells) and why it's hard to understand what is
-going on (all the extra synchronization of data). The new proposed cellsv2
-organization avoids both of these problems by letting things live where they
-should, teaching nova to natively find the right db, queue, and compute node to
-handle a given request.
-
 Database split
 ~~~~~~~~~~~~~~
 
@@ -245,6 +186,14 @@ scheduled are relegated to the cell0 database, which is effectively a
 graveyard of instances that failed to start. All successful/running
 instances are stored in "cell1".
 
+
+.. note:: Since Nova services make use of both the configuration file and
+          some database records, starting or restarting those services with
+          an incomplete configuration could lead to an incorrect deployment.
+          Please only restart the services once you are done with the steps
+          described below.
+
+
 First Time Setup
 ~~~~~~~~~~~~~~~~
 
@@ -493,7 +442,7 @@ database yet. This will set up a single cell Nova deployment.
    matches the transport URL for the cell created in step 5, and start the
    nova-compute service. Before step 7, make sure you have compute hosts in
    the database by running::
-
+
      nova service-list --binary nova-compute
 
 7. Run the ``discover_hosts`` command to map compute hosts to the single cell
@@ -554,6 +503,9 @@ database. This will set up a single cell Nova deployment.
 Upgrade with Cells V1
 ~~~~~~~~~~~~~~~~~~~~~
 
+.. todo:: This needs to be removed but `Adding a new cell to an existing deployment`_
+          is still using it.
+
 You are upgrading an existing Nova install that has Cells V1 enabled and have
 compute hosts in your databases. This will set up a multiple cell Nova
 deployment.
At this time, it is recommended to keep Cells V1 enabled during and @@ -743,3 +695,65 @@ FAQs to restart the scheduler process to refresh the cache, or send a SIGHUP signal to the scheduler by which it will automatically refresh the cells cache and the changes will take effect. + +#. Why was the cells REST API not implemented for CellsV2? Why are + there no CRUD operations for cells in the API? + + One of the deployment challenges that CellsV1 had was the + requirement for the API and control services to be up before a new + cell could be deployed. This was not a problem for large-scale + public clouds that never shut down, but is not a reasonable + requirement for smaller clouds that do offline upgrades and/or + clouds which could be taken completely offline by something like a + power outage. Initial devstack and gate testing for CellsV1 was + delayed by the need to engineer a solution for bringing the services + partially online in order to deploy the rest, and this continues to + be a gap for other deployment tools. Consider also the FFU case + where the control plane needs to be down for a multi-release + upgrade window where changes to cell records have to be made. This + would be quite a bit harder if the way those changes are made is + via the API, which must remain down during the process. + + Further, there is a long-term goal to move cell configuration + (i.e. cell_mappings and the associated URLs and credentials) into + config and get away from the need to store and provision those + things in the database. Obviously a CRUD interface in the API would + prevent us from making that move. + +#. Why are cells not exposed as a grouping mechanism in the API for + listing services, instances, and other resources? + + Early in the design of CellsV2 we set a goal to not let the cell + concept leak out of the API, even for operators. Aggregates are the + way nova supports grouping of hosts for a variety of reasons, and + aggregates can cut across cells, and/or be aligned with them if + desired. If we were to support cells as another grouping mechanism, + we would likely end up having to implement many of the same + features for them as aggregates, such as scheduler features, + metadata, and other searching/filtering operations. Since + aggregates are how Nova supports grouping, we expect operators to + use aggregates any time they need to refer to a cell as a group of + hosts from the API, and leave actual cells as a purely + architectural detail. + + The need to filter instances by cell in the API can and should be + solved by adding a generic by-aggregate filter, which would allow + listing instances on hosts contained within any aggregate, + including one that matches the cell boundaries if so desired. + +#. Why are the API responses for ``GET /servers``, ``GET /servers/detail``, + ``GET /servers/{server_id}`` and ``GET /os-services`` missing some + information for certain cells at certain times? Why do I see the status as + "UNKNOWN" for the servers in those cells at those times when I run + ``openstack server list`` or ``openstack server show``? + + Starting from microversion 2.69 the API responses of ``GET /servers``, + ``GET /servers/detail``, ``GET /servers/{server_id}`` and + ``GET /os-services`` may contain missing keys during down cell situations. + See the `Handling Down Cells + `__ + section of the Compute API guide for more information on the partial + constructs. + + For administrative considerations, see + :ref:`Handling cell failures `. 
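+
+   As an illustration only, a client that opts in to microversion 2.69 can
+   defensively handle the reduced payloads returned for servers in down
+   cells. The endpoint URL and token below are placeholders, not values
+   from this document:
+
+   .. code-block:: python
+
+      import requests
+
+      # Placeholder values; in a real deployment these come from the
+      # keystone service catalog and a token issued by keystone.
+      compute_endpoint = 'http://controller:8774/v2.1'
+      token = '...'
+
+      resp = requests.get(
+          compute_endpoint + '/servers/detail',
+          headers={'X-Auth-Token': token,
+                   # Opt in to partial results from down cells.
+                   'OpenStack-API-Version': 'compute 2.69'})
+      for server in resp.json()['servers']:
+          # Servers in a down cell are returned with a reduced set of
+          # keys, so avoid assuming every key is present.
+          print(server['id'], server.get('status', 'UNKNOWN'))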
diff --git a/doc/source/user/cellsv2-layout.rst b/doc/source/user/cellsv2-layout.rst index a9c6adc0d32..945770c6658 100644 --- a/doc/source/user/cellsv2-layout.rst +++ b/doc/source/user/cellsv2-layout.rst @@ -22,12 +22,6 @@ is geared towards people who want to have multiple cells for whatever reason, the nature of the cellsv2 support in Nova means that it applies in some way to all deployments. -.. note:: The concepts laid out in this document do not in any way - relate to CellsV1, which includes the ``nova-cells`` - service, and the ``[cells]`` section of the configuration - file. For more information on the differences, see the main - :ref:`cells` page. - Concepts ======== @@ -266,9 +260,13 @@ database. This means that a multi-cell environment may incorrectly calculate the usage of a tenant if one of the cells is unreachable, as those resources cannot be counted. In this case, the tenant may be able to consume more resource from one of the available cells, putting -them far over quota when the unreachable cell returns. In the future, -placement will provide us with a consistent way to calculate usage -independent of the actual cell being reachable. +them far over quota when the unreachable cell returns. + +.. note:: Starting in the Train (20.0.0) release, it is possible to configure + counting of quota usage from the placement service and API database + to make quota usage calculations resilient to down or poor-performing + cells in a multi-cell environment. See the + :doc:`quotas documentation` for more details. Performance of listing instances ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ @@ -291,25 +289,49 @@ documentation ` for more details. +.. _cells-v2-layout-metadata-api: + Nova Metadata API service ~~~~~~~~~~~~~~~~~~~~~~~~~ -The Nova metadata API service should be global across all cells, and -thus be configured as an API-level service with access to the -``[api_database]/connection`` information. The nova metadata API service must -not be run as a standalone service (e.g. must not be run via the -nova-api-metadata script). +Starting from the Stein release, the :doc:`nova metadata API service +` can be run either globally or per cell using the +:oslo.config:option:`api.local_metadata_per_cell` configuration option. + +**Global** + +If you have networks that span cells, you might need to run Nova metadata API +globally. When running globally, it should be configured as an API-level +service with access to the :oslo.config:option:`api_database.connection` +information. The nova metadata API service **must not** be run as a standalone +service, using the :program:`nova-api-metadata` service, in this case. + +**Local per cell** -Consoleauth service and console proxies -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +Running Nova metadata API per cell can have better performance and data +isolation in a multi-cell deployment. If your networks are segmented along +cell boundaries, then you can run Nova metadata API service per cell. If you +choose to run it per cell, you should also configure each +:neutron-doc:`neutron-metadata-agent +` service to +point to the corresponding :program:`nova-api-metadata`. The nova metadata API +service **must** be run as a standalone service, using the +:program:`nova-api-metadata` service, in this case. -`As of Rocky`__, the ``nova-consoleauth`` service has been deprecated and cell -databases are used for storing token authorizations. All new consoles will be -supported by the database backend and existing consoles will be reset. 
Console -proxies must be run per cell because the new console token authorizations are -stored in cell databases. -.. __: https://specs.openstack.org/openstack/nova-specs/specs/rocky/approved/convert-consoles-to-objects.html +Console proxies +~~~~~~~~~~~~~~~ + +`Starting from the Rocky release`__, console proxies must be run per cell +because console token authorizations are stored in cell databases. This means +that each console proxy server must have access to the +:oslo.config:option:`database.connection` information for the cell database +containing the instances for which it is proxying console access. + +.. __: https://specs.openstack.org/openstack/nova-specs/specs/rocky/implemented/convert-consoles-to-objects.html + + +.. _upcall: Operations Requiring upcalls ~~~~~~~~~~~~~~~~~~~~~~~~~~~~ @@ -335,6 +357,8 @@ following: #. Attaching a volume and ``[cinder]/cross_az_attach=False`` #. Instance reschedules during boot and resize (part 2) + .. note:: This has been resolved in the Ussuri release [#]_ [#]_. + The first is simple: if you boot an instance, it gets scheduled to a compute node, fails, it would normally be re-scheduled to another node. That requires scheduler intervention and thus it will not work @@ -352,12 +376,12 @@ affinity check, you should set ``[workarounds]/disable_group_policy_check_upcall=True`` and ``[filter_scheduler]/track_instance_changes=False`` in ``nova.conf``. -The fourth is currently only a problem when performing live migrations -using the XenAPI driver and not specifying ``--block-migrate``. The -driver will attempt to figure out if block migration should be performed -based on source and destination hosts being in the same aggregate. Since -aggregates data has migrated to the API database, the cell conductor will -not be able to access the aggregate information and will fail. +The fourth was previously only a problem when performing live migrations using +the since-removed XenAPI driver and not specifying ``--block-migrate``. The +driver would attempt to figure out if block migration should be performed based +on source and destination hosts being in the same aggregate. Since aggregates +data had migrated to the API database, the cell conductor would not be able to +access the aggregate information and would fail. The fifth is a problem because when a volume is attached to an instance in the *nova-compute* service, and ``[cinder]/cross_az_attach=False`` in @@ -367,10 +391,11 @@ Since the aggregates are in the API database and the cell conductor cannot access that information, so this will fail. In the future this check could be moved to the *nova-api* service such that the availability zone between the instance and the volume is checked before we reach the cell, except in the -case of boot from volume where the *nova-compute* service itself creates the -volume and must tell Cinder in which availability zone to create the volume. -Long-term, volume creation during boot from volume should be moved to the -top-level superconductor which would eliminate this AZ up-call check problem. +case of :term:`boot from volume ` where the *nova-compute* +service itself creates the volume and must tell Cinder in which availability +zone to create the volume. Long-term, volume creation during boot from volume +should be moved to the top-level superconductor which would eliminate this AZ +up-call check problem. The sixth is detailed in `bug 1781286`_ and similar to the first issue. 
The issue is that servers created without a specific availability zone
@@ -383,3 +408,5 @@ the API DB.
 .. [#] https://blueprints.launchpad.net/nova/+spec/efficient-multi-cell-instance-list-and-sort
 .. [#] https://specs.openstack.org/openstack/nova-specs/specs/queens/approved/return-alternate-hosts.html
 .. [#] https://blueprints.launchpad.net/nova/+spec/live-migration-in-xapi-pool
+.. [#] https://review.opendev.org/686047/
+.. [#] https://review.opendev.org/686050/
diff --git a/doc/source/user/certificate-validation.rst b/doc/source/user/certificate-validation.rst
index 1b18c535cc1..11407121592 100644
--- a/doc/source/user/certificate-validation.rst
+++ b/doc/source/user/certificate-validation.rst
@@ -29,7 +29,7 @@ create or rebuild commands, signature verification and certificate validation
 will be performed, regardless of their settings in the Nova configurations.
 See `Using Signature Verification`_ for details.
 
-.. _Cursive: http://git.openstack.org/cgit/openstack/cursive/
+.. _Cursive: http://opendev.org/x/cursive/
 .. _Glance Image Signature Verification documentation: https://docs.openstack.org/glance/latest/user/signature.html
 
 .. note::
@@ -59,8 +59,15 @@ Limitations
   See the `feature support matrix`_ for information on which drivers support
   the feature at any given release.
 
+* As of the 18.0.0 Rocky release, image signature and trusted image
+  certification validation is not supported with the Libvirt compute driver
+  when using the ``rbd`` image backend (``[libvirt]/images_type=rbd``) and
+  ``RAW`` formatted images. This is because such images are cloned directly
+  in the ``RBD`` backend, which avoids the calls that would download and
+  verify the image on the compute host.
+
 * As of the 18.0.0 Rocky release, trusted image certification validation is
-  not supported with volume-backed (boot from volume) instances. The block
+  not supported with volume-backed
+  (:term:`boot from volume `) instances. The block
   storage service support may be available in a future release:
   https://blueprints.launchpad.net/cinder/+spec/certificate-validate
@@ -138,8 +145,8 @@ Certificate validation is triggered by one of two ways:
 2. A list of trusted certificate IDs is provided by one of three ways:
 
    .. note:: The command line support is pending changes
-      https://review.openstack.org/#/c/500396/ and
-      https://review.openstack.org/#/c/501926/ to python-novaclient and
+      https://review.opendev.org/#/c/500396/ and
+      https://review.opendev.org/#/c/501926/ to python-novaclient and
       python-openstackclient, respectively.
 
   Environment Variable
@@ -539,7 +546,7 @@ Save off the certificate UUIDs (found in the secret href):
 
   $ cert_ca_uuid=8fbcce5d-d646-4295-ba8a-269fc9451eeb
   $ cert_intermediate_a_uuid=0b5d2c72-12cc-4ba6-a8d7-3ff5cc1d8cb8
   $ cert_intermediate_b_uuid=674736e3-f25c-405c-8362-bbf991e0ce0a
-  $ cert_client_uuid=fab1d219-5df8-4c20-8401-7985008c3dbc
+  $ cert_client_uuid=125e6199-2de4-46e3-b091-8e2401ef0d63
 
 Create a signed image
@@ -582,50 +589,55 @@ Save off the base64 encoded signature:
 
 Upload the signed image to Glance:
 
-.. TODO: Change this example to use "openstack image create".
-
 ..
code-block:: console - $ glance image-create \ - --property name=cirros_client_signedImage \ - --property is-public=true \ + $ openstack image create \ + --public \ --container-format bare \ --disk-format qcow2 \ --property img_signature="$base64_signature" \ --property img_signature_certificate_uuid="$cert_client_uuid" \ --property img_signature_hash_method='SHA-256' \ --property img_signature_key_type='RSA-PSS' \ - --file cirros.tar.gz - - +--------------------------------+----------------------------------------------------------------------------------+ - | Property | Value | - +--------------------------------+----------------------------------------------------------------------------------+ - | checksum | bc7f8676e345b3ff6411235343f8bad1 | - | container_format | bare | - | created_at | 2018-02-19T16:39:06Z | - | disk_format | qcow2 | - | id | d5bdf259-8ef7-4d32-8baf-eb3d73848627 | - | img_signature | TUqhAq1lXopY7U1kgXJVL5W37lIp49yeikpGyX2Ga6mni3uYBoPi428ILRizXT25u26PMVynjmuxgJyM | - | | zMsb//1Bs0Th+58vlzo4O3Q+axYgRXnHT6d+Lq/AXUAA3U+7NDNmrJXRT1bRwkeiv6j952ExaDpamiAg | - | | 0xvYTQDJB6j/O24hwUdx7ddx6/zrTHqq3aaff2dkKFJs8TMKit/uLutwngaL4dIz0ZJMrPjcQ1K4nZRj | - | | Fm3UYfSomSypr4BWb2s2OZsaOhEh5OSZNDIMl5ca8fJXAx357qg6Ox7q/pYZCrVh4W/a6QbqHCg3R/6n | - | | +0kvKTqsOYj0JOnKO7JVlA== | - | img_signature_certificate_uuid | fab1d219-5df8-4c20-8401-7985008c3dbc | - | img_signature_hash_method | SHA-256 | - | img_signature_key_type | RSA-PSS | - | is-public | true | - | min_disk | 0 | - | min_ram | 0 | - | name | cirros_client_signedImage | - | owner | 40ead7a81dc64f9fb6779984745ac1c2 | - | protected | False | - | size | 434333 | - | status | active | - | tags | [] | - | updated_at | 2018-02-19T16:39:06Z | - | virtual_size | None | - | visibility | shared | - +--------------------------------+----------------------------------------------------------------------------------+ + --file cirros.tar.gz \ + cirros_client_signedImage + + +------------------+------------------------------------------------------------------------+ + | Field | Value | + +------------------+------------------------------------------------------------------------+ + | checksum | d41d8cd98f00b204e9800998ecf8427e | + | container_format | bare | + | created_at | 2019-02-06T06:29:56Z | + | disk_format | qcow2 | + | file | /v2/images/17f48a6c-e592-446e-9c91-00fbc436d47e/file | + | id | 17f48a6c-e592-446e-9c91-00fbc436d47e | + | min_disk | 0 | + | min_ram | 0 | + | name | cirros_client_signedImage | + | owner | 45e13e63606f40d6b23275c3cd91aec2 | + | properties | img_signature='swA/hZi3WaNh35VMGlnfGnBWuXMlUbdO8h306uG7W3nwOyZP6dGRJ3 | + | | Xoi/07Bo2dMUB9saFowqVhdlW5EywQAK6vgDsi9O5aItHM4u7zUPw+2e8eeaIoHlGhTks | + | | kmW9isLy0mYA9nAfs3coChOIPXW4V8VgVXEfb6VYGHWm0nShiAP1e0do9WwitsE/TVKoS | + | | QnWjhggIYij5hmUZ628KAygPnXklxVhqPpY/dFzL+tTzNRD0nWAtsc5wrl6/8HcNzZsaP | + | | oexAysXJtcFzDrf6UQu66D3UvFBVucRYL8S3W56It3Xqu0+InLGaXJJpNagVQBb476zB2 | + | | ZzZ5RJ/4Zyxw==', | + | | img_signature_certificate_uuid='125e6199-2de4-46e3-b091-8e2401ef0d63', | + | | img_signature_hash_method='SHA-256', | + | | img_signature_key_type='RSA-PSS', | + | | os_hash_algo='sha512', | + | | os_hash_value='cf83e1357eefb8bdf1542850d66d8007d620e4050b5715dc83f4a92 | + | | 1d36ce9ce47d0d13c5d85f2b0ff8318d2877eec2f63b931bd47417a81a538327af927d | + | | a3e', | + | | os_hidden='False' | + | protected | False | + | schema | /v2/schemas/image | + | size | 0 | + | status | active | + | tags | | + | updated_at | 2019-02-06T06:29:56Z | + | virtual_size | None | + | 
visibility | public | + +------------------+------------------------------------------------------------------------+ .. note:: Creating the image can fail if validation does not succeed. This will cause the image to be deleted and the Glance log to report that diff --git a/doc/source/user/config-drive.rst b/doc/source/user/config-drive.rst deleted file mode 100644 index feba61d2da8..00000000000 --- a/doc/source/user/config-drive.rst +++ /dev/null @@ -1,284 +0,0 @@ -======================================= -Store metadata on a configuration drive -======================================= -You can configure OpenStack to write metadata to a special configuration drive -that attaches to the instance when it boots. The instance can mount this drive -and read files from it to get information that is normally available through -the :doc:`metadata service `. -This metadata is different from the user data. - -One use case for using the configuration drive is to pass a networking -configuration when you do not use DHCP to assign IP addresses to -instances. For example, you might pass the IP address configuration for -the instance through the configuration drive, which the instance can -mount and access before you configure the network settings for the -instance. - -Any modern guest operating system that is capable of mounting an ISO -9660 or VFAT file system can use the configuration drive. - -Requirements and guidelines -~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -To use the configuration drive, you must follow the following -requirements for the compute host and image. - -**Compute host requirements** - -- The following hypervisors support the configuration drive: libvirt, - XenServer, Hyper-V, VMware, and (since 17.0.0 Queens) PowerVM. - - Also, the Bare Metal service supports the configuration drive. - -- To use configuration drive with libvirt, XenServer, or VMware, you - must first install the genisoimage package on each compute host. - Otherwise, instances do not boot properly. - - Use the ``mkisofs_cmd`` flag to set the path where you install the - genisoimage program. If genisoimage is in same path as the - ``nova-compute`` service, you do not need to set this flag. - -- To use configuration drive with Hyper-V, you must set the - ``mkisofs_cmd`` value to the full path to an ``mkisofs.exe`` - installation. Additionally, you must set the ``qemu_img_cmd`` value - in the ``hyperv`` configuration section to the full path to an - :command:`qemu-img` command installation. - -- To use configuration drive with PowerVM or the Bare Metal service, - you do not need to prepare anything because these treat the configuration - drive properly. - -**Image requirements** - -- An image built with a recent version of the cloud-init package can - automatically access metadata passed through the configuration drive. - The cloud-init package version 0.7.1 works with Ubuntu, Fedora - based images (such as Red Hat Enterprise Linux) and openSUSE based - images (such as SUSE Linux Enterprise Server). - -- If an image does not have the cloud-init package installed, you must - customize the image to run a script that mounts the configuration - drive on boot, reads the data from the drive, and takes appropriate - action such as adding the public key to an account. You can read more - details about how data is organized on the configuration drive. - -- If you use Xen with a configuration drive, use the - :oslo.config:option:`xenserver.disable_agent` configuration parameter to - disable the agent. 
- -**Guidelines** - -- Do not rely on the presence of the EC2 metadata in the configuration - drive, because this content might be removed in a future release. For - example, do not rely on files in the ``ec2`` directory. - -- When you create images that access configuration drive data and - multiple directories are under the ``openstack`` directory, always - select the highest API version by date that your consumer supports. - For example, if your guest image supports the 2012-03-05, 2012-08-05, - and 2013-04-13 versions, try 2013-04-13 first and fall back to a - previous version if 2013-04-13 is not present. - -Enable and access the configuration drive -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -#. To enable the configuration drive, pass the ``--config-drive true`` - parameter to the :command:`openstack server create` command. - - The following example enables the configuration drive and passes user - data, a user data file, and two key/value metadata pairs, all of which are - accessible from the configuration drive: - - .. code-block:: console - - $ openstack server create --config-drive true --image my-image-name \ - --flavor 1 --key-name mykey --user-data ./my-user-data.txt \ - --property role=webservers --property essential=false MYINSTANCE - - You can also configure the Compute service to always create a - configuration drive by setting the following option in the - ``/etc/nova/nova.conf`` file: - - .. code-block:: console - - force_config_drive = true - - It is also possible to force the config drive by specifying the - ``img_config_drive=mandatory`` property in the image. - - .. note:: - - If a user passes the ``--config-drive true`` flag to the - :command:`openstack server create` command, an administrator cannot - disable the configuration drive. - -#. If your guest operating system supports accessing disk by label, you - can mount the configuration drive as the - ``/dev/disk/by-label/configurationDriveVolumeLabel`` device. In the - following example, the configuration drive has the ``config-2`` - volume label: - - .. code-block:: console - - # mkdir -p /mnt/config - # mount /dev/disk/by-label/config-2 /mnt/config - -.. note:: - - Ensure that you use at least version 0.3.1 of CirrOS for - configuration drive support. - - If your guest operating system does not use ``udev``, the - ``/dev/disk/by-label`` directory is not present. - - You can use the :command:`blkid` command to identify the block device that - corresponds to the configuration drive. For example, when you boot - the CirrOS image with the ``m1.tiny`` flavor, the device is - ``/dev/vdb``: - - .. code-block:: console - - # blkid -t LABEL="config-2" -odevice - - .. code-block:: console - - /dev/vdb - - Once identified, you can mount the device: - - .. code-block:: console - - # mkdir -p /mnt/config - # mount /dev/vdb /mnt/config - -Configuration drive contents ----------------------------- - -In this example, the contents of the configuration drive are as follows:: - - ec2/2009-04-04/meta-data.json - ec2/2009-04-04/user-data - ec2/latest/meta-data.json - ec2/latest/user-data - openstack/2012-08-10/meta_data.json - openstack/2012-08-10/user_data - openstack/content - openstack/content/0000 - openstack/content/0001 - openstack/latest/meta_data.json - openstack/latest/user_data - -The files that appear on the configuration drive depend on the arguments -that you pass to the :command:`openstack server create` command. 
- -OpenStack metadata format -------------------------- - -The following example shows the contents of the -``openstack/2012-08-10/meta_data.json`` and -``openstack/latest/meta_data.json`` files. These files are identical. -The file contents are formatted for readability. - -.. code-block:: json - - { - "availability_zone": "nova", - "hostname": "test.novalocal", - "launch_index": 0, - "name": "test", - "meta": { - "role": "webservers", - "essential": "false" - }, - "public_keys": { - "mykey": "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAAAgQDBqUfVvCSez0/Wfpd8dLLgZXV9GtXQ7hnMN+Z0OWQUyebVEHey1CXuin0uY1cAJMhUq8j98SiW+cU0sU4J3x5l2+xi1bodDm1BtFWVeLIOQINpfV1n8fKjHB+ynPpe1F6tMDvrFGUlJs44t30BrujMXBe8Rq44cCk6wqyjATA3rQ== Generated by Nova\n" - }, - "uuid": "83679162-1378-4288-a2d4-70e13ec132aa" - } - -EC2 metadata format -------------------- - -The following example shows the contents of the -``ec2/2009-04-04/meta-data.json`` and the ``ec2/latest/meta-data.json`` -files. These files are identical. The file contents are formatted to -improve readability. - -.. code-block:: json - - { - "ami-id": "ami-00000001", - "ami-launch-index": 0, - "ami-manifest-path": "FIXME", - "block-device-mapping": { - "ami": "sda1", - "ephemeral0": "sda2", - "root": "/dev/sda1", - "swap": "sda3" - }, - "hostname": "test.novalocal", - "instance-action": "none", - "instance-id": "i-00000001", - "instance-type": "m1.tiny", - "kernel-id": "aki-00000002", - "local-hostname": "test.novalocal", - "local-ipv4": null, - "placement": { - "availability-zone": "nova" - }, - "public-hostname": "test.novalocal", - "public-ipv4": "", - "public-keys": { - "0": { - "openssh-key": "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAAAgQDBqUfVvCSez0/Wfpd8dLLgZXV9GtXQ7hnMN+Z0OWQUyebVEHey1CXuin0uY1cAJMhUq8j98SiW+cU0sU4J3x5l2+xi1bodDm1BtFWVeLIOQINpfV1n8fKjHB+ynPpe1F6tMDvrFGUlJs44t30BrujMXBe8Rq44cCk6wqyjATA3rQ== Generated by Nova\n" - } - }, - "ramdisk-id": "ari-00000003", - "reservation-id": "r-7lfps8wj", - "security-groups": [ - "default" - ] - } - -User data ---------- - -The ``openstack/2012-08-10/user_data``, ``openstack/latest/user_data``, -``ec2/2009-04-04/user-data``, and ``ec2/latest/user-data`` file are -present only if the ``--user-data`` flag and the contents of the user -data file are passed to the :command:`openstack server create` command. - -Configuration drive format --------------------------- - -The default format of the configuration drive as an ISO 9660 file -system. To explicitly specify the ISO 9660 format, add the following -line to the ``/etc/nova/nova.conf`` file: - -.. code-block:: console - - config_drive_format=iso9660 - -By default, you cannot attach the configuration drive image as a CD -drive instead of as a disk drive. To attach a CD drive, add the -following line to the ``/etc/nova/nova.conf`` file: - -.. code-block:: console - - [hyperv] - config_drive_cdrom=true - -.. note:: Attaching a configuration drive as a CD drive is only supported - by the Hyper-V compute driver. - -For legacy reasons, you can configure the configuration drive to use -VFAT format instead of ISO 9660. It is unlikely that you would require -VFAT format because ISO 9660 is widely supported across operating -systems. However, to use the VFAT format, add the following line to the -``/etc/nova/nova.conf`` file: - -.. code-block:: console - - config_drive_format=vfat - -If you choose VFAT, the configuration drive is 64 MB. 
diff --git a/doc/source/user/feature-classification.rst b/doc/source/user/feature-classification.rst index ceeafe572f5..db0ce886d97 100644 --- a/doc/source/user/feature-classification.rst +++ b/doc/source/user/feature-classification.rst @@ -65,6 +65,8 @@ create a particular service. It is common for this workloads needing bare metal like performance, i.e. low latency and close to line speed performance. +.. include:: /common/numa-live-migration-warning.txt + .. feature_matrix:: feature-matrix-nfv.ini .. _matrix-hpc: diff --git a/doc/source/user/feature-matrix-gp.ini b/doc/source/user/feature-matrix-gp.ini index 26cd61d2ba6..179974ddcb4 100644 --- a/doc/source/user/feature-matrix-gp.ini +++ b/doc/source/user/feature-matrix-gp.ini @@ -18,14 +18,6 @@ link=https://wiki.openstack.org/wiki/ThirdPartySystems/Virtuozzo_CI title=libvirt+virtuozzo VM link=https://wiki.openstack.org/wiki/ThirdPartySystems/Virtuozzo_Storage_CI -[target.libvirt-xen] -title=libvirt+xen -link=https://wiki.openstack.org/wiki/ThirdPartySystems/XenProject_CI - -[target.xenserver] -title=XenServer CI -link=https://wiki.openstack.org/wiki/XenServer/XenServer_CI - [target.vmware] title=VMware CI link=https://wiki.openstack.org/wiki/NovaVMware/Minesweeper @@ -34,6 +26,10 @@ link=https://wiki.openstack.org/wiki/NovaVMware/Minesweeper title=Hyper-V CI link=https://wiki.openstack.org/wiki/ThirdPartySystems/Hyper-V_CI +[target.zvm] +title=IBM zVM CI +link=https://wiki.openstack.org/wiki/ThirdPartySystems/IBM_z/VM_CI + [target.ironic] title=Ironic CI link= @@ -62,7 +58,7 @@ notes=This includes creating a server, and deleting a server. Specifically this is about booting a server from a glance image using the default disk and network configuration. maturity=complete -api_doc_link=http://developer.openstack.org/api-ref/compute/#servers-servers +api_doc_link=https://docs.openstack.org/api-ref/compute/#servers-servers admin_doc_link=https://docs.openstack.org/nova/latest/user/launch-instances.html tempest_test_uuids=9a438d88-10c6-4bcd-8b5b-5b6e25e1346f;585e934c-448e-43c4-acbf-d06a9b899997 libvirt-kvm=complete @@ -71,18 +67,17 @@ libvirt-virtuozzo-ct=partial driver-notes-libvirt-virtuozzo-ct=This is not tested in a CI system, but it is implemented. libvirt-virtuozzo-vm=partial driver-notes-libvirt-virtuozzo-vm=This is not tested in a CI system, but it is implemented. -libvirt-xen=complete -xenserver=complete vmware=complete hyperv=complete ironic=unknown powervm=complete +zvm=complete [operation.snapshot-server] title=Snapshot Server notes=This is creating a glance image from the currently running server. maturity=complete -api_doc_link=http://developer.openstack.org/api-ref/compute/?expanded=#servers-run-an-action-servers-action +api_doc_link=https://docs.openstack.org/api-ref/compute/?expanded=#servers-run-an-action-servers-action admin_doc_link=https://docs.openstack.org/glance/latest/admin/troubleshooting.html tempest_test_uuids=aaacd1d0-55a2-4ce8-818a-b5439df8adc9 cli= @@ -92,18 +87,17 @@ libvirt-virtuozzo-ct=partial driver-notes-libvirt-virtuozzo-ct=This is not tested in a CI system, but it is implemented. libvirt-virtuozzo-vm=partial driver-notes-libvirt-virtuozzo-vm=This is not tested in a CI system, but it is implemented. -libvirt-xen=complete -xenserver=complete vmware=unknown hyperv=unknown ironic=unknown powervm=complete +zvm=complete [operation.power-ops] title=Server power ops notes=This includes reboot, shutdown and start. 
maturity=complete -api_doc_link=http://developer.openstack.org/api-ref/compute/?expanded=#servers-run-an-action-servers-action +api_doc_link=https://docs.openstack.org/api-ref/compute/?expanded=#servers-run-an-action-servers-action tempest_test_uuids=2cb1baf6-ac8d-4429-bf0d-ba8a0ba53e32;af8eafd4-38a7-4a4b-bdbc-75145a580560 cli= libvirt-kvm=complete @@ -112,18 +106,17 @@ libvirt-virtuozzo-ct=partial driver-notes-libvirt-virtuozzo-ct=This is not tested in a CI system, but it is implemented. libvirt-virtuozzo-vm=partial driver-notes-libvirt-virtuozzo-vm=This is not tested in a CI system, but it is implemented. -libvirt-xen=complete -xenserver=complete vmware=complete hyperv=complete ironic=unknown powervm=complete +zvm=complete [operation.rebuild-server] title=Rebuild Server notes=You can rebuild a server, optionally specifying the glance image to use. maturity=complete -api_doc_link=http://developer.openstack.org/api-ref/compute/?expanded=#servers-run-an-action-servers-action +api_doc_link=https://docs.openstack.org/api-ref/compute/?expanded=#servers-run-an-action-servers-action tempest_test_uuids=aaa6cdf3-55a7-461a-add9-1c8596b9a07c cli= libvirt-kvm=complete @@ -132,19 +125,18 @@ libvirt-virtuozzo-ct=partial driver-notes-libvirt-virtuozzo-ct=This is not tested in a CI system, but it is implemented. libvirt-virtuozzo-vm=partial driver-notes-libvirt-virtuozzo-vm=This is not tested in a CI system, but it is implemented. -libvirt-xen=complete -xenserver=complete vmware=complete hyperv=complete ironic=unknown powervm=missing +zvm=missing [operation.resize-server] title=Resize Server notes=You resize a server to a new flavor, then confirm or revert that operation. maturity=complete -api_doc_link=http://developer.openstack.org/api-ref/compute/?expanded=#servers-run-an-action-servers-action +api_doc_link=https://docs.openstack.org/api-ref/compute/?expanded=#servers-run-an-action-servers-action tempest_test_uuids=1499262a-9328-4eda-9068-db1ac57498d2 cli= libvirt-kvm=complete @@ -152,18 +144,17 @@ libvirt-kvm-s390=unknown libvirt-virtuozzo-ct=complete libvirt-virtuozzo-vm=partial driver-notes-libvirt-virtuozzo-vm=This is not tested in a CI system, but it is implemented. -libvirt-xen=complete -xenserver=complete vmware=complete hyperv=complete ironic=unknown powervm=missing +zvm=missing [operation.server-volume-ops] title=Volume Operations notes=This is about attaching volumes, detaching volumes. maturity=complete -api_doc_link=http://developer.openstack.org/api-ref/compute/#servers-with-volume-attachments-servers-os-volume-attachments +api_doc_link=https://docs.openstack.org/api-ref/compute/#servers-with-volume-attachments-servers-os-volume-attachments admin_doc_link=https://docs.openstack.org/cinder/latest/admin/blockstorage-manage-volumes.html tempest_test_uuids=fff42874-7db5-4487-a8e1-ddda5fb5288d cli= @@ -171,14 +162,13 @@ libvirt-kvm=complete libvirt-kvm-s390=unknown libvirt-virtuozzo-ct=complete libvirt-virtuozzo-vm=complete -libvirt-xen=complete -xenserver=complete vmware=complete hyperv=complete ironic=missing powervm=complete driver-notes-powervm=This is not tested for every CI run. Add a "powervm:volume-check" comment to trigger a CI job running volume tests. +zvm=missing [operation.server-bdm] title=Custom disk configurations on boot @@ -187,7 +177,7 @@ notes=This is about supporting all the features of BDMv2. specifying a custom set of ephemeral disks. Note some drivers only supports part of what the API allows. 
maturity=complete -api_doc_link=http://developer.openstack.org/api-ref/compute/?expanded=create-image-createimage-action-detail#create-server +api_doc_link=https://docs.openstack.org/api-ref/compute/?expanded=create-image-createimage-action-detail#create-server admin_doc_link=https://docs.openstack.org/nova/latest/user/block-device-mapping.html tempest_test_uuids=557cd2c2-4eb8-4dce-98be-f86765ff311b, 36c34c67-7b54-4b59-b188-02a2f458a63b cli= @@ -195,14 +185,12 @@ libvirt-kvm=complete libvirt-kvm-s390=unknown libvirt-virtuozzo-ct=missing libvirt-virtuozzo-vm=complete -libvirt-xen=complete -xenserver=partial -driver-notes-xenserver=This is not tested in a CI system, and only partially implemented. vmware=partial driver-notes-vmware=This is not tested in a CI system, but it is implemented. hyperv=complete:n ironic=missing powervm=missing +zvm=missing [operation.server-neutron] title=Custom neutron configurations on boot @@ -210,7 +198,7 @@ notes=This is about supporting booting from one or more neutron ports, or all the related short cuts such as booting a specified network. This does not include SR-IOV or similar, just simple neutron ports. maturity=complete -api_doc_link=http://developer.openstack.org/api-ref/compute/?&expanded=create-server-detail +api_doc_link=https://docs.openstack.org/api-ref/compute/?&expanded=create-server-detail admin_doc_link= tempest_test_uuids=2f3a0127-95c7-4977-92d2-bc5aec602fb4 cli= @@ -218,22 +206,20 @@ libvirt-kvm=complete libvirt-kvm-s390=unknown libvirt-virtuozzo-ct=unknown libvirt-virtuozzo-vm=unknown -libvirt-xen=partial -driver-notes-libvirt-xen=This is not tested in a CI system, but it is implemented. -xenserver=partial -driver-notes-xenserver=This is not tested in a CI system, but it is implemented. vmware=partial driver-notes-vmware=This is not tested in a CI system, but it is implemented. hyperv=partial driver-notes-hyperv=This is not tested in a CI system, but it is implemented. ironic=missing powervm=complete +zvm=partial +driver-notes-zvm=This is not tested in a CI system, but it is implemented. [operation.server-pause] title=Pause a Server notes=This is pause and unpause a server, where the state is held in memory. maturity=complete -api_doc_link=http://developer.openstack.org/api-ref/compute/?#pause-server-pause-action +api_doc_link=https://docs.openstack.org/api-ref/compute/?#pause-server-pause-action admin_doc_link= tempest_test_uuids=bd61a9fd-062f-4670-972b-2d6c3e3b9e73 cli= @@ -242,19 +228,18 @@ libvirt-kvm-s390=unknown libvirt-virtuozzo-ct=missing libvirt-virtuozzo-vm=partial driver-notes-libvirt-virtuozzo-vm=This is not tested in a CI system, but it is implemented. -libvirt-xen=complete -xenserver=complete vmware=partial driver-notes-vmware=This is not tested in a CI system, but it is implemented. hyperv=complete ironic=missing powervm=missing +zvm=complete [operation.server-suspend] title=Suspend a Server notes=This suspend and resume a server, where the state is held on disk. maturity=complete -api_doc_link=http://developer.openstack.org/api-ref/compute/?expanded=suspend-server-suspend-action-detail +api_doc_link=https://docs.openstack.org/api-ref/compute/?expanded=suspend-server-suspend-action-detail admin_doc_link= tempest_test_uuids=0d8ee21e-b749-462d-83da-b85b41c86c7f cli= @@ -264,18 +249,17 @@ libvirt-virtuozzo-ct=partial driver-notes-libvirt-virtuozzo-ct=This is not tested in a CI system, but it is implemented. libvirt-virtuozzo-vm=partial driver-notes-libvirt-virtuozzo-vm=This is not tested in a CI system, but it is implemented. 
-libvirt-xen=complete -xenserver=complete vmware=complete hyperv=complete ironic=missing powervm=missing +zvm=missing [operation.server-consoleoutput] title=Server console output notes=This gets the current server console output. maturity=complete -api_doc_link=http://developer.openstack.org/api-ref/compute/#show-console-output-os-getconsoleoutput-action +api_doc_link=https://docs.openstack.org/api-ref/compute/#show-console-output-os-getconsoleoutput-action admin_doc_link= tempest_test_uuids=4b8867e6-fffa-4d54-b1d1-6fdda57be2f3 cli= @@ -283,21 +267,20 @@ libvirt-kvm=complete libvirt-kvm-s390=unknown libvirt-virtuozzo-ct=unknown libvirt-virtuozzo-vm=unknown -libvirt-xen=complete -xenserver=complete vmware=partial driver-notes-vmware=This is not tested in a CI system, but it is implemented. hyperv=partial driver-notes-hyperv=This is not tested in a CI system, but it is implemented. ironic=missing powervm=complete +zvm=complete [operation.server-rescue] title=Server Rescue notes=This boots a server with a new root disk from the specified glance image to allow a user to fix a boot partition configuration, or similar. maturity=complete -api_doc_link=http://developer.openstack.org/api-ref/compute/#rescue-server-rescue-action +api_doc_link=https://docs.openstack.org/api-ref/compute/#rescue-server-rescue-action admin_doc_link= tempest_test_uuids=fd032140-714c-42e4-a8fd-adcd8df06be6;70cdb8a1-89f8-437d-9448-8844fd82bf46 cli= @@ -306,21 +289,20 @@ libvirt-kvm-s390=unknown libvirt-virtuozzo-ct=partial driver-notes-libvirt-virtuozzo-ct=This is not tested in a CI system, but it is implemented. libvirt-virtuozzo-vm=complete -libvirt-xen=complete -xenserver=complete vmware=complete hyperv=partial driver-notes-hyperv=This is not tested in a CI system, but it is implemented. ironic=missing powervm=missing +zvm=missing [operation.server-configdrive] title=Server Config Drive notes=This ensures the user data provided by the user when booting a server is available in one of the expected config drive locations. maturity=complete -api_doc_link=http://developer.openstack.org/api-ref/compute/#create-server -admin_doc_link=https://docs.openstack.org/nova/latest/user/config-drive.html +api_doc_link=https://docs.openstack.org/api-ref/compute/#create-server +admin_doc_link=https://docs.openstack.org/nova/latest/admin/config-drive.html tempest_test_uuids=7fff3fb3-91d8-4fd0-bd7d-0204f1f180ba cli= libvirt-kvm=complete @@ -328,19 +310,18 @@ libvirt-kvm-s390=unknown libvirt-virtuozzo-ct=missing libvirt-virtuozzo-vm=partial driver-notes-libvirt-virtuozzo-vm=This is not tested in a CI system, but it is implemented. -libvirt-xen=complete -xenserver=complete vmware=complete hyperv=complete ironic=partial driver-notes-ironic=This is not tested in a CI system, but it is implemented. powervm=complete +zvm=complete [operation.server-changepassword] title=Server Change Password notes=The ability to reset the password of a user within the server. 
maturity=experimental -api_doc_link=http://developer.openstack.org/api-ref/compute/#change-administrative-password-changepassword-action +api_doc_link=https://docs.openstack.org/api-ref/compute/#change-administrative-password-changepassword-action admin_doc_link= tempest_test_uuids=6158df09-4b82-4ab3-af6d-29cf36af858d cli= @@ -349,14 +330,12 @@ driver-notes-libvirt-kvm=This is not tested in a CI system, but it is implemente libvirt-kvm-s390=unknown libvirt-virtuozzo-ct=missing libvirt-virtuozzo-vm=missing -libvirt-xen=missing -xenserver=partial -driver-notes-xenserver=This is not tested in a CI system, but it is implemented. vmware=missing hyperv=partial driver-notes-hyperv=This is not tested in a CI system, but it is implemented. ironic=missing powervm=missing +zvm=missing [operation.server-shelve] title=Server Shelve and Unshelve @@ -364,7 +343,7 @@ notes=The ability to keep a server logically alive, but not using any cloud resources. For local disk based instances, this involves taking a snapshot, called offloading. maturity=complete -api_doc_link=http://developer.openstack.org/api-ref/compute/#shelve-server-shelve-action +api_doc_link=https://docs.openstack.org/api-ref/compute/#shelve-server-shelve-action admin_doc_link= tempest_test_uuids=1164e700-0af0-4a4c-8792-35909a88743c,c1b6318c-b9da-490b-9c67-9339b627271f cli= @@ -372,9 +351,8 @@ libvirt-kvm=complete libvirt-kvm-s390=unknown libvirt-virtuozzo-ct=missing libvirt-virtuozzo-vm=complete -libvirt-xen=complete -xenserver=complete vmware=missing hyperv=complete ironic=missing -powervm=missing +powervm=complete +zvm=missing diff --git a/doc/source/user/feature-matrix-hpc.ini b/doc/source/user/feature-matrix-hpc.ini index ecd7c2f191d..e548d55b6e5 100644 --- a/doc/source/user/feature-matrix-hpc.ini +++ b/doc/source/user/feature-matrix-hpc.ini @@ -14,14 +14,6 @@ link=https://wiki.openstack.org/wiki/ThirdPartySystems/Virtuozzo_CI title=libvirt+virtuozzo VM link=https://wiki.openstack.org/wiki/ThirdPartySystems/Virtuozzo_Storage_CI -[target.libvirt-xen] -title=libvirt+xen -link=https://wiki.openstack.org/wiki/ThirdPartySystems/XenProject_CI - -[target.xenserver] -title=XenServer CI -link=https://wiki.openstack.org/wiki/XenServer/XenServer_CI - [target.vmware] title=VMware CI link=https://wiki.openstack.org/wiki/NovaVMware/Minesweeper @@ -47,7 +39,7 @@ notes=The PCI passthrough feature in OpenStack allows full access and direct installation is the only requirement for the guest to properly use the devices. maturity=experimental -api_doc_link=https://developer.openstack.org/api-ref/compute/#create-server +api_doc_link=https://docs.openstack.org/api-ref/compute/#create-server admin_doc_link=https://docs.openstack.org/nova/latest/admin/pci-passthrough.html tempest_test_uuids=9a438d88-10c6-4bcd-8b5b-5b6e25e1346f;585e934c-448e-43c4-acbf-d06a9b899997 libvirt-kvm=complete:l @@ -56,8 +48,6 @@ libvirt-virtuozzo-ct=partial driver-notes-libvirt-virtuozzo-ct=This is not tested in a CI system, but it is implemented. libvirt-virtuozzo-vm=partial driver-notes-libvirt-virtuozzo-vm=This is not tested in a CI system, but it is implemented. 
-libvirt-xen=missing -xenserver=partial:k vmware=missing hyperv=missing ironic=unknown @@ -68,14 +58,12 @@ powervm=missing title=Virtual GPUs notes=Attach a virtual GPU to an instance at server creation time maturity=experimental -api_doc_link=https://developer.openstack.org/api-ref/compute/#create-server +api_doc_link=https://docs.openstack.org/api-ref/compute/#create-server admin_doc_link=https://docs.openstack.org/nova/latest/admin/virtual-gpu.html libvirt-kvm=partial:queens libvirt-kvm-s390=unknown libvirt-virtuozzo-ct=unknown libvirt-virtuozzo-vm=unknown -libvirt-xen=unknown -xenserver=partial:queens vmware=missing hyperv=missing ironic=missing diff --git a/doc/source/user/feature-matrix-nfv.ini b/doc/source/user/feature-matrix-nfv.ini index 3c7b9741ae0..f3251db9c54 100644 --- a/doc/source/user/feature-matrix-nfv.ini +++ b/doc/source/user/feature-matrix-nfv.ini @@ -10,10 +10,6 @@ link=http://docs.openstack.org/infra/manual/developers.html#project-gating title=libvirt+kvm (s390x) link=http://docs.openstack.org/infra/manual/developers.html#project-gating -[target.libvirt-xen] -title=libvirt+xen -link=https://wiki.openstack.org/wiki/ThirdPartySystems/XenProject_CI - # # Lists all features # @@ -28,29 +24,26 @@ link=https://wiki.openstack.org/wiki/ThirdPartySystems/XenProject_CI title=NUMA Placement notes=Configure placement of instance vCPUs and memory across host NUMA nodes maturity=experimental -api_doc_link=https://developer.openstack.org/api-ref/compute/#create-server +api_doc_link=https://docs.openstack.org/api-ref/compute/#create-server admin_doc_link=https://docs.openstack.org/nova/latest/admin/cpu-topologies.html#customizing-instance-cpu-pinning-policies tempest_test_uuids=9a438d88-10c6-4bcd-8b5b-5b6e25e1346f;585e934c-448e-43c4-acbf-d06a9b899997 libvirt-kvm=partial libvirt-kvm-s390=unknown -libvirt-xen=missing [operation.cpu-pinning-policy] title=CPU Pinning Policy notes=Enable/disable binding of instance vCPUs to host CPUs maturity=experimental -api_doc_link=https://developer.openstack.org/api-ref/compute/#create-server +api_doc_link=https://docs.openstack.org/api-ref/compute/#create-server admin_doc_link=https://docs.openstack.org/nova/latest/admin/cpu-topologies.html#customizing-instance-cpu-pinning-policies libvirt-kvm=partial libvirt-kvm-s390=unknown -libvirt-xen=missing [operation.cpu-pinning-thread-policy] title=CPU Pinning Thread Policy notes=Configure usage of host hardware threads when pinning is used maturity=experimental -api_doc_link=https://developer.openstack.org/api-ref/compute/#create-server +api_doc_link=https://docs.openstack.org/api-ref/compute/#create-server admin_doc_link=https://docs.openstack.org/nova/latest/admin/cpu-topologies.html#customizing-instance-cpu-pinning-policies libvirt-kvm=partial libvirt-kvm-s390=unknown -libvirt-xen=missing diff --git a/doc/source/user/filter-scheduler.rst b/doc/source/user/filter-scheduler.rst deleted file mode 100644 index ffcb04aad53..00000000000 --- a/doc/source/user/filter-scheduler.rst +++ /dev/null @@ -1,511 +0,0 @@ -Filter Scheduler -================ - -The **Filter Scheduler** supports `filtering` and `weighting` to make informed -decisions on where a new instance should be created. This Scheduler supports -working with Compute Nodes only. - -Filtering ---------- - -.. image:: /_static/images/filtering-workflow-1.png - -During its work Filter Scheduler iterates over all found compute nodes, -evaluating each against a set of filters. The list of resulting hosts is -ordered by weighers. 
The Scheduler then chooses hosts for the requested -number of instances, choosing the most weighted hosts. For a specific -filter to succeed for a specific host, the filter matches the user -request against the state of the host plus some extra magic as defined -by each filter (described in more detail below). - -If the Scheduler cannot find candidates for the next instance, it means that -there are no appropriate hosts where that instance can be scheduled. - -The Filter Scheduler has to be quite flexible to support the required variety -of `filtering` and `weighting` strategies. If this flexibility is insufficient -you can implement `your own filtering algorithm`. - -There are many standard filter classes which may be used -(:mod:`nova.scheduler.filters`): - -* |AllHostsFilter| - does no filtering. It passes all the available hosts. -* |ImagePropertiesFilter| - filters hosts based on properties defined - on the instance's image. It passes hosts that can support the properties - specified on the image used by the instance. -* |AvailabilityZoneFilter| - filters hosts by availability zone. It passes - hosts matching the availability zone specified in the instance properties. - Use a comma to specify multiple zones. The filter will then ensure it matches - any zone specified. -* |ComputeCapabilitiesFilter| - checks that the capabilities provided by the - host compute service satisfy any extra specifications associated with the - instance type. It passes hosts that can create the specified instance type. - - If an extra specs key contains a colon (:), anything before the colon is - treated as a namespace and anything after the colon is treated as the key to - be matched. If a namespace is present and is not ``capabilities``, the filter - ignores the namespace. For example ``capabilities:cpu_info:features`` is - a valid scope format. For backward compatibility, when a key doesn't contain - a colon (:), the key's contents are important. If this key is an attribute of - HostState object, like ``free_disk_mb``, the filter also treats the extra - specs key as the key to be matched. If not, the filter will ignore the key. - - The extra specifications can have an operator at the beginning of the value - string of a key/value pair. If there is no operator specified, then a - default operator of ``s==`` is used. 
Valid operators are: - - :: - - * = (equal to or greater than as a number; same as vcpus case) - * == (equal to as a number) - * != (not equal to as a number) - * >= (greater than or equal to as a number) - * <= (less than or equal to as a number) - * s== (equal to as a string) - * s!= (not equal to as a string) - * s>= (greater than or equal to as a string) - * s> (greater than as a string) - * s<= (less than or equal to as a string) - * s< (less than as a string) - * (substring) - * (all elements contained in collection) - * (find one of these) - - Examples are: ">= 5", "s== 2.1.0", " gcc", " aes mmx", and " fpu gpu" - - some of attributes that can be used as useful key and their values contains: - - :: - - * free_ram_mb (compared with a number, values like ">= 4096") - * free_disk_mb (compared with a number, values like ">= 10240") - * host (compared with a string, values like: " compute","s== compute_01") - * hypervisor_type (compared with a string, values like: "s== QEMU", "s== powervm") - * hypervisor_version (compared with a number, values like : ">= 1005003", "== 2000000") - * num_instances (compared with a number, values like: "<= 10") - * num_io_ops (compared with a number, values like: "<= 5") - * vcpus_total (compared with a number, values like: "= 48", ">=24") - * vcpus_used (compared with a number, values like: "= 0", "<= 10") - -* |AggregateInstanceExtraSpecsFilter| - checks that the aggregate metadata - satisfies any extra specifications associated with the instance type (that - have no scope or are scoped with ``aggregate_instance_extra_specs``). - It passes hosts that can create the specified instance type. - The extra specifications can have the same operators as - |ComputeCapabilitiesFilter|. To specify multiple values for the same key - use a comma. E.g., "value1,value2". All hosts are passed if no extra_specs - are specified. -* |ComputeFilter| - passes all hosts that are operational and enabled. -* |CoreFilter| - filters based on CPU core utilization. It passes hosts with - sufficient number of CPU cores. -* |AggregateCoreFilter| - filters hosts by CPU core number with per-aggregate - ``cpu_allocation_ratio`` setting. If no per-aggregate value is found, it will - fall back to the global default ``cpu_allocation_ratio``. If more than one value - is found for a host (meaning the host is in two different aggregates with - different ratio settings), the minimum value will be used. -* |IsolatedHostsFilter| - filter based on ``isolated_images``, ``isolated_hosts`` - and ``restrict_isolated_hosts_to_isolated_images`` flags. -* |JsonFilter| - allows simple JSON-based grammar for selecting hosts. -* |RamFilter| - filters hosts by their RAM. Only hosts with sufficient RAM - to host the instance are passed. -* |AggregateRamFilter| - filters hosts by RAM with per-aggregate - ``ram_allocation_ratio`` setting. If no per-aggregate value is found, it will - fall back to the global default ``ram_allocation_ratio``. If more than one value - is found for a host (meaning the host is in two different aggregates with - different ratio settings), the minimum value will be used. -* |DiskFilter| - filters hosts by their disk allocation. Only hosts with sufficient - disk space to host the instance are passed. - ``disk_allocation_ratio`` setting. The virtual disk to physical disk - allocation ratio, 1.0 by default. The total allowed allocated disk size will - be physical disk multiplied this ratio. 
-* |AggregateDiskFilter| - filters hosts by disk allocation with per-aggregate - ``disk_allocation_ratio`` setting. If no per-aggregate value is found, it will - fall back to the global default ``disk_allocation_ratio``. If more than one value - is found for a host (meaning the host is in two or more different aggregates with - different ratio settings), the minimum value will be used. -* |NumInstancesFilter| - filters compute nodes by number of running instances. Nodes - with too many instances will be filtered. - ``max_instances_per_host`` setting. Maximum number of instances allowed to run on - this host. The host will be ignored by the scheduler if more than ``max_instances_per_host`` - already exist on the host. -* |AggregateNumInstancesFilter| - filters hosts by number of instances with - per-aggregate ``max_instances_per_host`` setting. If no per-aggregate value - is found, it will fall back to the global default ``max_instances_per_host``. - If more than one value is found for a host (meaning the host is in two or more - different aggregates with different max instances per host settings), - the minimum value will be used. -* |IoOpsFilter| - filters hosts by concurrent I/O operations on it. - hosts with too many concurrent I/O operations will be filtered. - ``max_io_ops_per_host`` setting. Maximum number of I/O intensive instances allowed to - run on this host, the host will be ignored by scheduler if more than ``max_io_ops_per_host`` - instances such as build/resize/snapshot etc are running on it. -* |AggregateIoOpsFilter| - filters hosts by I/O operations with per-aggregate - ``max_io_ops_per_host`` setting. If no per-aggregate value is found, it will - fall back to the global default ``max_io_ops_per_host``. If more than - one value is found for a host (meaning the host is in two or more different - aggregates with different max io operations settings), the minimum value - will be used. -* |PciPassthroughFilter| - Filter that schedules instances on a host if the host - has devices to meet the device requests in the 'extra_specs' for the flavor. -* |SimpleCIDRAffinityFilter| - allows a new instance on a host within - the same IP block. -* |DifferentHostFilter| - allows the instance on a different host from a - set of instances. -* |SameHostFilter| - puts the instance on the same host as another instance in - a set of instances. -* |RetryFilter| - filters hosts that have been attempted for scheduling. - Only passes hosts that have not been previously attempted. -* |AggregateTypeAffinityFilter| - limits instance_type by aggregate. - This filter passes hosts if no instance_type key is set or - the instance_type aggregate metadata value contains the name of the - instance_type requested. The value of the instance_type metadata entry is - a string that may contain either a single instance_type name or a comma - separated list of instance_type names. e.g. 'm1.nano' or "m1.nano,m1.small" -* |ServerGroupAntiAffinityFilter| - This filter implements anti-affinity for a - server group. First you must create a server group with a policy of - 'anti-affinity' via the server groups API. Then, when you boot a new server, - provide a scheduler hint of 'group=<uuid>' where <uuid> is the UUID of the - server group you created. This will result in the server getting added to the - group. When the server gets scheduled, anti-affinity will be enforced among - all servers in that group. -* |ServerGroupAffinityFilter| - This filter works the same way as - ServerGroupAntiAffinityFilter.
The difference is that when you create the server - group, you should specify a policy of 'affinity'. -* |AggregateMultiTenancyIsolation| - isolate tenants in specific aggregates. - To specify multiple tenants use a comma. Eg. "tenant1,tenant2" -* |AggregateImagePropertiesIsolation| - isolates hosts based on image - properties and aggregate metadata. Use a comma to specify multiple values for the - same property. The filter will then ensure at least one value matches. -* |MetricsFilter| - filters hosts based on metrics weight_setting. Only hosts with - the available metrics are passed. -* |NUMATopologyFilter| - filters hosts based on the NUMA topology requested by the - instance, if any. - -Now we can focus on these standard filter classes in some detail. We'll skip the -simplest ones, such as |AllHostsFilter|, |CoreFilter| and |RamFilter|, -because their functionality is relatively simple and can be understood from the -code. For example class |RamFilter| has the next realization: - -:: - - class RamFilter(filters.BaseHostFilter): - """Ram Filter with over subscription flag""" - - def host_passes(self, host_state, filter_properties): - """Only return hosts with sufficient available RAM.""" - instance_type = filter_properties.get('instance_type') - requested_ram = instance_type['memory_mb'] - free_ram_mb = host_state.free_ram_mb - total_usable_ram_mb = host_state.total_usable_ram_mb - used_ram_mb = total_usable_ram_mb - free_ram_mb - return total_usable_ram_mb * FLAGS.ram_allocation_ratio - used_ram_mb >= requested_ram - -Here ``ram_allocation_ratio`` means the virtual RAM to physical RAM allocation -ratio (it is ``1.5`` by default). - -The |AvailabilityZoneFilter| looks at the availability zone of compute node -and availability zone from the properties of the request. Each compute service -has its own availability zone. So deployment engineers have an option to run -scheduler with availability zones support and can configure availability zones -on each compute host. This class's method ``host_passes`` returns ``True`` if -availability zone mentioned in request is the same on the current compute host. - -The |ImagePropertiesFilter| filters hosts based on the architecture, -hypervisor type and virtual machine mode specified in the -instance. For example, an instance might require a host that supports the ARM -architecture on a qemu compute host. The |ImagePropertiesFilter| will only -pass hosts that can satisfy this request. These instance -properties are populated from properties defined on the instance's image. -E.g. an image can be decorated with these properties using -``glance image-update img-uuid --property architecture=arm --property -hypervisor_type=qemu`` -Only hosts that satisfy these requirements will pass the -|ImagePropertiesFilter|. - -|ComputeCapabilitiesFilter| checks if the host satisfies any ``extra_specs`` -specified on the instance type. The ``extra_specs`` can contain key/value pairs. -The key for the filter is either non-scope format (i.e. no ``:`` contained), or -scope format in capabilities scope (i.e. ``capabilities:xxx:yyy``). One example -of capabilities scope is ``capabilities:cpu_info:features``, which will match -host's cpu features capabilities. The |ComputeCapabilitiesFilter| will only -pass hosts whose capabilities satisfy the requested specifications. All hosts -are passed if no ``extra_specs`` are specified. - -|ComputeFilter| is quite simple and passes any host whose compute service is -enabled and operational. 
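To make the ``host_passes`` contract concrete, here is a minimal illustrative sketch of a trivial filter in the same style as the |RamFilter| example above (the class name and the instance-count threshold are assumptions for illustration, not an actual in-tree filter; ``num_instances`` is one of the ``HostState`` attributes listed earlier):

::

    from nova.scheduler import filters


    class FewInstancesFilter(filters.BaseHostFilter):
        """Illustrative sketch: pass hosts running fewer than 50 instances."""

        def host_passes(self, host_state, filter_properties):
            # HostState tracks per-host usage such as num_instances;
            # returning False drops this host from consideration.
            return host_state.num_instances < 50

Note the signature here mirrors the |RamFilter| example above; newer releases pass a ``RequestSpec`` object instead of ``filter_properties``, as described under "Writing Your Own Filter" below.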
- -Now we are going to |IsolatedHostsFilter|. There can be some special hosts -reserved for specific images. These hosts are called **isolated**. So the -images to run on the isolated hosts are also called isolated. The filter -checks if ``isolated_images`` flag named in instance specifications is the same -as the host specified in ``isolated_hosts``. Isolated hosts can run non-isolated -images if the flag ``restrict_isolated_hosts_to_isolated_images`` is set to false. - -|DifferentHostFilter| - method ``host_passes`` returns ``True`` if the host to -place an instance on is different from all the hosts used by a set of instances. - -|SameHostFilter| does the opposite to what |DifferentHostFilter| does. -``host_passes`` returns ``True`` if the host we want to place an instance on is -one of the hosts used by a set of instances. - -|SimpleCIDRAffinityFilter| looks at the subnet mask and investigates if -the network address of the current host is in the same sub network as it was -defined in the request. - -|JsonFilter| - this filter provides the opportunity to write complicated -queries for the hosts capabilities filtering, based on simple JSON-like syntax. -There can be used the following operations for the host states properties: -``=``, ``<``, ``>``, ``in``, ``<=``, ``>=``, that can be combined with the following -logical operations: ``not``, ``or``, ``and``. For example, the following query can be -found in tests: - -:: - - ['and', - ['>=', '$free_ram_mb', 1024], - ['>=', '$free_disk_mb', 200 * 1024] - ] - -This query will filter all hosts with free RAM greater or equal than 1024 MB -and at the same time with free disk space greater or equal than 200 GB. - -Many filters use data from ``scheduler_hints``, that is defined in the moment of -creation of the new server for the user. The only exception for this rule is -|JsonFilter|, that takes data from the schedulers ``HostState`` data structure -directly. Variable naming, such as the ``$free_ram_mb`` example above, should -be based on those attributes. - -The |RetryFilter| filters hosts that have already been attempted for -scheduling. It only passes hosts that have not been previously attempted. If a -compute node is raising an exception when spawning an instance, then the -compute manager will reschedule it by adding the failing host to a retry -dictionary so that the RetryFilter will not accept it as a possible -destination. That means that if all of your compute nodes are failing, then the -RetryFilter will return 0 hosts and the scheduler will raise a NoValidHost -exception even if the problem is related to 1:N compute nodes. If you see that -case in the scheduler logs, then your problem is most likely related to a -compute problem and you should check the compute logs. - -The |NUMATopologyFilter| considers the NUMA topology that was specified for the instance -through the use of flavor extra_specs in combination with the image properties, as -described in detail in the related nova-spec document: - -* http://git.openstack.org/cgit/openstack/nova-specs/tree/specs/juno/implemented/virt-driver-numa-placement.rst - -and try to match it with the topology exposed by the host, accounting for the -``ram_allocation_ratio`` and ``cpu_allocation_ratio`` for over-subscription. The -filtering is done in the following manner: - -* Filter will attempt to pack instance cells onto host cells. -* It will consider the standard over-subscription limits for each host NUMA cell, - and provide limits to the compute host accordingly (as mentioned above). 
-* If instance has no topology defined, it will be considered for any host. -* If instance has a topology defined, it will be considered only for NUMA - capable hosts. - -Configuring Filters -------------------- - -To use filters you specify two settings: - -* ``filter_scheduler.available_filters`` - Defines filter classes made - available to the scheduler. This setting can be used multiple times. -* ``filter_scheduler.enabled_filters`` - Of the available filters, defines - those that the scheduler uses by default. - -The default values for these settings in nova.conf are: - -:: - - --filter_scheduler.available_filters=nova.scheduler.filters.all_filters - --filter_scheduler.enabled_filters=ComputeFilter,AvailabilityZoneFilter,ComputeCapabilitiesFilter,ImagePropertiesFilter,ServerGroupAntiAffinityFilter,ServerGroupAffinityFilter - -With this configuration, all filters in ``nova.scheduler.filters`` -would be available, and by default the |ComputeFilter|, -|AvailabilityZoneFilter|, |ComputeCapabilitiesFilter|, -|ImagePropertiesFilter|, |ServerGroupAntiAffinityFilter|, -and |ServerGroupAffinityFilter| would be used. - -Each filter selects hosts in a different way and has different costs. The order -of ``filter_scheduler.enabled_filters`` affects scheduling performance. The -general suggestion is to filter out invalid hosts as soon as possible to avoid -unnecessary costs. We can sort ``filter_scheduler.enabled_filters`` items by -their costs in reverse order. For example, ComputeFilter is better before any -resource calculating filters like RamFilter, CoreFilter. - -In medium/large environments having AvailabilityZoneFilter before any -capability or resource calculating filters can be useful. - -Writing Your Own Filter ------------------------ - -To create **your own filter** you must inherit from -|BaseHostFilter| and implement one method: ``host_passes``. -This method should return ``True`` if a host passes the filter and return -``False`` elsewhere. -It takes two parameters (named arbitrarily as ``host_state`` and ``spec_obj``): - -* the ``HostState`` object allows to get attributes of the host. -* the ``RequestSpec`` object describes the user request, including the flavor, - the image and the scheduler hints. - -For further details about each of those objects and their corresponding -attributes, please refer to the codebase (at least by looking at the other -filters code) or ask for help in the #openstack-nova IRC channel. - -As an example, nova.conf could contain the following scheduler-related -settings: - -:: - - --scheduler.driver=nova.scheduler.FilterScheduler - --filter_scheduler.available_filters=nova.scheduler.filters.all_filters - --filter_scheduler.available_filters=myfilter.MyFilter - --filter_scheduler.enabled_filters=RamFilter,ComputeFilter,MyFilter - -.. note:: When writing your own filter, be sure to add it to the list of available filters - and enable it in the default filters. The "all_filters" setting only includes the - filters shipped with nova. - -With these settings, nova will use the ``FilterScheduler`` for the scheduler -driver. All of the standard nova filters and MyFilter are available to the -FilterScheduler, but just the RamFilter, ComputeFilter, and MyFilter will be -used on each request. - -Weights -------- - -Filter Scheduler uses the so-called **weights** during its work. A weigher is a -way to select the best suitable host from a group of valid hosts by giving -weights to all the hosts in the list. 
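Before the multiplier and normalization details below, a minimal sketch of the weigher interface may help (the class name is hypothetical and the exact method signatures can vary between releases; it follows the ``_weight_object`` pattern described in the next paragraphs):

::

    from nova.scheduler import weights


    class FreeDiskWeigher(weights.BaseHostWeigher):
        """Illustrative sketch: prefer hosts with more free disk space."""

        def _weight_object(self, host_state, weight_properties):
            # Return a raw weight for a single host; the weight handler
            # normalizes the raw weights across all hosts before the
            # configured multiplier is applied.
            return float(host_state.free_disk_mb)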
- -In order to prioritize one weigher against another, all the weighers have to -define a multiplier that will be applied before computing the weight for a node. -All the weights are normalized beforehand so that the multiplier can be applied -easily. Therefore the final weight for the object will be:: - - weight = w1_multiplier * norm(w1) + w2_multiplier * norm(w2) + ... - -A weigher should be a subclass of ``weights.BaseHostWeigher`` and they can implement -both the ``weight_multiplier`` and ``_weight_object`` methods or just implement the -``weight_objects`` method. ``weight_objects`` method is overridden only if you need -access to all objects in order to calculate weights, and it just return a list of weights, -and not modify the weight of the object directly, since final weights are normalized -and computed by ``weight.BaseWeightHandler``. - -The Filter Scheduler weighs hosts based on the config option -`filter_scheduler.weight_classes`, this defaults to -`nova.scheduler.weights.all_weighers`, which selects the following weighers: - -* |RAMWeigher| Compute weight based on available RAM on the compute node. - Sort with the largest weight winning. If the multiplier, - :oslo.config:option:`filter_scheduler.ram_weight_multiplier`, is negative, the - host with least RAM available will win (useful for stacking hosts, instead - of spreading). -* |CPUWeigher| Compute weight based on available vCPUs on the compute node. - Sort with the largest weight winning. If the multiplier, - :oslo.config:option:`filter_scheduler.cpu_weight_multiplier`, is negative, the - host with least CPUs available will win (useful for stacking hosts, instead - of spreading). -* |DiskWeigher| Hosts are weighted and sorted by free disk space with the largest - weight winning. If the multiplier is negative, the host with less disk space available - will win (useful for stacking hosts, instead of spreading). -* |MetricsWeigher| This weigher can compute the weight based on the compute node - host's various metrics. The to-be weighed metrics and their weighing ratio - are specified in the configuration file as the followings:: - - metrics_weight_setting = name1=1.0, name2=-1.0 - -* |IoOpsWeigher| The weigher can compute the weight based on the compute node - host's workload. The default is to preferably choose light workload compute - hosts. If the multiplier is positive, the weigher prefer choosing heavy - workload compute hosts, the weighing has the opposite effect of the default. - -* |PCIWeigher| Compute a weighting based on the number of PCI devices on the - host and the number of PCI devices requested by the instance. For example, - given three hosts - one with a single PCI device, one with many PCI devices, - and one with no PCI devices - nova should prioritise these differently based - on the demands of the instance. If the instance requests a single PCI device, - then the first of the hosts should be preferred. Similarly, if the instance - requests multiple PCI devices, then the second of these hosts would be - preferred. Finally, if the instance does not request a PCI device, then the - last of these hosts should be preferred. - - For this to be of any value, at least one of the |PciPassthroughFilter| or - |NUMATopologyFilter| filters must be enabled. - - :Configuration Option: ``[filter_scheduler] pci_weight_multiplier``. Only - positive values are allowed for the multiplier as a negative value would - force non-PCI instances away from non-PCI hosts, thus, causing future - scheduling issues. 
- -* |ServerGroupSoftAffinityWeigher| The weigher can compute the weight based - on the number of instances that run on the same server group. The largest - weight defines the preferred host for the new instance. For the multiplier - only a positive value is meaningful for the calculation as a negative value - would mean that the affinity weigher would prefer non collocating placement. - -* |ServerGroupSoftAntiAffinityWeigher| The weigher can compute the weight based - on the number of instances that run on the same server group as a negative - value. The largest weight defines the preferred host for the new instance. - For the multiplier only a positive value is meaningful for the calculation as - a negative value would mean that the anti-affinity weigher would prefer - collocating placement. - -* |BuildFailureWeigher| Weigh hosts by the number of recent failed boot attempts. - It considers the build failure counter and can negatively weigh hosts with - recent failures. This avoids taking computes fully out of rotation. - -Filter Scheduler makes a local list of acceptable hosts by repeated filtering and -weighing. Each time it chooses a host, it virtually consumes resources on it, -so subsequent selections can adjust accordingly. It is useful if the customer -asks for a large block of instances, because weight is computed for -each instance requested. - -.. image:: /_static/images/filtering-workflow-2.png - -At the end Filter Scheduler sorts selected hosts by their weight and attempts -to provision instances on the chosen hosts. - -P.S.: you can find more examples of using Filter Scheduler and standard filters -in :mod:`nova.tests.scheduler`. - -.. |AllHostsFilter| replace:: :class:`AllHostsFilter ` -.. |ImagePropertiesFilter| replace:: :class:`ImagePropertiesFilter ` -.. |AvailabilityZoneFilter| replace:: :class:`AvailabilityZoneFilter ` -.. |BaseHostFilter| replace:: :class:`BaseHostFilter ` -.. |ComputeCapabilitiesFilter| replace:: :class:`ComputeCapabilitiesFilter ` -.. |ComputeFilter| replace:: :class:`ComputeFilter ` -.. |CoreFilter| replace:: :class:`CoreFilter ` -.. |AggregateCoreFilter| replace:: :class:`AggregateCoreFilter ` -.. |IsolatedHostsFilter| replace:: :class:`IsolatedHostsFilter ` -.. |JsonFilter| replace:: :class:`JsonFilter ` -.. |RamFilter| replace:: :class:`RamFilter ` -.. |AggregateRamFilter| replace:: :class:`AggregateRamFilter ` -.. |DiskFilter| replace:: :class:`DiskFilter ` -.. |AggregateDiskFilter| replace:: :class:`AggregateDiskFilter ` -.. |NumInstancesFilter| replace:: :class:`NumInstancesFilter ` -.. |AggregateNumInstancesFilter| replace:: :class:`AggregateNumInstancesFilter ` -.. |IoOpsFilter| replace:: :class:`IoOpsFilter ` -.. |AggregateIoOpsFilter| replace:: :class:`AggregateIoOpsFilter ` -.. |PciPassthroughFilter| replace:: :class:`PciPassthroughFilter ` -.. |SimpleCIDRAffinityFilter| replace:: :class:`SimpleCIDRAffinityFilter ` -.. |DifferentHostFilter| replace:: :class:`DifferentHostFilter ` -.. |SameHostFilter| replace:: :class:`SameHostFilter ` -.. |RetryFilter| replace:: :class:`RetryFilter ` -.. |AggregateTypeAffinityFilter| replace:: :class:`AggregateTypeAffinityFilter ` -.. |ServerGroupAntiAffinityFilter| replace:: :class:`ServerGroupAntiAffinityFilter ` -.. |ServerGroupAffinityFilter| replace:: :class:`ServerGroupAffinityFilter ` -.. |AggregateInstanceExtraSpecsFilter| replace:: :class:`AggregateInstanceExtraSpecsFilter ` -.. |AggregateMultiTenancyIsolation| replace:: :class:`AggregateMultiTenancyIsolation ` -.. 
|NUMATopologyFilter| replace:: :class:`NUMATopologyFilter ` -.. |RAMWeigher| replace:: :class:`RAMWeigher ` -.. |CPUWeigher| replace:: :class:`CPUWeigher ` -.. |AggregateImagePropertiesIsolation| replace:: :class:`AggregateImagePropertiesIsolation ` -.. |MetricsFilter| replace:: :class:`MetricsFilter ` -.. |MetricsWeigher| replace:: :class:`MetricsWeigher ` -.. |IoOpsWeigher| replace:: :class:`IoOpsWeigher ` -.. |PCIWeigher| replace:: :class:`PCIWeigher ` -.. |ServerGroupSoftAffinityWeigher| replace:: :class:`ServerGroupSoftAffinityWeigher ` -.. |ServerGroupSoftAntiAffinityWeigher| replace:: :class:`ServerGroupSoftAntiAffinityWeigher ` -.. |DiskWeigher| replace:: :class:`DiskWeigher ` -.. |BuildFailureWeigher| replace:: :class:`BuildFailureWeigher ` diff --git a/doc/source/user/flavors.rst b/doc/source/user/flavors.rst index 77c4ed19094..946ac241a61 100644 --- a/doc/source/user/flavors.rst +++ b/doc/source/user/flavors.rst @@ -41,7 +41,7 @@ Root Disk GB The root disk is an ephemeral disk that the base image is copied into. When booting from a persistent volume it is not used. The ``0`` size is a special case which uses the native base image size as the size of the ephemeral root - volume. However, in this case the filter scheduler cannot select the compute + volume. However, in this case the scheduler cannot select the compute host based on the virtual image size. As a result, ``0`` should only be used for volume booted instances or for testing purposes. Volume-backed instances can be enforced for flavors with zero root disk via the @@ -59,16 +59,12 @@ Swap Amount of swap space (in megabytes) to use. This property is optional. If unspecified, the value is ``0`` by default. -RXTX Factor - The receive/transmit factor of any network ports on the instance. This - property is optional. If unspecified, the value is ``1.0`` by default. - - .. note:: - - This property only applies if using the ``xen`` compute driver with the - ``nova-network`` network driver. It will likely be deprecated in a future - release. ``neutron`` users should refer to the :neutron-doc:`neutron QoS - documentation ` +RXTX Factor (DEPRECATED) + This value was only applicable when using the ``xen`` compute driver with the + ``nova-network`` network driver. Since ``nova-network`` has been removed, + this no longer applies and should not be specified. It will likely be + removed in a future release. ``neutron`` users should refer to the + :neutron-doc:`neutron QoS documentation ` Is Public Boolean value that defines whether the flavor is available to all users or @@ -88,615 +84,185 @@ Extra Specs options. For more information on the standardized extra specs available, :ref:`see below ` +Description + A free form description of the flavor. Limited to 65535 characters in length. + Only printable characters are allowed. Available starting in + microversion 2.55. + .. _flavors-extra-specs: Extra Specs ~~~~~~~~~~~ -.. TODO: Consider adding a table of contents here for the various extra specs - or make them sub-sections. - .. todo:: - A lot of these need investigation - for example, I can find no reference to - the ``cpu_shares_level`` option outside of documentation and (possibly) - useless tests. We should assess which drivers each option actually apply to. - -CPU limits - You can configure the CPU limits with control parameters. For example, to - configure the I/O limit, use: - - .. 
code-block:: console - - $ openstack flavor set FLAVOR-NAME \ - --property quota:read_bytes_sec=10240000 \ - --property quota:write_bytes_sec=10240000 - - Use these optional parameters to control weight shares, enforcement intervals - for runtime quotas, and a quota for maximum allowed bandwidth: - - - ``cpu_shares``: Specifies the proportional weighted share for the domain. - If this element is omitted, the service defaults to the OS provided - defaults. There is no unit for the value; it is a relative measure based on - the setting of other VMs. For example, a VM configured with value 2048 gets - twice as much CPU time as a VM configured with value 1024. - - - ``cpu_shares_level``: On VMware, specifies the allocation level. Can be - ``custom``, ``high``, ``normal``, or ``low``. If you choose ``custom``, set - the number of shares using ``cpu_shares_share``. - - - ``cpu_period``: Specifies the enforcement interval (unit: microseconds) - for QEMU and LXC hypervisors. Within a period, each VCPU of the domain is - not allowed to consume more than the quota worth of runtime. The value - should be in range ``[1000, 1000000]``. A period with value 0 means no - value. - - - ``cpu_limit``: Specifies the upper limit for VMware machine CPU allocation - in MHz. This parameter ensures that a machine never uses more than the - defined amount of CPU time. It can be used to enforce a limit on the - machine's CPU performance. - - - ``cpu_reservation``: Specifies the guaranteed minimum CPU reservation in - MHz for VMware. This means that if needed, the machine will definitely get - allocated the reserved amount of CPU cycles. - - - ``cpu_quota``: Specifies the maximum allowed bandwidth (unit: - microseconds). A domain with a negative-value quota indicates that the - domain has infinite bandwidth, which means that it is not bandwidth - controlled. The value should be in range ``[1000, 18446744073709551]`` or - less than 0. A quota with value 0 means no value. You can use this feature - to ensure that all vCPUs run at the same speed. For example: - - .. code-block:: console - - $ openstack flavor set FLAVOR-NAME \ - --property quota:cpu_quota=10000 \ - --property quota:cpu_period=20000 - - In this example, an instance of ``FLAVOR-NAME`` can only consume a maximum - of 50% CPU of a physical CPU computing capability. + This is now documented in :doc:`/configuration/extra-specs`, so this should + be removed and the documentation moved to those specs. -Memory limits - For VMware, you can configure the memory limits with control parameters. - - Use these optional parameters to limit the memory allocation, guarantee - minimum memory reservation, and to specify shares used in case of resource - contention: - - - ``memory_limit``: Specifies the upper limit for VMware machine memory - allocation in MB. The utilization of a virtual machine will not exceed this - limit, even if there are available resources. This is typically used to - ensure a consistent performance of virtual machines independent of - available resources. - - - ``memory_reservation``: Specifies the guaranteed minimum memory reservation - in MB for VMware. This means the specified amount of memory will definitely - be allocated to the machine. - - - ``memory_shares_level``: On VMware, specifies the allocation level. This - can be ``custom``, ``high``, ``normal`` or ``low``. If you choose - ``custom``, set the number of shares using ``memory_shares_share``. 
- - - ``memory_shares_share``: Specifies the number of shares allocated in the - event that ``custom`` is used. There is no unit for this value. It is a - relative measure based on the settings for other VMs. For example: - - .. code-block:: console - - $ openstack flavor set FLAVOR-NAME \ - --property quota:memory_shares_level=custom \ - --property quota:memory_shares_share=15 - -Disk I/O limits - For VMware, you can configure the resource limits for disk with control - parameters. - - Use these optional parameters to limit the disk utilization, guarantee disk - allocation, and to specify shares used in case of resource contention. This - allows the VMware driver to enable disk allocations for the running instance. - - - ``disk_io_limit``: Specifies the upper limit for disk utilization in I/O - per second. The utilization of a virtual machine will not exceed this - limit, even if there are available resources. The default value is -1 which - indicates unlimited usage. - - - ``disk_io_reservation``: Specifies the guaranteed minimum disk allocation - in terms of Input/output Operations Per Second (IOPS). - - - ``disk_io_shares_level``: Specifies the allocation level. This can be - ``custom``, ``high``, ``normal`` or ``low``. If you choose custom, set the - number of shares using ``disk_io_shares_share``. - - - ``disk_io_shares_share``: Specifies the number of shares allocated in the - event that ``custom`` is used. When there is resource contention, this - value is used to determine the resource allocation. - - The example below sets the ``disk_io_reservation`` to 2000 IOPS. - - .. code-block:: console - - $ openstack flavor set FLAVOR-NAME \ - --property quota:disk_io_reservation=2000 - -Disk tuning - Using disk I/O quotas, you can set maximum disk write to 10 MB per second for - a VM user. For example: - - .. code-block:: console - - $ openstack flavor set FLAVOR-NAME \ - --property quota:disk_write_bytes_sec=10485760 - - The disk I/O options are: - - - ``disk_read_bytes_sec`` - - ``disk_read_iops_sec`` - - ``disk_write_bytes_sec`` - - ``disk_write_iops_sec`` - - ``disk_total_bytes_sec`` - - ``disk_total_iops_sec`` - -Bandwidth I/O - The vif I/O options are: - - - ``vif_inbound_average`` - - ``vif_inbound_burst`` - - ``vif_inbound_peak`` - - ``vif_outbound_average`` - - ``vif_outbound_burst`` - - ``vif_outbound_peak`` - - Incoming and outgoing traffic can be shaped independently. The bandwidth - element can have at most, one inbound and at most, one outbound child - element. If you leave any of these child elements out, no quality of service - (QoS) is applied on that traffic direction. So, if you want to shape only the - network's incoming traffic, use inbound only (and vice versa). Each element - has one mandatory attribute average, which specifies the average bit rate on - the interface being shaped. - - There are also two optional attributes (integer): ``peak``, which specifies - the maximum rate at which a bridge can send data (kilobytes/second), and - ``burst``, the amount of bytes that can be burst at peak speed (kilobytes). - The rate is shared equally within domains connected to the network. - - The example below sets network traffic bandwidth limits for existing flavor - as follows: - - - Outbound traffic: - - - average: 262 Mbps (32768 kilobytes/second) - - - peak: 524 Mbps (65536 kilobytes/second) - - - burst: 65536 kilobytes - - - Inbound traffic: - - - average: 262 Mbps (32768 kilobytes/second) - - - peak: 524 Mbps (65536 kilobytes/second) - - - burst: 65536 kilobytes - - .. 
code-block:: console - - $ openstack flavor set FLAVOR-NAME \ - --property quota:vif_outbound_average=32768 \ - --property quota:vif_outbound_peak=65536 \ - --property quota:vif_outbound_burst=65536 \ - --property quota:vif_inbound_average=32768 \ - --property quota:vif_inbound_peak=65536 \ - --property quota:vif_inbound_burst=65536 - - .. note:: - - All the speed limit values in above example are specified in - kilobytes/second. And burst values are in kilobytes. Values were converted - using `Data rate units on Wikipedia - `_. +.. _extra-specs-hardware-video-ram: Hardware video RAM Specify ``hw_video:ram_max_mb`` to control the maximum RAM for the video image. Used in conjunction with the ``hw_video_ram`` image property. ``hw_video_ram`` must be less than or equal to ``hw_video:ram_max_mb``. - This is currently only supported by the libvirt driver. + This is currently supported by the libvirt and the vmware drivers. See https://libvirt.org/formatdomain.html#elementsVideo for more information on how this is used to set the ``vram`` attribute with the libvirt driver. -Watchdog behavior - For the libvirt driver, you can enable and set the behavior of a virtual - hardware watchdog device for each flavor. Watchdog devices keep an eye on the - guest server, and carry out the configured action, if the server hangs. The - watchdog uses the i6300esb device (emulating a PCI Intel 6300ESB). If - ``hw:watchdog_action`` is not specified, the watchdog is disabled. - - To set the behavior, use: - - .. code-block:: console - - $ openstack flavor set FLAVOR-NAME --property hw:watchdog_action=ACTION - - Valid ACTION values are: - - - ``disabled``: (default) The device is not attached. - - ``reset``: Forcefully reset the guest. - - ``poweroff``: Forcefully power off the guest. - - ``pause``: Pause the guest. - - ``none``: Only enable the watchdog; do nothing if the server hangs. - - .. note:: - - Watchdog behavior set using a specific image's properties will override - behavior set using flavors. - -Random-number generator - If a random-number generator device has been added to the instance through - its image properties, the device can be enabled and configured using: - - .. code-block:: console - - $ openstack flavor set FLAVOR-NAME \ - --property hw_rng:allowed=True \ - --property hw_rng:rate_bytes=RATE-BYTES \ - --property hw_rng:rate_period=RATE-PERIOD - - Where: - - - RATE-BYTES: (integer) Allowed amount of bytes that the guest can read from - the host's entropy per period. - - RATE-PERIOD: (integer) Duration of the read period in seconds. - -CPU topology - For the libvirt driver, you can define the topology of the processors in the - virtual machine using properties. The properties with ``max`` limit the - number that can be selected by the user with image properties. + See https://pubs.vmware.com/vi-sdk/visdk250/ReferenceGuide/vim.vm.device.VirtualVideoCard.html + for more information on how this is used to set the ``videoRamSizeInKB`` attribute with + the vmware driver. - .. code-block:: console +.. _extra-specs-secure-boot: - $ openstack flavor set FLAVOR-NAME \ - --property hw:cpu_sockets=FLAVOR-SOCKETS \ - --property hw:cpu_cores=FLAVOR-CORES \ - --property hw:cpu_threads=FLAVOR-THREADS \ - --property hw:cpu_max_sockets=FLAVOR-SOCKETS \ - --property hw:cpu_max_cores=FLAVOR-CORES \ - --property hw:cpu_max_threads=FLAVOR-THREADS - - Where: - - - FLAVOR-SOCKETS: (integer) The number of sockets for the guest VM. By - default, this is set to the number of vCPUs requested. 
- - FLAVOR-CORES: (integer) The number of cores per socket for the guest VM. By - default, this is set to ``1``. - - FLAVOR-THREADS: (integer) The number of threads per core for the guest VM. - By default, this is set to ``1``. - -.. _extra-specs-cpu-policy: - -CPU pinning policy - For the libvirt driver, you can pin the virtual CPUs (vCPUs) of instances to - the host's physical CPU cores (pCPUs) using properties. You can further - refine this by stating how hardware CPU threads in a simultaneous - multithreading-based (SMT) architecture be used. These configurations will - result in improved per-instance determinism and performance. - - .. note:: - - SMT-based architectures include Intel processors with Hyper-Threading - technology. In these architectures, processor cores share a number of - components with one or more other cores. Cores in such architectures are - commonly referred to as hardware threads, while the cores that a given - core share components with are known as thread siblings. - - .. note:: - - Host aggregates should be used to separate these pinned instances from - unpinned instances as the latter will not respect the resourcing - requirements of the former. +Secure Boot + :doc:`Secure Boot ` can help ensure the bootloader used + for your instances is trusted, preventing a possible attack vector. .. code:: console $ openstack flavor set FLAVOR-NAME \ - --property hw:cpu_policy=CPU-POLICY \ - --property hw:cpu_thread_policy=CPU-THREAD-POLICY - - Valid CPU-POLICY values are: - - - ``shared``: (default) The guest vCPUs will be allowed to freely float - across host pCPUs, albeit potentially constrained by NUMA policy. - - ``dedicated``: The guest vCPUs will be strictly pinned to a set of host - pCPUs. In the absence of an explicit vCPU topology request, the drivers - typically expose all vCPUs as sockets with one core and one thread. When - strict CPU pinning is in effect the guest CPU topology will be setup to - match the topology of the CPUs to which it is pinned. This option implies - an overcommit ratio of 1.0. For example, if a two vCPU guest is pinned to a - single host core with two threads, then the guest will get a topology of - one socket, one core, two threads. - - Valid CPU-THREAD-POLICY values are: - - - ``prefer``: (default) The host may or may not have an SMT architecture. - Where an SMT architecture is present, thread siblings are preferred. - - ``isolate``: The host must not have an SMT architecture or must emulate a - non-SMT architecture. If the host does not have an SMT architecture, each - vCPU is placed on a different core as expected. If the host does have an - SMT architecture - that is, one or more cores have thread siblings - then - each vCPU is placed on a different physical core. No vCPUs from other - guests are placed on the same core. All but one thread sibling on each - utilized core is therefore guaranteed to be unusable. - - ``require``: The host must have an SMT architecture. Each vCPU is allocated - on thread siblings. If the host does not have an SMT architecture, then it - is not used. If the host has an SMT architecture, but not enough cores with - free thread siblings are available, then scheduling fails. - - .. note:: - - The ``hw:cpu_thread_policy`` option is only valid if ``hw:cpu_policy`` is - set to ``dedicated``. - -.. 
_extra-specs-numa-topology: - -NUMA topology - For the libvirt driver, you can define the host NUMA placement for the - instance vCPU threads as well as the allocation of instance vCPUs and memory - from the host NUMA nodes. For flavors whose memory and vCPU allocations are - larger than the size of NUMA nodes in the compute hosts, the definition of a - NUMA topology allows hosts to better utilize NUMA and improve performance of - the instance OS. + --property os:secure_boot=SECURE_BOOT_OPTION - .. code-block:: console + Valid ``SECURE_BOOT_OPTION`` values are: - $ openstack flavor set FLAVOR-NAME \ - --property hw:numa_nodes=FLAVOR-NODES \ - --property hw:numa_cpus.N=FLAVOR-CORES \ - --property hw:numa_mem.N=FLAVOR-MEMORY - - Where: - - - FLAVOR-NODES: (integer) The number of host NUMA nodes to restrict execution - of instance vCPU threads to. If not specified, the vCPU threads can run on - any number of the host NUMA nodes available. - - N: (integer) The instance NUMA node to apply a given CPU or memory - configuration to, where N is in the range ``0`` to ``FLAVOR-NODES - 1``. - - FLAVOR-CORES: (comma-separated list of integers) A list of instance vCPUs - to map to instance NUMA node N. If not specified, vCPUs are evenly divided - among available NUMA nodes. - - FLAVOR-MEMORY: (integer) The number of MB of instance memory to map to - instance NUMA node N. If not specified, memory is evenly divided among - available NUMA nodes. + - ``required``: Enable Secure Boot for instances running with this flavor. + - ``disabled`` or ``optional``: (default) Disable Secure Boot for instances + running with this flavor. .. note:: - ``hw:numa_cpus.N`` and ``hw:numa_mem.N`` are only valid if - ``hw:numa_nodes`` is set. Additionally, they are only required if the - instance's NUMA nodes have an asymmetrical allocation of CPUs and RAM - (important for some NFV workloads). + Supported by the Hyper-V and libvirt drivers. - .. note:: + .. versionchanged:: 23.0.0 (Wallaby) - The ``N`` parameter is an index of *guest* NUMA nodes and may not - correspond to *host* NUMA nodes. For example, on a platform with two NUMA - nodes, the scheduler may opt to place guest NUMA node 0, as referenced in - ``hw:numa_mem.0`` on host NUMA node 1 and vice versa. Similarly, the - integers used for ``FLAVOR-CORES`` are indexes of *guest* vCPUs and may - not correspond to *host* CPUs. As such, this feature cannot be used to - constrain instances to specific host CPUs or NUMA nodes. + Added support for secure boot to the libvirt driver. - .. warning:: +.. _extra-specs-required-resources: - If the combined values of ``hw:numa_cpus.N`` or ``hw:numa_mem.N`` are - greater than the available number of CPUs or memory respectively, an - exception is raised. +Custom resource classes and standard resource classes to override + Specify custom resource classes to require or override quantity values of + standard resource classes. -.. _extra-specs-realtime-policy: + The syntax of the extra spec is ``resources:<resource_class_name>=VALUE`` + (``VALUE`` is integer). + The name of custom resource classes must start with ``CUSTOM_``. + Standard resource classes to override are ``VCPU``, ``MEMORY_MB`` or + ``DISK_GB``. In this case, you can disable scheduling based on standard + resource classes by setting the value to ``0``. -CPU real-time policy - For the libvirt driver, you can state that one or more of your instance - virtual CPUs (vCPUs), though not all of them, run with a real-time policy.
-  When used on a correctly configured host, this provides stronger guarantees
-  for worst case scheduler latency for vCPUs and is a requirement for certain
-  applications.
+  For example:

-  .. todo::
+  - ``resources:CUSTOM_BAREMETAL_SMALL=1``
+  - ``resources:VCPU=0``

-     Document the required steps to configure hosts and guests. There are a lot
-     of things necessary, from isolating hosts and configuring the
-     ``vcpu_pin_set`` nova configuration option on the host, to choosing a
-     correctly configured guest image.
+  See :ironic-doc:`Create flavors for use with the Bare Metal service
+  ` for more examples.

-  .. important::
+  .. versionadded:: 16.0.0 (Pike)

-     While most of your instance vCPUs can run with a real-time policy, you must
-     mark at least one vCPU as non-real-time, to be used for both non-real-time
-     guest processes and emulator overhead (housekeeping) processes.
+.. _extra-specs-required-traits:

-  .. important::
+Required traits
+  Required traits allow specifying a server to build on a compute node with
+  the set of traits specified in the flavor. The traits are associated with
+  the resource provider that represents the compute node in the Placement
+  API. See the resource provider traits API reference for more details:
+  https://docs.openstack.org/api-ref/placement/#resource-provider-traits

-     To use this extra spec, you must enable pinned CPUs. Refer to
-     :ref:`CPU policy <extra-specs-cpu-policy>` for more information.
+  The syntax of the extra spec is ``trait:<trait_name>=required``, for
+  example:

-  .. code:: console
+  - ``trait:HW_CPU_X86_AVX2=required``
+  - ``trait:STORAGE_DISK_SSD=required``

-     $ openstack flavor set FLAVOR-NAME \
-       --property hw:cpu_realtime=CPU-REALTIME-POLICY \
-       --property hw:cpu_realtime_mask=CPU-REALTIME-MASK
+  The scheduler will pass required traits to the
+  ``GET /allocation_candidates`` endpoint in the Placement API to include
+  only resource providers that can satisfy the required traits. In 17.0.0
+  the only valid value is ``required``. In 18.0.0 ``forbidden`` is added (see
+  below). Any other value will be considered invalid.

-  Where:
+  Traits can be managed using the `osc-placement plugin`__.

-  CPU-REALTIME-POLICY (enum):
-    One of:
+  __ https://docs.openstack.org/osc-placement/latest/index.html

-    - ``no``: (default) The guest vCPUs will not have a real-time policy
-    - ``yes``: The guest vCPUs will have a real-time policy
+  .. versionadded:: 17.0.0 (Queens)

-  CPU-REALTIME-MASK (coremask):
-    A coremask indicating which vCPUs **will not** have a real-time policy. This
-    should start with a ``^``. For example, a value of ``^0-1`` indicates that
-    all vCPUs *except* vCPUs ``0`` and ``1`` will have a real-time policy.
+.. _extra-specs-forbidden-traits:

-  .. note::
+Forbidden traits
+  Forbidden traits are similar to required traits, described above, but
+  instead of specifying the set of traits that must be satisfied by a compute
+  node, forbidden traits must **not** be present.

-     The ``hw:cpu_realtime_mask`` option is only valid if ``hw:cpu_realtime``
-     is set to ``yes``.
+  The syntax of the extra spec is ``trait:<trait_name>=forbidden``, for
+  example:

-Emulator threads policy
-  For the libvirt driver, you can assign a separate pCPU to an instance that
-  will be used for emulator threads, which are emulator processes not directly
-  related to the guest OS. This pCPU will used in addition to the pCPUs used
-  for the guest. This is generally required for use with a :ref:`real-time
-  workload <extra-specs-realtime-policy>`.
+  - ``trait:HW_CPU_X86_AVX2=forbidden``
+  - ``trait:STORAGE_DISK_SSD=forbidden``

-  .. important::
+  Traits can be managed using the `osc-placement plugin`__.

-     To use this extra spec, you must enable pinned CPUs. Refer to :ref:`CPU
-     policy <extra-specs-cpu-policy>` for more information.
+  __ https://docs.openstack.org/osc-placement/latest/index.html

-  .. code:: console
+  .. versionadded:: 18.0.0 (Rocky)

-     $ openstack flavor set FLAVOR-NAME \
-       --property hw:emulator_threads_policy=THREAD-POLICY
+.. _extra-specs-numbered-resource-groupings:

-  Valid THREAD-POLICY values are:
+Numbered groupings of resource classes and traits
+  Specify numbered groupings of resource classes and traits.

-  - ``share``: (default) The emulator threads float across the pCPUs
-    associated to the guest. To place a workload's emulator threads on
-    a set of isolated physical CPUs, set ``share``` and
-    ``[compute]/cpu_shared_set`` configuration option to the set of
-    host CPUs that should be used for best-effort CPU resources.
+  The syntax is as follows (``N`` and ``VALUE`` are integers):

-  - ``isolate``: The emulator threads are isolated on a single pCPU.
+  .. parsed-literal::

-Large pages allocation
-  You can configure the size of large pages used to back the VMs.
+    resources\ *N*:*<resource_class_name>*\ =\ *VALUE*
+    trait\ *N*:*<trait_name>*\ =required

-  .. code:: console
-
-     $ openstack flavor set FLAVOR-NAME \
-       --property hw:mem_page_size=PAGE_SIZE
+  A given numbered ``resources`` or ``trait`` key may be repeated to specify
+  multiple resources/traits in the same grouping, just as with the
+  un-numbered syntax.

-  Valid ``PAGE_SIZE`` values are:
+  Specify inter-group affinity policy via the ``group_policy`` key,
+  which may have the following values:

-  - ``small``: (default) The smallest page size is used. Example: 4 KB on x86.
-  - ``large``: Only use larger page sizes for guest RAM. Example: either 2 MB
-    or 1 GB on x86.
-  - ``any``: It is left up to the compute driver to decide. In this case, the
-    libvirt driver might try to find large pages, but fall back to small pages.
-    Other drivers may choose alternate policies for ``any``.
-  - pagesize: (string) An explicit page size can be set if the workload has
-    specific requirements. This value can be an integer value for the page size
-    in KB, or can use any standard suffix. Example: ``4KB``, ``2MB``,
-    ``2048``, ``1GB``.
+  * ``isolate``: Different numbered request groups will be satisfied by
+    *different* providers.
+  * ``none``: Different numbered request groups may be satisfied
+    by different providers *or* common providers.

   .. note::

-     Large pages can be enabled for guest RAM without any regard to whether the
-     guest OS will use them or not. If the guest OS chooses not to use huge
-     pages, it will merely see small pages as before. Conversely, if a guest OS
-     does intend to use huge pages, it is very important that the guest RAM be
-     backed by huge pages. Otherwise, the guest OS will not be getting the
-     performance benefit it is expecting.
+     If more than one group is specified then the ``group_policy`` is
+     mandatory in the request. However, such groups might come from sources
+     other than the flavor extra specs (e.g. from Neutron ports with a QoS
+     minimum bandwidth policy). If the flavor specifies neither groups nor
+     ``group_policy``, but more than one group comes from other sources,
+     nova defaults ``group_policy`` to ``none`` to avoid scheduler failure.
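+
+  As an illustration of how such extra specs are attached to a flavor, the
+  following minimal sketch uses the same ``openstack flavor set`` command seen
+  elsewhere in this document, with the two request groups taken from the
+  example shown below (``FLAVOR-NAME`` is a placeholder):
+
+  .. code-block:: console
+
+     $ openstack flavor set FLAVOR-NAME \
+         --property resources1:SRIOV_NET_VF=1 \
+         --property trait1:CUSTOM_PHYSNET_NET1=required \
+         --property resources2:SRIOV_NET_VF=1 \
+         --property trait2:CUSTOM_PHYSNET_NET2=required \
+         --property group_policy=isolate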
- - ALIAS: (string) The alias which correspond to a particular PCI device class - as configured in the nova configuration file (see - :doc:`/configuration/config`). - - COUNT: (integer) The amount of PCI devices of type ALIAS to be assigned to - a guest. - -Hiding hypervisor signature - Some hypervisors add a signature to their guests. While the presence - of the signature can enable some paravirtualization features on the - guest, it can also have the effect of preventing some drivers from - loading. Hiding the signature by setting this property to true may - allow such drivers to load and work. - - .. note:: - - As of the 18.0.0 Rocky release, this is only supported by the libvirt - driver. - - .. code:: console - - $ openstack flavor set FLAVOR-NAME \ - --property hide_hypervisor_id=VALUE - - Where: - - - VALUE: (string) 'true' or 'false'. 'false' is equivalent to the - property not existing. - -Secure Boot - When your Compute services use the Hyper-V hypervisor, you can enable secure - boot for Windows and Linux instances. - - .. code:: console - - $ openstack flavor set FLAVOR-NAME \ - --property os:secure_boot=SECURE_BOOT_OPTION - - Valid ``SECURE_BOOT_OPTION`` values are: - - - ``required``: Enable Secure Boot for instances running with this flavor. - - ``disabled`` or ``optional``: (default) Disable Secure Boot for instances - running with this flavor. - -.. _extra-specs-required-traits: - -Required traits - Added in the 17.0.0 Queens release. - - Required traits allow specifying a server to build on a compute node with - the set of traits specified in the flavor. The traits are associated with - the resource provider that represents the compute node in the Placement - API. See the resource provider traits API reference for more details: - https://developer.openstack.org/api-ref/placement/#resource-provider-traits - - The syntax of the extra spec is ``trait:=required``, for - example: - - - trait:HW_CPU_X86_AVX2=required - - trait:STORAGE_DISK_SSD=required - - The scheduler will pass required traits to the - ``GET /allocation_candidates`` endpoint in the Placement API to include - only resource providers that can satisfy the required traits. In 17.0.0 - the only valid value is ``required``. In 18.0.0 ``forbidden`` is added (see - below). Any other value will be considered - invalid. - - The FilterScheduler is currently the only scheduler driver that supports - this feature. - - Traits can be managed using the `osc-placement plugin`_. - -.. _extra-specs-forbidden-traits: - -Forbidden traits - Added in the 18.0.0 Rocky release. + For example, to create a server with the following VFs: - Forbidden traits are similar to required traits, described above, but - instead of specifying the set of traits that must be satisfied by a compute - node, forbidden traits must **not** be present. 
+    * One SR-IOV virtual function (VF) on NET1 with bandwidth 10000 bytes/sec
+    * One SR-IOV virtual function (VF) on NET2 with bandwidth 20000 bytes/sec
+      on a *different* NIC with SSL acceleration

-  The syntax of the extra spec is ``trait:<trait_name>=forbidden``, for
-  example:
+    It is specified in the extra specs as follows::

-  - trait:HW_CPU_X86_AVX2=forbidden
-  - trait:STORAGE_DISK_SSD=forbidden
+        resources1:SRIOV_NET_VF=1
+        resources1:NET_EGRESS_BYTES_SEC=10000
+        trait1:CUSTOM_PHYSNET_NET1=required
+        resources2:SRIOV_NET_VF=1
+        resources2:NET_EGRESS_BYTES_SEC=20000
+        trait2:CUSTOM_PHYSNET_NET2=required
+        trait2:HW_NIC_ACCEL_SSL=required
+        group_policy=isolate

-  The FilterScheduler is currently the only scheduler driver that supports
-  this feature.
+    See `Granular Resource Request Syntax`__ for more details.

-  Traits can be managed using the `osc-placement plugin`_.
+    __ https://specs.openstack.org/openstack/nova-specs/specs/rocky/implemented/granular-resource-requests.html

-.. _osc-placement plugin: https://docs.openstack.org/osc-placement/latest/index.html
+    .. versionadded:: 18.0.0 (Rocky)
diff --git a/doc/source/user/index.rst b/doc/source/user/index.rst
index e652c03476f..5facf792ad5 100644
--- a/doc/source/user/index.rst
+++ b/doc/source/user/index.rst
@@ -8,11 +8,16 @@ End user guide
 .. toctree::
    :maxdepth: 1

+   availability-zones
    launch-instances
-   config-drive
-   metadata-service
+   metadata
+   manage-ip-addresses
    certificate-validation
    resize
+   reboot
+   rescue
+   block-device-mapping
+   /reference/api-microversion-history

 .. todo:: The rest of this document should probably move to the admin guide.

@@ -27,7 +32,8 @@ Architecture Overview
   to connect specific block devices to computes. This deserves its own deep
   dive.

-* :doc:`Conductor `: TODO
+See the :ref:`reference guide ` for details about more
+internal subsystems.

 Deployment Considerations
 -------------------------
@@ -52,7 +58,7 @@ the defaults from the :doc:`install guide ` will be sufficient.
   allows sharding of your compute environment. Upfront planning is key to a
   successful Cells v2 layout.

-* :doc:`Placement service `: Overview of the placement
+* :placement-doc:`Placement service <>`: Overview of the placement
   service, including how it fits in with the rest of nova.

 * :doc:`Running nova-api on wsgi `: Considerations for using a real
@@ -66,19 +72,20 @@ Once you are running nova, the following information is extremely useful.
 * :doc:`Admin Guide `: A collection of guides for administrating
   nova.

-* :doc:`Upgrades `: How nova is designed to be upgraded for minimal
-  service impact, and the order you should do them in.
-
 * :doc:`Quotas `: Managing project quotas in nova.

-* :doc:`Aggregates `: Aggregates are a useful way of grouping
-  hosts together for scheduling purposes.
+* :doc:`Availability Zones `: Availability Zones are
+  an end-user visible logical abstraction for partitioning a cloud without
+  knowing the physical infrastructure. They can be used to partition a cloud on
+  arbitrary factors, such as location (country, datacenter, rack), network
+  layout and/or power source.

-* :doc:`Filter Scheduler `: How the filter scheduler is
+* :doc:`Scheduling `: How the scheduler is
   configured, and how that will impact where compute instances land in your
   environment. If you are seeing unexpected distribution of compute instances
   in your hosts, you'll want to dive into this configuration.
-* :doc:`Exposing custom metadata to compute instances `: How and - when you might want to extend the basic metadata exposed to compute instances - (either via metadata server or config drive) for your specific purposes. +* :doc:`Exposing custom metadata to compute instances `: How + and when you might want to extend the basic metadata exposed to compute + instances (either via metadata server or config drive) for your specific + purposes. diff --git a/doc/source/user/launch-instance-from-image.rst b/doc/source/user/launch-instance-from-image.rst index a31fc18c0d5..dd4ee523d63 100644 --- a/doc/source/user/launch-instance-from-image.rst +++ b/doc/source/user/launch-instance-from-image.rst @@ -18,8 +18,8 @@ Follow the steps below to launch an instance from an image. For example, you can add a description for your server by providing the ``--property description="My Server"`` parameter. - You can pass user data in a local file at instance launch by using the - ``--user-data USER-DATA-FILE`` parameter. + You can pass :ref:`user data ` in a local file at + instance launch by using the ``--user-data USER-DATA-FILE`` parameter. .. important:: diff --git a/doc/source/user/launch-instance-from-volume.rst b/doc/source/user/launch-instance-from-volume.rst index a79e2cf5915..f3c2636d5b2 100644 --- a/doc/source/user/launch-instance-from-volume.rst +++ b/doc/source/user/launch-instance-from-volume.rst @@ -13,7 +13,7 @@ To complete these tasks, use these parameters on the :widths: 30 15 30 * - Task - - openstack server create parameter + - openstack server create parameter(s) - Information * - Boot an instance from an image and attach a non-bootable volume. @@ -21,10 +21,10 @@ To complete these tasks, use these parameters on the - :ref:`Boot_instance_from_image_and_attach_non-bootable_volume` * - Create a volume from an image and boot an instance from that volume. - - ``--block-device`` + - ``--boot-from-volume`` and ``--image``; ``--block-device`` - :ref:`Create_volume_from_image_and_boot_instance` * - Boot from an existing source image, volume, or snapshot. - - ``--block-device`` + - ``--volume`` or ``--snapshot``; ``--block-device`` - :ref:`Create_volume_from_image_and_boot_instance` * - Attach a swap disk to an instance. - ``--swap`` @@ -39,12 +39,18 @@ To complete these tasks, use these parameters on the :cinder-doc:`Cinder documentation `. +.. note:: + + The maximum limit on the number of disk devices allowed to attach to + a single server is configurable with the option + :oslo.config:option:`compute.max_disk_devices_to_attach`. + .. _Boot_instance_from_image_and_attach_non-bootable_volume: Boot instance from image and attach non-bootable volume -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +------------------------------------------------------- -Create a non-bootable volume and attach that volume to an instance that +You can create a non-bootable volume and attach that volume to an instance that you boot from an image. To create a non-bootable volume, do not create it from an image. The @@ -55,7 +61,7 @@ system. .. code-block:: console - $ openstack volume create --size 8 my-volume + $ openstack volume create --size 8 test-volume +---------------------+--------------------------------------+ | Field | Value | +---------------------+--------------------------------------+ @@ -63,210 +69,206 @@ system. 
| availability_zone | nova | | bootable | false | | consistencygroup_id | None | - | created_at | 2016-11-25T10:37:08.850997 | + | created_at | 2021-06-01T15:01:31.000000 | | description | None | | encrypted | False | - | id | b8f7bbec-6274-4cd7-90e7-60916a5e75d4 | + | id | 006efd7a-48a8-4c75-bafb-6b483199d284 | | migration_status | None | | multiattach | False | - | name | my-volume | + | name | test-volume | | properties | | - | replication_status | disabled | + | replication_status | None | | size | 8 | | snapshot_id | None | | source_volid | None | | status | creating | - | type | None | + | type | lvmdriver-1 | | updated_at | None | - | user_id | 0678735e449149b0a42076e12dd54e28 | + | user_id | 0a4d2edb9042412ba4f719a547d42f79 | +---------------------+--------------------------------------+ -#. List volumes. +#. List volumes and confirm that it is in the ``available`` state. .. code-block:: console $ openstack volume list - +--------------------------------------+--------------+-----------+------+-------------+ - | ID | Display Name | Status | Size | Attached to | - +--------------------------------------+--------------+-----------+------+-------------+ - | b8f7bbec-6274-4cd7-90e7-60916a5e75d4 | my-volume | available | 8 | | - +--------------------------------------+--------------+-----------+------+-------------+ + +--------------------------------------+-------------+-----------+------+-------------+ + | ID | Name | Status | Size | Attached to | + +--------------------------------------+-------------+-----------+------+-------------+ + | 006efd7a-48a8-4c75-bafb-6b483199d284 | test-volume | available | 8 | | + +--------------------------------------+-------------+-----------+------+-------------+ -#. Boot an instance from an image and attach the empty volume to the - instance. +#. Create an instance, specifying the volume as a block device to attach. .. code-block:: console - $ openstack server create --flavor 2 --image 98901246-af91-43d8-b5e6-a4506aa8f369 \ - --block-device source=volume,id=d620d971-b160-4c4e-8652-2513d74e2080,dest=volume,shutdown=preserve \ - myInstanceWithVolume - +--------------------------------------+--------------------------------------------+ - | Field | Value | - +--------------------------------------+--------------------------------------------+ - | OS-DCF:diskConfig | MANUAL | - | OS-EXT-AZ:availability_zone | nova | - | OS-EXT-SRV-ATTR:host | - | - | OS-EXT-SRV-ATTR:hypervisor_hostname | - | - | OS-EXT-SRV-ATTR:instance_name | instance-00000004 | - | OS-EXT-STS:power_state | 0 | - | OS-EXT-STS:task_state | scheduling | - | OS-EXT-STS:vm_state | building | - | OS-SRV-USG:launched_at | - | - | OS-SRV-USG:terminated_at | - | - | accessIPv4 | | - | accessIPv6 | | - | adminPass | ZaiYeC8iucgU | - | config_drive | | - | created | 2014-05-09T16:34:50Z | - | flavor | m1.small (2) | - | hostId | | - | id | 1e1797f3-1662-49ff-ae8c-a77e82ee1571 | - | image | cirros-0.3.5-x86_64-uec (98901246-af91-... | - | key_name | - | - | metadata | {} | - | name | myInstanceWithVolume | - | os-extended-volumes:volumes_attached | [{"id": "d620d971-b160-4c4e-8652-2513d7... 
| - | progress | 0 | - | security_groups | default | - | status | BUILD | - | tenant_id | ccef9e62b1e645df98728fb2b3076f27 | - | updated | 2014-05-09T16:34:51Z | - | user_id | fef060ae7bfd4024b3edb97dff59017a | - +--------------------------------------+--------------------------------------------+ + $ openstack server create \ + --flavor $FLAVOR --image $IMAGE --network $NETWORK \ + --block-device uuid=006efd7a-48a8-4c75-bafb-6b483199d284,source_type=volume,destination_type=volume \ + --wait test-server + +-------------------------------------+-----------------------------------------------------------------+ + | Field | Value | + +-------------------------------------+-----------------------------------------------------------------+ + | OS-DCF:diskConfig | MANUAL | + | OS-EXT-AZ:availability_zone | nova | + | OS-EXT-SRV-ATTR:host | devstack-ubuntu2004 | + | OS-EXT-SRV-ATTR:hypervisor_hostname | devstack-ubuntu2004 | + | OS-EXT-SRV-ATTR:instance_name | instance-00000008 | + | OS-EXT-STS:power_state | Running | + | OS-EXT-STS:task_state | None | + | OS-EXT-STS:vm_state | active | + | OS-SRV-USG:launched_at | 2021-06-01T15:13:48.000000 | + | OS-SRV-USG:terminated_at | None | + | accessIPv4 | | + | accessIPv6 | | + | addresses | private=10.0.0.55, fde3:4790:906b:0:f816:3eff:fed5:ebd9 | + | adminPass | CZ76LZ9pNXzt | + | config_drive | | + | created | 2021-06-01T15:13:37Z | + | flavor | m1.tiny (1) | + | hostId | 425d65fe75c1e53cecbd32d3e686314235507b6edebbeaa56ff341c7 | + | id | 446d1b00-b729-49b3-9dab-40a3fbe190cf | + | image | cirros-0.5.1-x86_64-disk (44d317a3-6183-4063-868b-aa0728576f5f) | + | key_name | None | + | name | test-server | + | progress | 0 | + | project_id | ae93f388f934458c8e6583f8ab0dba2d | + | properties | | + | security_groups | name='default' | + | status | ACTIVE | + | updated | 2021-06-01T15:13:49Z | + | user_id | 0a4d2edb9042412ba4f719a547d42f79 | + | volumes_attached | id='006efd7a-48a8-4c75-bafb-6b483199d284' | + +-------------------------------------+-----------------------------------------------------------------+ + +#. List volumes once again to ensure the status has changed to ``in-use`` and + the volume is correctly reporting the attachment. + + .. code-block:: console + + $ openstack volume list + +--------------------------------------+-------------+--------+------+--------------------------------------+ + | ID | Name | Status | Size | Attached to | + +--------------------------------------+-------------+--------+------+--------------------------------------+ + | 006efd7a-48a8-4c75-bafb-6b483199d284 | test-volume | in-use | 1 | Attached to test-server on /dev/vdb | + +--------------------------------------+-------------+--------+------+--------------------------------------+ .. _Create_volume_from_image_and_boot_instance: -Create volume from image and boot instance -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +Boot instance from volume +------------------------- -You can create a volume from an existing image, volume, or snapshot. -This procedure shows you how to create a volume from an image, and use -the volume to boot an instance. +You can create a bootable volume from an existing image, volume, or snapshot. +This procedure shows you how to create a volume from an image and use the +volume to boot an instance. -#. List the available images. +#. List available images, noting the ID of the image that you wish to use. .. 
code-block:: console $ openstack image list - +-----------------+---------------------------------+--------+ - | ID | Name | Status | - +-----------------+---------------------------------+--------+ - | 484e05af-a14... | Fedora-x86_64-20-20131211.1-sda | active | - | 98901246-af9... | cirros-0.3.5-x86_64-uec | active | - | b6e95589-7eb... | cirros-0.3.5-x86_64-uec-kernel | active | - | c90893ea-e73... | cirros-0.3.5-x86_64-uec-ramdisk | active | - +-----------------+---------------------------------+--------+ - - Note the ID of the image that you want to use to create a volume. - - If you want to create a volume to a specific storage backend, you need - to use an image which has *cinder_img_volume_type* property. - In this case, a new volume will be created as *storage_backend1* volume - type. - - .. code-block:: console + +--------------------------------------+--------------------------+--------+ + | ID | Name | Status | + +--------------------------------------+--------------------------+--------+ + | 44d317a3-6183-4063-868b-aa0728576f5f | cirros-0.5.1-x86_64-disk | active | + +--------------------------------------+--------------------------+--------+ - $ openstack image show 98901246-af9d-4b61-bea8-09cc6dc41829 - +------------------+------------------------------------------------------+ - | Field | Value | - +------------------+------------------------------------------------------+ - | checksum | ee1eca47dc88f4879d8a229cc70a07c6 | - | container_format | bare | - | created_at | 2016-10-08T14:59:05Z | - | disk_format | qcow2 | - | file | /v2/images/9fef3b2d-c35d-4b61-bea8-09cc6dc41829/file | - | id | 98901246-af9d-4b61-bea8-09cc6dc41829 | - | min_disk | 0 | - | min_ram | 0 | - | name | cirros-0.3.5-x86_64-uec | - | owner | 8d8ef3cdf2b54c25831cbb409ad9ae86 | - | protected | False | - | schema | /v2/schemas/image | - | size | 13287936 | - | status | active | - | tags | | - | updated_at | 2016-10-19T09:12:52Z | - | virtual_size | None | - | visibility | public | - +------------------+------------------------------------------------------+ - -#. List the available flavors. +#. Create an instance, using the chosen image and requesting "boot from volume" + behavior. .. 
code-block:: console - $ openstack flavor list - +-----+-----------+-------+------+-----------+-------+-----------+ - | ID | Name | RAM | Disk | Ephemeral | VCPUs | Is_Public | - +-----+-----------+-------+------+-----------+-------+-----------+ - | 1 | m1.tiny | 512 | 1 | 0 | 1 | True | - | 2 | m1.small | 2048 | 20 | 0 | 1 | True | - | 3 | m1.medium | 4096 | 40 | 0 | 2 | True | - | 4 | m1.large | 8192 | 80 | 0 | 4 | True | - | 5 | m1.xlarge | 16384 | 160 | 0 | 8 | True | - +-----+-----------+-------+------+-----------+-------+-----------+ + $ openstack server create \ + --flavor $FLAVOR --network $NETWORK \ + --image 44d317a3-6183-4063-868b-aa0728576f5f --boot-from-volume 10 \ + --wait test-server + +-------------------------------------+----------------------------------------------------------+ + | Field | Value | + +-------------------------------------+----------------------------------------------------------+ + | OS-DCF:diskConfig | MANUAL | + | OS-EXT-AZ:availability_zone | nova | + | OS-EXT-SRV-ATTR:host | devstack-ubuntu2004 | + | OS-EXT-SRV-ATTR:hypervisor_hostname | devstack-ubuntu2004 | + | OS-EXT-SRV-ATTR:instance_name | instance-0000000c | + | OS-EXT-STS:power_state | Running | + | OS-EXT-STS:task_state | None | + | OS-EXT-STS:vm_state | active | + | OS-SRV-USG:launched_at | 2021-06-01T16:02:06.000000 | + | OS-SRV-USG:terminated_at | None | + | accessIPv4 | | + | accessIPv6 | | + | addresses | private=10.0.0.3, fde3:4790:906b:0:f816:3eff:fe40:bdd | + | adminPass | rqT3RUYYa5H5 | + | config_drive | | + | created | 2021-06-01T16:01:55Z | + | flavor | m1.tiny (1) | + | hostId | 425d65fe75c1e53cecbd32d3e686314235507b6edebbeaa56ff341c7 | + | id | 69b09fa0-6f24-4924-8311-c9bcdeb90dcb | + | image | N/A (booted from volume) | + | key_name | None | + | name | test-server | + | progress | 0 | + | project_id | ae93f388f934458c8e6583f8ab0dba2d | + | properties | | + | security_groups | name='default' | + | status | ACTIVE | + | updated | 2021-06-01T16:02:07Z | + | user_id | 0a4d2edb9042412ba4f719a547d42f79 | + | volumes_attached | id='673cbfcb-351c-42cb-9659-bca5b2a0361c' | + +-------------------------------------+----------------------------------------------------------+ - Note the ID of the flavor that you want to use to create a volume. + .. note:: -#. To create a bootable volume from an image and launch an instance from - this volume, use the ``--block-device`` parameter. + Volumes created in this manner will not be deleted when the server is + deleted and will need to be manually deleted afterwards. If you wish to + change this behavior, you will need to pre-create the volume manually as + discussed below. - For example: +#. List volumes to ensure a new volume has been created and that its status is + ``in-use`` and the volume is correctly reporting the attachment. .. code-block:: console - $ openstack server create --flavor FLAVOR --block-device \ - source=SOURCE,id=ID,dest=DEST,size=SIZE,shutdown=PRESERVE,bootindex=INDEX \ - NAME - - The parameters are: - - - ``--flavor`` - The flavor ID or name. - - - ``--block-device`` - source=SOURCE,id=ID,dest=DEST,size=SIZE,shutdown=PRESERVE,bootindex=INDEX - - **source=SOURCE** - The type of object used to create the block device. Valid values - are ``volume``, ``snapshot``, ``image``, and ``blank``. - - **id=ID** - The ID of the source object. - - **dest=DEST** - The type of the target virtual device. Valid values are ``volume`` - and ``local``. - - **size=SIZE** - The size of the volume that is created. 
- - **shutdown={preserve\|remove}** - What to do with the volume when the instance is deleted. - ``preserve`` does not delete the volume. ``remove`` deletes the - volume. - - **bootindex=INDEX** - Orders the boot disks. Use ``0`` to boot from this volume. - - - ``NAME``. The name for the server. - - See :doc:`block-device-mapping` for more details on these parameters. -#. Create a bootable volume from an image. Cinder makes a volume bootable - when ``--image`` parameter is passed. + $ openstack volume list + +--------------------------------------+------+--------+------+--------------------------------------+ + | ID | Name | Status | Size | Attached to | + +--------------------------------------+------+--------+------+--------------------------------------+ + | 673cbfcb-351c-42cb-9659-bca5b2a0361c | | in-use | 1 | Attached to test-server on /dev/vda | + +--------------------------------------+------+--------+------+--------------------------------------+ + + $ openstack server volume list test-server + +--------------------------------------+----------+--------------------------------------+--------------------------------------+ + | ID | Device | Server ID | Volume ID | + +--------------------------------------+----------+--------------------------------------+--------------------------------------+ + | 673cbfcb-351c-42cb-9659-bca5b2a0361c | /dev/vda | 9c7f68d4-4d84-4c1e-83af-b8c6a56ad005 | 673cbfcb-351c-42cb-9659-bca5b2a0361c | + +--------------------------------------+----------+--------------------------------------+--------------------------------------+ + +Rather than relying on nova to create the volume from the image, it is also +possible to pre-create the volume before creating the instance. This can be +useful when you want more control over the created volume, such as enabling +encryption. + +#. List available images, noting the ID of the image that you wish to use. .. code-block:: console - $ openstack volume create --image IMAGE_ID --size SIZE_IN_GB bootable_volume + $ openstack image list + +--------------------------------------+--------------------------+--------+ + | ID | Name | Status | + +--------------------------------------+--------------------------+--------+ + | 44d317a3-6183-4063-868b-aa0728576f5f | cirros-0.5.1-x86_64-disk | active | + +--------------------------------------+--------------------------+--------+ - .. note:: +#. Create a bootable volume from the chosen image. - A bootable encrypted volume can also be created by adding the - `--type ENCRYPTED_VOLUME_TYPE` parameter to the volume create command: + Cinder makes a volume bootable when ``--image`` parameter is passed. .. code-block:: console - $ openstack volume create --type ENCRYPTED_VOLUME_TYPE --image IMAGE_ID --size SIZE_IN_GB bootable_volume + $ openstack volume create \ + --image 44d317a3-6183-4063-868b-aa0728576f5f --size 10 \ + test-volume +---------------------+--------------------------------------+ | Field | Value | +---------------------+--------------------------------------+ @@ -274,100 +276,137 @@ the volume to boot an instance. 
| availability_zone | nova | | bootable | false | | consistencygroup_id | None | - | created_at | 2017-06-13T18:59:57.626872 | + | created_at | 2021-06-01T15:40:56.000000 | | description | None | - | encrypted | True | - | id | ded57a86-5b51-43ab-b70e-9bc0f91ef4ab | + | encrypted | False | + | id | 9c7f68d4-4d84-4c1e-83af-b8c6a56ad005 | + | migration_status | None | | multiattach | False | - | name | bootable_volume | + | name | test-volume | | properties | | | replication_status | None | - | size | 1 | + | size | 10 | | snapshot_id | None | | source_volid | None | | status | creating | - | type | LUKS | + | type | lvmdriver-1 | | updated_at | None | - | user_id | 459ae34ffcd94edab0c128ed616bb19f | + | user_id | 0a4d2edb9042412ba4f719a547d42f79 | +---------------------+--------------------------------------+ + .. note:: + + If you want to create a volume to a specific storage backend, you need + to use an image which has the ``cinder_img_volume_type`` property. For + more information, refer to the :cinder-doc:`cinder docs + `. - This requires an encrypted volume type, which must be created ahead of - time by an admin. Refer to - :horizon-doc:`admin/manage-volumes.html#create-an-encrypted-volume-type`. - in the OpenStack Horizon Administration Guide. + .. note:: + + A bootable encrypted volume can also be created by adding the + `--type ENCRYPTED_VOLUME_TYPE` parameter to the volume create command. + For example: + + .. code-block:: console + + $ openstack volume create \ + --type ENCRYPTED_VOLUME_TYPE --image IMAGE --size SIZE \ + test-volume + + This requires an encrypted volume type which must be created ahead of + time by an admin. Refer to + :horizon-doc:`the horizon documentation `. + for more information. -#. Create a VM from previously created bootable volume. The volume is not - deleted when the instance is terminated. +#. Create an instance, specifying the volume as the boot device. .. code-block:: console - $ openstack server create --flavor 2 --volume VOLUME_ID \ - --block-device source=volume,id=$VOLUME_ID,dest=volume,size=10,shutdown=preserve,bootindex=0 \ - myInstanceFromVolume - +--------------------------------------+--------------------------------+ - | Field | Value | - +--------------------------------------+--------------------------------+ - | OS-EXT-STS:task_state | scheduling | - | image | Attempt to boot from volume | - | | - no image supplied | - | OS-EXT-STS:vm_state | building | - | OS-EXT-SRV-ATTR:instance_name | instance-00000003 | - | OS-SRV-USG:launched_at | None | - | flavor | m1.small | - | id | 2e65c854-dba9-4f68-8f08-fe3... | - | security_groups | [{u'name': u'default'}] | - | user_id | 352b37f5c89144d4ad053413926... | - | OS-DCF:diskConfig | MANUAL | - | accessIPv4 | | - | accessIPv6 | | - | progress | 0 | - | OS-EXT-STS:power_state | 0 | - | OS-EXT-AZ:availability_zone | nova | - | config_drive | | - | status | BUILD | - | updated | 2014-02-02T13:29:54Z | - | hostId | | - | OS-EXT-SRV-ATTR:host | None | - | OS-SRV-USG:terminated_at | None | - | key_name | None | - | OS-EXT-SRV-ATTR:hypervisor_hostname | None | - | name | myInstanceFromVolume | - | adminPass | TzjqyGsRcJo9 | - | tenant_id | f7ac731cc11f40efbc03a9f9e1d... | - | created | 2014-02-02T13:29:53Z | - | os-extended-volumes:volumes_attached | [{"id": "2fff50ab..."}] | - | metadata | {} | - +--------------------------------------+--------------------------------+ - -#. List volumes to see the bootable volume and its attached - ``myInstanceFromVolume`` instance. 
+ $ openstack server create \ + --flavor $FLAVOR --network $NETWORK \ + --volume 9c7f68d4-4d84-4c1e-83af-b8c6a56ad005\ + --wait test-server + +-------------------------------------+----------------------------------------------------------+ + | Field | Value | + +-------------------------------------+----------------------------------------------------------+ + | OS-DCF:diskConfig | MANUAL | + | OS-EXT-AZ:availability_zone | nova | + | OS-EXT-SRV-ATTR:host | devstack-ubuntu2004 | + | OS-EXT-SRV-ATTR:hypervisor_hostname | devstack-ubuntu2004 | + | OS-EXT-SRV-ATTR:instance_name | instance-0000000a | + | OS-EXT-STS:power_state | Running | + | OS-EXT-STS:task_state | None | + | OS-EXT-STS:vm_state | active | + | OS-SRV-USG:launched_at | 2021-06-01T15:43:21.000000 | + | OS-SRV-USG:terminated_at | None | + | accessIPv4 | | + | accessIPv6 | | + | addresses | private=10.0.0.47, fde3:4790:906b:0:f816:3eff:fe89:b004 | + | adminPass | ueX74zzHWqL4 | + | config_drive | | + | created | 2021-06-01T15:43:13Z | + | flavor | m1.tiny (1) | + | hostId | 425d65fe75c1e53cecbd32d3e686314235507b6edebbeaa56ff341c7 | + | id | 367b7d42-627c-4d10-a2a0-f759501499a6 | + | image | N/A (booted from volume) | + | key_name | None | + | name | test-server | + | progress | 0 | + | project_id | ae93f388f934458c8e6583f8ab0dba2d | + | properties | | + | security_groups | name='default' | + | status | ACTIVE | + | updated | 2021-06-01T15:43:22Z | + | user_id | 0a4d2edb9042412ba4f719a547d42f79 | + | volumes_attached | id='9c7f68d4-4d84-4c1e-83af-b8c6a56ad005' | + +-------------------------------------+----------------------------------------------------------+ + + .. note:: + + The example here uses the ``--volume`` option for simplicity. The + ``--block-device`` option could also be used for more granular control + over the parameters. See the `openstack server create`__ documentation for + details. + + .. __: https://docs.openstack.org/python-openstackclient/latest/cli/command-objects/server.html#server-create + +#. List volumes once again to ensure the status has changed to ``in-use`` and + the volume is correctly reporting the attachment. .. 
code-block:: console

       $ openstack volume list
-      +---------------------+-----------------+--------+------+---------------------------------+
-      | ID                  | Display Name    | Status | Size | Attached to                     |
-      +---------------------+-----------------+--------+------+---------------------------------+
-      | c612f739-8592-44c4- | bootable_volume | in-use | 10   | Attached to myInstanceFromVolume|
-      | b7d4-0fee2fe1da0c   |                 |        |      | on /dev/vda                     |
-      +---------------------+-----------------+--------+------+---------------------------------+
+      +--------------------------------------+-------------+--------+------+--------------------------------------+
+      | ID                                   | Name        | Status | Size | Attached to                          |
+      +--------------------------------------+-------------+--------+------+--------------------------------------+
+      | 9c7f68d4-4d84-4c1e-83af-b8c6a56ad005 | test-volume | in-use | 10   | Attached to test-server on /dev/vda  |
+      +--------------------------------------+-------------+--------+------+--------------------------------------+
+
+      $ openstack server volume list test-server
+      +--------------------------------------+----------+--------------------------------------+--------------------------------------+
+      | ID                                   | Device   | Server ID                            | Volume ID                            |
+      +--------------------------------------+----------+--------------------------------------+--------------------------------------+
+      | 9c7f68d4-4d84-4c1e-83af-b8c6a56ad005 | /dev/vda | c2368c38-6a7d-4fe8-bc4e-483e90e7608b | 9c7f68d4-4d84-4c1e-83af-b8c6a56ad005 |
+      +--------------------------------------+----------+--------------------------------------+--------------------------------------+

 .. _Attach_swap_or_ephemeral_disk_to_an_instance:

 Attach swap or ephemeral disk to an instance
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+--------------------------------------------

-Use the ``nova boot`` ``--swap`` parameter to attach a swap disk on boot
-or the ``nova boot`` ``--ephemeral`` parameter to attach an ephemeral
-disk on boot. When you terminate the instance, both disks are deleted.
+Use the ``--swap`` option of the ``openstack server create`` command to attach
+a swap disk on boot or the ``--ephemeral`` option to attach an ephemeral disk
+on boot. The latter can be specified multiple times. When you terminate the
+instance, both disks are deleted.

 Boot an instance with a 512 MB swap disk and 2 GB ephemeral disk.

 .. code-block:: console

-   $ nova boot --flavor FLAVOR --image IMAGE_ID --swap 512 \
-     --ephemeral size=2 NAME
+   $ openstack server create \
+     --flavor FLAVOR --image IMAGE --network NETWORK \
+     --ephemeral size=2 --swap 512 \
+     --wait test-server

 .. note::
diff --git a/doc/source/user/launch-instance-using-ISO-image.rst b/doc/source/user/launch-instance-using-ISO-image.rst
index 2e018b7c0fe..612a56917b8 100644
--- a/doc/source/user/launch-instance-using-ISO-image.rst
+++ b/doc/source/user/launch-instance-using-ISO-image.rst
@@ -114,7 +114,7 @@ using ISO image actually functional.
$ openstack volume list +--------------------------+-------------------------+-----------+------+-------------+ - | ID | Display Name | Status | Size | Attached to | + | ID | Name | Status | Size | Attached to | +--------------------------+-------------------------+-----------+------+-------------+ | 8edd7c97-1276-47a5-9563- |dc01d873-d0f1-40b6-bfcc- | available | 10 | | | 1025f4264e4f | 26a8d955a1d9-blank-vol | | | | diff --git a/doc/source/user/launch-instances.rst b/doc/source/user/launch-instances.rst index 57cbb8822c5..00738521616 100644 --- a/doc/source/user/launch-instances.rst +++ b/doc/source/user/launch-instances.rst @@ -16,10 +16,10 @@ Before you can launch an instance, gather the following parameters: available hardware configuration for a server. It defines the size of a virtual server that can be launched. -- Any **user data** files. A user data file is a special key in the - metadata service that holds a file that cloud-aware applications in - the guest instance can access. For example, one application that uses - user data is the +- Any **user data** files. A :ref:`user data ` file is a + special key in the metadata service that holds a file that cloud-aware + applications in the guest instance can access. For example, one application + that uses user data is the `cloud-init `__ system, which is an open-source package from Ubuntu that is available on various Linux distributions and that handles early initialization of @@ -162,7 +162,7 @@ You can launch an instance from various sources. .. toctree:: :maxdepth: 2 - launch-instance-from-image.rst - launch-instance-from-volume.rst - launch-instance-using-ISO-image.rst + launch-instance-from-image + launch-instance-from-volume + launch-instance-using-ISO-image diff --git a/doc/source/user/metadata-service.rst b/doc/source/user/metadata-service.rst deleted file mode 100644 index 25405cd2062..00000000000 --- a/doc/source/user/metadata-service.rst +++ /dev/null @@ -1,158 +0,0 @@ -================ -Metadata service -================ - -This document provides end user information about the metadata service. For -deployment information about the metadata service, see the -:ref:`admin guide `. - -Compute uses a metadata service for virtual machine instances to retrieve -instance-specific data. Instances access the metadata service at -``http://169.254.169.254``. The metadata service supports two sets of APIs: an -OpenStack metadata API and an EC2-compatible API. Both APIs are versioned by -date. - -To retrieve a list of supported versions for the OpenStack metadata API, make a -GET request to ``http://169.254.169.254/openstack``: - -.. code-block:: console - - $ curl http://169.254.169.254/openstack - 2012-08-10 - 2013-04-04 - 2013-10-17 - 2015-10-15 - 2016-06-30 - 2016-10-06 - 2017-02-22 - 2018-08-27 - latest - -To list supported versions for the EC2-compatible metadata API, make a GET -request to ``http://169.254.169.254``: - -.. code-block:: console - - $ curl http://169.254.169.254 - 1.0 - 2007-01-19 - 2007-03-01 - 2007-08-29 - 2007-10-10 - 2007-12-15 - 2008-02-01 - 2008-09-01 - 2009-04-04 - latest - -If you write a consumer for one of these APIs, always attempt to access the -most recent API version supported by your consumer first, then fall back to an -earlier version if the most recent one is not available. - -Metadata from the OpenStack API is distributed in JSON format. To retrieve the -metadata, make a GET request to -``http://169.254.169.254/openstack/2012-08-10/meta_data.json``: - -.. 
code-block:: console - - $ curl http://169.254.169.254/openstack/2012-08-10/meta_data.json - -.. code-block:: json - - { - "uuid": "d8e02d56-2648-49a3-bf97-6be8f1204f38", - "availability_zone": "nova", - "hostname": "test.novalocal", - "launch_index": 0, - "meta": { - "priority": "low", - "role": "webserver" - }, - "project_id": "f7ac731cc11f40efbc03a9f9e1d1d21f", - "public_keys": { - "mykey": "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAAAgQDYVEprvtYJXVOBN0XNKV\ - VRNCRX6BlnNbI+USLGais1sUWPwtSg7z9K9vhbYAPUZcq8c/s5S9dg5vTH\ - bsiyPCIDOKyeHba4MUJq8Oh5b2i71/3BISpyxTBH/uZDHdslW2a+SrPDCe\ - uMMoss9NFhBdKtDkdG9zyi0ibmCP6yMdEX8Q== Generated by Nova\n" - }, - "name": "test" - } - -Instances also retrieve user data (passed as the ``user_data`` parameter in the -API call or by the ``--user_data`` flag in the :command:`openstack server -create` command) through the metadata service, by making a GET request to -``http://169.254.169.254/openstack/2012-08-10/user_data``: - -.. code-block:: console - - $ curl http://169.254.169.254/openstack/2012-08-10/user_data - #!/bin/bash - echo 'Extra user data here' - -The metadata service has an API that is compatible with version 2009-04-04 of -the `Amazon EC2 metadata service -`__. -This means that virtual machine images designed for EC2 will work properly with -OpenStack. - -The EC2 API exposes a separate URL for each metadata element. Retrieve a -listing of these elements by making a GET query to -``http://169.254.169.254/2009-04-04/meta-data/``: - -.. code-block:: console - - $ curl http://169.254.169.254/2009-04-04/meta-data/ - ami-id - ami-launch-index - ami-manifest-path - block-device-mapping/ - hostname - instance-action - instance-id - instance-type - kernel-id - local-hostname - local-ipv4 - placement/ - public-hostname - public-ipv4 - public-keys/ - ramdisk-id - reservation-id - security-groups - -.. code-block:: console - - $ curl http://169.254.169.254/2009-04-04/meta-data/block-device-mapping/ - ami - -.. code-block:: console - - $ curl http://169.254.169.254/2009-04-04/meta-data/placement/ - availability-zone - -.. code-block:: console - - $ curl http://169.254.169.254/2009-04-04/meta-data/public-keys/ - 0=mykey - -Instances can retrieve the public SSH key (identified by keypair name when a -user requests a new instance) by making a GET request to -``http://169.254.169.254/2009-04-04/meta-data/public-keys/0/openssh-key``: - -.. code-block:: console - - $ curl http://169.254.169.254/2009-04-04/meta-data/public-keys/0/openssh-key - ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAAAgQDYVEprvtYJXVOBN0XNKVVRNCRX6BlnNbI+US\ - LGais1sUWPwtSg7z9K9vhbYAPUZcq8c/s5S9dg5vTHbsiyPCIDOKyeHba4MUJq8Oh5b2i71/3B\ - ISpyxTBH/uZDHdslW2a+SrPDCeuMMoss9NFhBdKtDkdG9zyi0ibmCP6yMdEX8Q== Generated\ - by Nova - -Instances can retrieve user data by making a GET request to -``http://169.254.169.254/2009-04-04/user-data``: - -.. code-block:: console - - $ curl http://169.254.169.254/2009-04-04/user-data - #!/bin/bash - echo 'Extra user data here' diff --git a/doc/source/user/metadata.rst b/doc/source/user/metadata.rst new file mode 100644 index 00000000000..f5f39231ace --- /dev/null +++ b/doc/source/user/metadata.rst @@ -0,0 +1,504 @@ +======== +Metadata +======== + +Nova presents configuration information to instances it starts via a mechanism +called metadata. These mechanisms are widely used via helpers such as +`cloud-init`_ to specify things like the root password the instance should use. 
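+
+As a concrete illustration of that cloud-init use case, the user data could be
+a ``#cloud-config`` document that sets the root password. The following is a
+sketch only: the file name is arbitrary, and the exact cloud-init modules
+available depend on the version shipped in the guest image.
+
+.. code-block:: console
+
+   $ cat my-user-data.txt
+   #cloud-config
+   # Sketch: set a root password and allow password-based SSH logins.
+   chpasswd:
+     list: |
+       root:N0vaDemoPassw0rd
+     expire: false
+   ssh_pwauth: true
+   $ openstack server create --image ubuntu-cloudimage --flavor 1 \
+       --user-data my-user-data.txt MYINSTANCE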
+ +This metadata is made available via either a *config drive* or the *metadata +service* and can be somewhat customised by the user using the *user data* +feature. This guide provides an overview of these features along with a summary +of the types of metadata available. + +.. _cloud-init: https://cloudinit.readthedocs.io/en/latest/ + + +Types of metadata +----------------- + +There are three separate groups of users who need to be able to specify +metadata for an instance. + +User provided data +~~~~~~~~~~~~~~~~~~ + +The user who booted the instance can pass metadata to the instance in several +ways. For authentication keypairs, the keypairs functionality of the nova API +can be used to upload a key and then specify that key during the nova boot API +request. For less structured data, a small opaque blob of data may be passed +via the :ref:`user data ` feature of the nova API. Examples +of such unstructured data would be the puppet role that the instance should use, +or the HTTP address of a server from which to fetch post-boot configuration +information. + +Nova provided data +~~~~~~~~~~~~~~~~~~ + +Nova itself needs to pass information to the instance via its internal +implementation of the metadata system. Such information includes the requested +hostname for the instance and the availability zone the instance is in. This +happens by default and requires no configuration by the user or deployer. + +Nova provides both an :ref:`OpenStack metadata API ` +and an :ref:`EC2-compatible API `. Both the OpenStack +metadata and EC2-compatible APIs are versioned by date. These are described +later. + +Deployer provided data +~~~~~~~~~~~~~~~~~~~~~~ + +A deployer of OpenStack may need to pass data to an instance. It is also +possible that this data is not known to the user starting the instance. An +example might be a cryptographic token to be used to register the instance with +Active Directory post boot -- the user starting the instance should not have +access to Active Directory to create this token, but the nova deployment might +have permissions to generate the token on the user's behalf. This is possible +using the :ref:`vendordata ` feature, which must be +configured by your cloud operator. + + + +.. _metadata-service: + +The metadata service +-------------------- + +.. note:: + + This section provides end user information about the metadata service. For + deployment information about the metadata service, refer to the :doc:`admin + guide `. + +The *metadata service* provides a way for instances to retrieve +instance-specific data via a REST API. Instances access this service at +``169.254.169.254`` or at ``fe80::a9fe:a9fe``. All types of metadata, +be it user-, nova- or vendor-provided, can be accessed via this service. + +.. versionchanged:: 22.0.0 + + Starting with the Victoria release the metadata service is accessible + over IPv6 at the link-local address ``fe80::a9fe:a9fe``. + +.. note:: + + As with all IPv6 link-local addresses, the metadata IPv6 + address is not complete without a zone identifier (in a Linux + guest that is usually the interface name concatenated after + a percent sign). Please also note that in URLs you should + URL-encode the percent sign itself. For example, assuming + that the primary network interface in the guest is ``ens2`` + substitute ``http://[fe80::a9fe:a9fe%25ens2]:80/...`` for + ``http://169.254.169.254/...``. 
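+
+Concretely, the substitution described in the note can be exercised with
+``curl`` from inside the guest. A sketch, assuming the primary interface is
+named ``ens2`` (substitute your own interface name); the ``-g`` flag stops
+curl from treating the square brackets as a glob pattern. The request returns
+the same version listing as the IPv4 form shown in the next section:
+
+.. code-block:: console
+
+   $ curl -g "http://[fe80::a9fe:a9fe%25ens2]/openstack"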
+ +Using the metadata service +~~~~~~~~~~~~~~~~~~~~~~~~~~ + +To retrieve a list of supported versions for the :ref:`OpenStack metadata API +`, make a GET request to +``http://169.254.169.254/openstack``, which will return a list of directories: + +.. code-block:: console + + $ curl http://169.254.169.254/openstack + 2012-08-10 + 2013-04-04 + 2013-10-17 + 2015-10-15 + 2016-06-30 + 2016-10-06 + 2017-02-22 + 2018-08-27 + latest + +Refer to :ref:`OpenStack format metadata ` for +information on the contents and structure of these directories. + +To list supported versions for the :ref:`EC2-compatible metadata API +`, make a GET request to ``http://169.254.169.254``, which +will, once again, return a list of directories: + +.. code-block:: console + + $ curl http://169.254.169.254 + 1.0 + 2007-01-19 + 2007-03-01 + 2007-08-29 + 2007-10-10 + 2007-12-15 + 2008-02-01 + 2008-09-01 + 2009-04-04 + latest + +Refer to :ref:`EC2-compatible metadata ` for information on +the contents and structure of these directories. + + +.. _metadata-config-drive: + +Config drives +------------- + +.. note:: + + This section provides end user information about config drives. For + deployment information about the config drive feature, refer to the + :doc:`admin guide `. + +*Config drives* are special drives that are attached to an instance when +it boots. The instance can mount this drive and read files from it to get +information that is normally available through the metadata service. + +One use case for using the config drive is to pass a networking configuration +when you do not use DHCP to assign IP addresses to instances. For example, you +might pass the IP address configuration for the instance through the config +drive, which the instance can mount and access before you configure the network +settings for the instance. + +Using the config drive +~~~~~~~~~~~~~~~~~~~~~~ + +To enable the config drive for an instance, pass the ``--config-drive true`` +parameter to the :command:`openstack server create` command. + +The following example enables the config drive and passes a user data file and +two key/value metadata pairs, all of which are accessible from the config +drive: + +.. code-block:: console + + $ openstack server create --config-drive true --image my-image-name \ + --flavor 1 --key-name mykey --user-data ./my-user-data.txt \ + --property role=webservers --property essential=false MYINSTANCE + +.. note:: + + The Compute service can be configured to always create a config drive. For + more information, refer to :doc:`the admin guide `. + +If your guest operating system supports accessing disk by label, you can mount +the config drive as the ``/dev/disk/by-label/configurationDriveVolumeLabel`` +device. In the following example, the config drive has the ``config-2`` volume +label: + +.. code-block:: console + + # mkdir -p /mnt/config + # mount /dev/disk/by-label/config-2 /mnt/config + +If your guest operating system does not use ``udev``, the ``/dev/disk/by-label`` +directory is not present. You can use the :command:`blkid` command to identify +the block device that corresponds to the config drive. For example: + +.. code-block:: console + + # blkid -t LABEL="config-2" -odevice + /dev/vdb + +Once identified, you can mount the device: + +.. code-block:: console + + # mkdir -p /mnt/config + # mount /dev/vdb /mnt/config + +Once mounted, you can examine the contents of the config drive: + +.. code-block:: console + + $ cd /mnt/config + $ find . -maxdepth 2 + . 
+ ./ec2 + ./ec2/2009-04-04 + ./ec2/latest + ./openstack + ./openstack/2012-08-10 + ./openstack/2013-04-04 + ./openstack/2013-10-17 + ./openstack/2015-10-15 + ./openstack/2016-06-30 + ./openstack/2016-10-06 + ./openstack/2017-02-22 + ./openstack/latest + +The files that appear on the config drive depend on the arguments that you pass +to the :command:`openstack server create` command. The format of this directory +is the same as that provided by the :ref:`metadata service `, +with the exception that the EC2-compatible metadata is now located in the +``ec2`` directory instead of the root (``/``) directory. Refer to the +:ref:`metadata-openstack-format` and :ref:`metadata-ec2-format` sections for +information about the format of the files and subdirectories within these +directories. + + +Nova metadata +------------- + +As noted previously, nova provides its metadata in two formats: OpenStack format +and EC2-compatible format. + +.. _metadata-openstack-format: + +OpenStack format metadata +~~~~~~~~~~~~~~~~~~~~~~~~~ + +.. versionchanged:: 12.0.0 + + Support for network metadata was added in the Liberty release. + +Metadata from the OpenStack API is distributed in JSON format. There are two +files provided for each version: ``meta_data.json`` and ``network_data.json``. +The ``meta_data.json`` file contains nova-specific information, while the +``network_data.json`` file contains information retrieved from neutron. For +example: + +.. code-block:: console + + $ curl http://169.254.169.254/openstack/2018-08-27/meta_data.json + +.. code-block:: json + + { + "random_seed": "yu5ZnkqF2CqnDZVAfZgarG...", + "availability_zone": "nova", + "keys": [ + { + "data": "ssh-rsa AAAAB3NzaC1y...== Generated by Nova\n", + "type": "ssh", + "name": "mykey" + } + ], + "hostname": "test.novalocal", + "launch_index": 0, + "meta": { + "priority": "low", + "role": "webserver" + }, + "devices": [ + { + "type": "nic", + "bus": "pci", + "address": "0000:00:02.0", + "mac": "00:11:22:33:44:55", + "tags": ["trusted"] + }, + { + "type": "disk", + "bus": "ide", + "address": "0:0", + "serial": "disk-vol-2352423", + "path": "/dev/sda", + "tags": ["baz"] + } + ], + "project_id": "f7ac731cc11f40efbc03a9f9e1d1d21f", + "public_keys": { + "mykey": "ssh-rsa AAAAB3NzaC1y...== Generated by Nova\n" + }, + "name": "test" + } + +.. code-block:: console + + $ curl http://169.254.169.254/openstack/2018-08-27/network_data.json + +.. code-block:: json + + { + "links": [ + { + "ethernet_mac_address": "fa:16:3e:9c:bf:3d", + "id": "tapcd9f6d46-4a", + "mtu": null, + "type": "bridge", + "vif_id": "cd9f6d46-4a3a-43ab-a466-994af9db96fc" + } + ], + "networks": [ + { + "id": "network0", + "link": "tapcd9f6d46-4a", + "network_id": "99e88329-f20d-4741-9593-25bf07847b16", + "type": "ipv4_dhcp" + } + ], + "services": [ + { + "address": "8.8.8.8", + "type": "dns" + } + ] + } + + +::download:`Download` network_data.json JSON schema. + +.. _metadata-ec2-format: + +EC2-compatible metadata +~~~~~~~~~~~~~~~~~~~~~~~ + +The EC2-compatible API is compatible with version 2009-04-04 of the `Amazon EC2 +metadata service`__ This means that virtual machine images designed for EC2 will +work properly with OpenStack. + +The EC2 API exposes a separate URL for each metadata element. Retrieve a +listing of these elements by making a GET query to +``http://169.254.169.254/2009-04-04/meta-data/``. For example: + +.. 
+Once mounted, you can examine the contents of the config drive:
+
+.. code-block:: console
+
+   $ cd /mnt/config
+   $ find . -maxdepth 2
+   .
+   ./ec2
+   ./ec2/2009-04-04
+   ./ec2/latest
+   ./openstack
+   ./openstack/2012-08-10
+   ./openstack/2013-04-04
+   ./openstack/2013-10-17
+   ./openstack/2015-10-15
+   ./openstack/2016-06-30
+   ./openstack/2016-10-06
+   ./openstack/2017-02-22
+   ./openstack/latest
+
+The files that appear on the config drive depend on the arguments that you
+pass to the :command:`openstack server create` command. The format of this
+directory is the same as that provided by the
+:ref:`metadata service <metadata-service>`, with the exception that the
+EC2-compatible metadata is now located in the ``ec2`` directory instead of the
+root (``/``) directory. Refer to the :ref:`metadata-openstack-format` and
+:ref:`metadata-ec2-format` sections for information about the format of the
+files and subdirectories within these directories.
+
+
+Nova metadata
+-------------
+
+As noted previously, nova provides its metadata in two formats: OpenStack
+format and EC2-compatible format.
+
+.. _metadata-openstack-format:
+
+OpenStack format metadata
+~~~~~~~~~~~~~~~~~~~~~~~~~
+
+.. versionchanged:: 12.0.0
+
+   Support for network metadata was added in the Liberty release.
+
+Metadata from the OpenStack API is distributed in JSON format. There are two
+files provided for each version: ``meta_data.json`` and ``network_data.json``.
+The ``meta_data.json`` file contains nova-specific information, while the
+``network_data.json`` file contains information retrieved from neutron. For
+example:
+
+.. code-block:: console
+
+   $ curl http://169.254.169.254/openstack/2018-08-27/meta_data.json
+
+.. code-block:: json
+
+   {
+       "random_seed": "yu5ZnkqF2CqnDZVAfZgarG...",
+       "availability_zone": "nova",
+       "keys": [
+           {
+               "data": "ssh-rsa AAAAB3NzaC1y...== Generated by Nova\n",
+               "type": "ssh",
+               "name": "mykey"
+           }
+       ],
+       "hostname": "test.novalocal",
+       "launch_index": 0,
+       "meta": {
+           "priority": "low",
+           "role": "webserver"
+       },
+       "devices": [
+           {
+               "type": "nic",
+               "bus": "pci",
+               "address": "0000:00:02.0",
+               "mac": "00:11:22:33:44:55",
+               "tags": ["trusted"]
+           },
+           {
+               "type": "disk",
+               "bus": "ide",
+               "address": "0:0",
+               "serial": "disk-vol-2352423",
+               "path": "/dev/sda",
+               "tags": ["baz"]
+           }
+       ],
+       "project_id": "f7ac731cc11f40efbc03a9f9e1d1d21f",
+       "public_keys": {
+           "mykey": "ssh-rsa AAAAB3NzaC1y...== Generated by Nova\n"
+       },
+       "name": "test"
+   }
+
+.. code-block:: console
+
+   $ curl http://169.254.169.254/openstack/2018-08-27/network_data.json
+
+.. code-block:: json
+
+   {
+       "links": [
+           {
+               "ethernet_mac_address": "fa:16:3e:9c:bf:3d",
+               "id": "tapcd9f6d46-4a",
+               "mtu": null,
+               "type": "bridge",
+               "vif_id": "cd9f6d46-4a3a-43ab-a466-994af9db96fc"
+           }
+       ],
+       "networks": [
+           {
+               "id": "network0",
+               "link": "tapcd9f6d46-4a",
+               "network_id": "99e88329-f20d-4741-9593-25bf07847b16",
+               "type": "ipv4_dhcp"
+           }
+       ],
+       "services": [
+           {
+               "address": "8.8.8.8",
+               "type": "dns"
+           }
+       ]
+   }
+
+A JSON schema for ``network_data.json`` is also available for download.
+
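+Because both files are plain JSON, they are straightforward to consume from
+scripts. A minimal sketch, assuming the ``2018-08-27`` version and that the
+``jq`` utility is installed in the guest:
+
+.. code-block:: shell
+
+   #!/bin/sh
+   # Pull a few fields out of the metadata documents shown above.
+   MD=http://169.254.169.254/openstack/2018-08-27
+   curl -s "$MD/meta_data.json" | jq -r .hostname
+   curl -s "$MD/network_data.json" | \
+       jq -r '.services[] | select(.type == "dns") | .address'
+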
+.. _metadata-ec2-format:
+
+EC2-compatible metadata
+~~~~~~~~~~~~~~~~~~~~~~~
+
+The EC2-compatible API is compatible with version 2009-04-04 of the `Amazon
+EC2 metadata service`__. This means that virtual machine images designed for
+EC2 will work properly with OpenStack.
+
+The EC2 API exposes a separate URL for each metadata element. Retrieve a
+listing of these elements by making a GET query to
+``http://169.254.169.254/2009-04-04/meta-data/``. For example:
+
+.. code-block:: console
+
+   $ curl http://169.254.169.254/2009-04-04/meta-data/
+   ami-id
+   ami-launch-index
+   ami-manifest-path
+   block-device-mapping/
+   hostname
+   instance-action
+   instance-id
+   instance-type
+   kernel-id
+   local-hostname
+   local-ipv4
+   placement/
+   public-hostname
+   public-ipv4
+   public-keys/
+   ramdisk-id
+   reservation-id
+   security-groups
+
+.. code-block:: console
+
+   $ curl http://169.254.169.254/2009-04-04/meta-data/block-device-mapping/
+   ami
+
+.. code-block:: console
+
+   $ curl http://169.254.169.254/2009-04-04/meta-data/placement/
+   availability-zone
+
+.. code-block:: console
+
+   $ curl http://169.254.169.254/2009-04-04/meta-data/public-keys/
+   0=mykey
+
+Instances can retrieve the public SSH key (identified by keypair name when a
+user requests a new instance) by making a GET request to
+``http://169.254.169.254/2009-04-04/meta-data/public-keys/0/openssh-key``:
+
+.. code-block:: console
+
+   $ curl http://169.254.169.254/2009-04-04/meta-data/public-keys/0/openssh-key
+   ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAAAgQDYVEprvtYJXVOBN0XNKVVRNCRX6BlnNbI+US\
+   LGais1sUWPwtSg7z9K9vhbYAPUZcq8c/s5S9dg5vTHbsiyPCIDOKyeHba4MUJq8Oh5b2i71/3B\
+   ISpyxTBH/uZDHdslW2a+SrPDCeuMMoss9NFhBdKtDkdG9zyi0ibmCP6yMdEX8Q== Generated\
+   by Nova
+
+__ https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-instance-metadata.html
+
+
+.. _metadata-userdata:
+
+User data
+---------
+
+*User data* is a blob of data that the user can specify when they launch an
+instance. The instance can access this data through the metadata service or
+config drive. It is commonly used to pass a shell script that the instance
+runs on boot.
+
+For example, one application that uses user data is the `cloud-init
+<https://help.ubuntu.com/community/CloudInit>`__ system, which is an
+open-source package from Ubuntu that is available on various Linux
+distributions and which handles early initialization of a cloud instance.
+
+You can place user data in a local file and pass it through the
+``--user-data`` parameter at instance creation:
+
+.. code-block:: console
+
+   $ openstack server create --image ubuntu-cloudimage --flavor 1 \
+     --user-data mydata.file VM_INSTANCE
+
+.. note::
+
+   The provided user data should not be base64-encoded, as it will be
+   automatically encoded in order to pass valid input to the REST API, which
+   has a limit of 65535 bytes after encoding.
+
+Once booted, you can access this data from the instance using either the
+metadata service or the config drive. To access it via the metadata service,
+make a GET request to either
+``http://169.254.169.254/openstack/{version}/user_data`` (OpenStack API) or
+``http://169.254.169.254/{version}/user-data`` (EC2-compatible API). For
+example:
+
+.. code-block:: console
+
+   $ curl http://169.254.169.254/openstack/2018-08-27/user_data
+
+.. code-block:: shell
+
+   #!/bin/bash
+   echo 'Extra user data here'
+
+
+.. _metadata-vendordata:
+
+Vendordata
+----------
+
+.. note::
+
+   This section provides end user information about the vendordata feature.
+   For deployment information about this feature, refer to the
+   :doc:`admin guide </admin/vendordata>`.
+
+.. versionchanged:: 14.0.0
+
+   Support for dynamic vendor data was added in the Newton release.
+
+**Where configured**, instances can retrieve vendor-specific data from the
+metadata service or config drive. To access it via the metadata service, make
+a GET request to either
+``http://169.254.169.254/openstack/{version}/vendor_data.json`` or
+``http://169.254.169.254/openstack/{version}/vendor_data2.json``, depending on
+the deployment. For example:
+
+.. code-block:: console
+
+   $ curl http://169.254.169.254/openstack/2018-08-27/vendor_data2.json
+
+.. code-block:: json
+
+   {
+       "testing": {
+           "value1": 1,
+           "value2": 2,
+           "value3": "three"
+       }
+   }
+
+.. note::
+
+   The presence and contents of this file will vary from deployment to
+   deployment.
+
+
+General guidelines
+------------------
+
+- Do not rely on the presence of the EC2 metadata in the metadata API or
+  config drive, because this content might be removed in a future release. For
+  example, do not rely on files in the ``ec2`` directory.
+
+- When you create images that access metadata service or config drive data and
+  multiple directories are under the ``openstack`` directory, always select
+  the highest API version by date that your consumer supports. For example, if
+  your guest image supports the ``2012-03-05``, ``2012-08-05``, and
+  ``2013-04-13`` versions, try ``2013-04-13`` first and fall back to a
+  previous version if ``2013-04-13`` is not present. A sketch of this fallback
+  logic follows the list.
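+As a minimal sketch of that guideline (the ``KNOWN`` list of versions is an
+illustrative assumption; list the ``openstack`` directory of your own cloud or
+config drive to see what is actually available):
+
+.. code-block:: shell
+
+   #!/bin/sh
+   # Try the newest metadata version this consumer understands, falling
+   # back to older ones if the deployment does not provide it.
+   KNOWN="2013-04-13 2012-08-05 2012-03-05"   # newest first
+   AVAILABLE=$(curl -s http://169.254.169.254/openstack)
+   for version in $KNOWN; do
+       if echo "$AVAILABLE" | grep -qx "$version"; then
+           curl -s "http://169.254.169.254/openstack/$version/meta_data.json"
+           break
+       fi
+   done
+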
diff --git a/doc/source/user/placement.rst b/doc/source/user/placement.rst
deleted file mode 100644
index bc714a62276..00000000000
--- a/doc/source/user/placement.rst
+++ /dev/null
@@ -1,333 +0,0 @@
-..
-      Licensed under the Apache License, Version 2.0 (the "License"); you may
-      not use this file except in compliance with the License. You may obtain
-      a copy of the License at
-
-          http://www.apache.org/licenses/LICENSE-2.0
-
-      Unless required by applicable law or agreed to in writing, software
-      distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-      WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-      License for the specific language governing permissions and limitations
-      under the License.
-
-===============
- Placement API
-===============
-
-Overview
-========
-
-Nova introduced the placement API service in the 14.0.0 Newton release. This
-is a separate REST API stack and data model used to track resource provider
-inventories and usages, along with different classes of resources. For
-example, a resource provider can be a compute node, a shared storage pool, or
-an IP allocation pool. The placement service tracks the inventory and usage of
-each provider. For example, an instance created on a compute node may be a
-consumer of resources such as RAM and CPU from a compute node resource
-provider, disk from an external shared storage pool resource provider and IP
-addresses from an external IP pool resource provider.
-
-The types of resources consumed are tracked as **classes**. The service
-provides a set of standard resource classes (for example ``DISK_GB``,
-``MEMORY_MB``, and ``VCPU``) and provides the ability to define custom
-resource classes as needed.
-
-Each resource provider may also have a set of traits which describe
-qualitative aspects of the resource provider. Traits describe an aspect of a
-resource provider that cannot itself be consumed but a workload may wish to
-specify. For example, available disk may be solid state drives (SSD).
-
-References
-~~~~~~~~~~
-
-The following specifications represent the stages of design and development of
-resource providers and the Placement service. Implementation details may have
-changed or be partially complete at this time.
-
-* `Generic Resource Pools `_
-* `Compute Node Inventory `_
-* `Resource Provider Allocations `_
-* `Resource Provider Base Models `_
-* `Nested Resource Providers`_
-* `Custom Resource Classes `_
-* `Scheduler Filters in DB `_
-* `Scheduler claiming resources to the Placement API `_
-* `The Traits API - Manage Traits with ResourceProvider `_
-* `Request Traits During Scheduling`_
-* `filter allocation candidates by aggregate membership`_
-* `perform granular allocation candidate requests`_
-
-.. _Nested Resource Providers: http://specs.openstack.org/openstack/nova-specs/specs/queens/approved/nested-resource-providers.html
-.. _Request Traits During Scheduling: https://specs.openstack.org/openstack/nova-specs/specs/queens/approved/request-traits-in-nova.html
-.. _filter allocation candidates by aggregate membership: https://specs.openstack.org/openstack/nova-specs/specs/rocky/approved/alloc-candidates-member-of.html
-.. _perform granular allocation candidate requests: http://specs.openstack.org/openstack/nova-specs/specs/rocky/approved/granular-resource-requests.html
-
-Deployment
-==========
-
-The placement-api service must be deployed at some point after you have
-upgraded to the 14.0.0 Newton release but before you can upgrade to the 15.0.0
-Ocata release. This is so that the resource tracker in the nova-compute
-service can populate resource provider (compute node) inventory and allocation
-information which will be used by the nova-scheduler service in Ocata.
-
-Steps
-~~~~~
-
-**1. Deploy the API service**
-
-At this time the placement API code is still in Nova alongside the compute
-REST API code (nova-api). So once you have upgraded nova-api to Newton you
-already have the placement API code, you just need to install the service.
-Nova provides a ``nova-placement-api`` WSGI script for running the service
-with Apache, nginx or other WSGI-capable web servers. Depending on what
-packaging solution is used to deploy OpenStack, the WSGI script may be in
-``/usr/bin`` or ``/usr/local/bin``.
-
-.. note:: The placement API service is currently developed within Nova but
-          it is designed to be as separate as possible from the existing code
-          so that it can eventually be split into a separate project.
-
-``nova-placement-api``, as a standard WSGI script, provides a module level
-``application`` attribute that most WSGI servers expect to find. This means it
-is possible to run it with lots of different servers, providing flexibility in
-the face of different deployment scenarios. Common scenarios include:
-
-* apache2_ with mod_wsgi_
-* apache2 with mod_proxy_uwsgi_
-* nginx_ with uwsgi_
-* nginx with gunicorn_
-
-In all of these scenarios the host, port and mounting path (or prefix) of the
-application is controlled in the web server's configuration, not in the
-configuration (``nova.conf``) of the placement application.
-
-When placement was `first added to DevStack`_ it used the ``mod_wsgi`` style.
-Later it `was updated`_ to use mod_proxy_uwsgi_. Looking at those changes can
-be useful for understanding the relevant options.
-
-DevStack is configured to host placement at ``/placement`` on either the
-default port for http or for https (``80`` or ``443``) depending on whether
-TLS is being used. Using a default port is desirable.
-
-By default, the placement application will get its configuration for settings
-such as the database connection URL from ``/etc/nova/nova.conf``.
The directory -the configuration file will be found in can be changed by setting -``OS_PLACEMENT_CONFIG_DIR`` in the environment of the process that starts the -application. - -.. note:: When using uwsgi with a front end (e.g., apache2 or nginx) something - needs to ensure that the uwsgi process is running. In DevStack this is done - with systemd_. This is one of many different ways to manage uwsgi. - -This document refrains from declaring a set of installation instructions for -the placement service. This is because a major point of having a WSGI -application is to make the deployment as flexible as possible. Because the -placement API service is itself stateless (all state is in the database), it is -possible to deploy as many servers as desired behind a load balancing solution -for robust and simple scaling. If you familiarize yourself with installing -generic WSGI applications (using the links in the common scenarios list, -above), those techniques will be applicable here. - -.. _apache2: http://httpd.apache.org/ -.. _mod_wsgi: https://modwsgi.readthedocs.io/ -.. _mod_proxy_uwsgi: http://uwsgi-docs.readthedocs.io/en/latest/Apache.html -.. _nginx: http://nginx.org/ -.. _uwsgi: http://uwsgi-docs.readthedocs.io/en/latest/Nginx.html -.. _gunicorn: http://gunicorn.org/ -.. _first added to DevStack: https://review.openstack.org/#/c/342362/ -.. _was updated: https://review.openstack.org/#/c/456717/ -.. _systemd: https://review.openstack.org/#/c/448323/ - -**2. Synchronize the database** - -In the Newton release the Nova **api** database is the only deployment -option for the placement API service and the resources it manages. After -upgrading the nova-api service for Newton and running the -``nova-manage api_db sync`` command the placement tables will be created. - -With the Rocky release, it has become possible to use a separate database for -placement. If :oslo.config:option:`placement_database.connection` is -configured with a database connect string, that database will be used for -storing placement data. Once the database is created, the -``nova-manage api_db sync`` command will create and synchronize both the -nova api and placement tables. If ``[placement_database]/connection`` is not -set, the nova api database will be used. - -.. note:: At this time there is no facility for migrating existing placement - data from the nova api database to a placement database. There are - many ways to do this. Which one is best will depend on the environment. - -**3. Create accounts and update the service catalog** - -Create a **placement** service user with an **admin** role in Keystone. - -The placement API is a separate service and thus should be registered under -a **placement** service type in the service catalog as that is what the -resource tracker in the nova-compute node will use to look up the endpoint. - -Devstack sets up the placement service on the default HTTP port (80) with a -``/placement`` prefix instead of using an independent port. - -**4. Configure and restart nova-compute services** - -The 14.0.0 Newton nova-compute service code will begin reporting resource -provider inventory and usage information as soon as the placement API -service is in place and can respond to requests via the endpoint registered -in the service catalog. - -``nova.conf`` on the compute nodes must be updated in the ``[placement]`` -group to contain credentials for making requests from nova-compute to the -placement-api service. - -.. 
note:: After upgrading nova-compute code to Newton and restarting the - service, the nova-compute service will attempt to make a connection - to the placement API and if that is not yet available a warning will - be logged. The nova-compute service will keep attempting to connect - to the placement API, warning periodically on error until it is - successful. Keep in mind that Placement is optional in Newton, but - required in Ocata, so the placement service should be enabled before - upgrading to Ocata. nova.conf on the compute nodes will need to be - updated in the ``[placement]`` group for credentials to make requests - from nova-compute to the placement-api service. - - -.. _placement-upgrade-notes: - -Upgrade Notes -============= - -The following sub-sections provide notes on upgrading to a given target release. - -.. note:: - - As a reminder, the :doc:`nova-status upgrade check ` tool - can be used to help determine the status of your deployment and how ready it - is to perform an upgrade. - -Ocata (15.0.0) -~~~~~~~~~~~~~~ - -* The ``nova-compute`` service will fail to start in Ocata unless the - ``[placement]`` section of nova.conf on the compute is configured. As - mentioned in the deployment steps above, the Placement service should be - deployed by this point so the computes can register and start reporting - inventory and allocation information. If the computes are deployed - and configured `before` the Placement service, they will continue to try - and reconnect in a loop so that you do not need to restart the nova-compute - process to talk to the Placement service after the compute is properly - configured. -* The ``nova.scheduler.filter_scheduler.FilterScheduler`` in Ocata will - fallback to not using the Placement service as long as there are older - ``nova-compute`` services running in the deployment. This allows for rolling - upgrades of the computes to not affect scheduling for the FilterScheduler. - However, the fallback mechanism will be removed in the 16.0.0 Pike release - such that the scheduler will make decisions based on the Placement service - and the resource providers (compute nodes) registered there. This means if - the computes are not reporting into Placement by Pike, build requests will - fail with **NoValidHost** errors. -* While the FilterScheduler technically depends on the Placement service - in Ocata, if you deploy the Placement service `after` you upgrade the - ``nova-scheduler`` service to Ocata and restart it, things will still work. - The scheduler will gracefully handle the absence of the Placement service. - However, once all computes are upgraded, the scheduler not being able to make - requests to Placement will result in **NoValidHost** errors. -* It is currently possible to exclude the ``CoreFilter``, ``RamFilter`` and - ``DiskFilter`` from the list of enabled FilterScheduler filters such that - scheduling decisions are not based on CPU, RAM or disk usage. Once all - computes are reporting into the Placement service, however, and the - FilterScheduler starts to use the Placement service for decisions, those - excluded filters are ignored and the scheduler will make requests based on - VCPU, MEMORY_MB and DISK_GB inventory. If you wish to effectively ignore - that type of resource for placement decisions, you will need to adjust the - corresponding ``cpu_allocation_ratio``, ``ram_allocation_ratio``, and/or - ``disk_allocation_ratio`` configuration options to be very high values, e.g. - 9999.0. 
-* Users of CellsV1 will need to deploy a placement per cell, matching - the scope and cardinality of the regular ``nova-scheduler`` process. - -Pike (16.0.0) -~~~~~~~~~~~~~ - -* The ``nova.scheduler.filter_scheduler.FilterScheduler`` in Pike will - no longer fall back to not using the Placement Service, even if older - computes are running in the deployment. -* The FilterScheduler now requests allocation candidates from the Placement - service during scheduling. The allocation candidates information was - introduced in the Placement API 1.10 microversion, so you should upgrade the - placement service **before** the Nova scheduler service so that the scheduler - can take advantage of the allocation candidate information. - - The scheduler gets the allocation candidates from the placement API and - uses those to get the compute nodes, which come from the cell(s). The - compute nodes are passed through the enabled scheduler filters and weighers. - The scheduler then iterates over this filtered and weighed list of hosts and - attempts to claim resources in the placement API for each instance in the - request. Claiming resources involves finding an allocation candidate that - contains an allocation against the selected host's UUID and asking the - placement API to allocate the requested instance resources. We continue - performing this claim request until success or we run out of allocation - candidates, resulting in a NoValidHost error. - - For a move operation, such as migration, allocations are made in Placement - against both the source and destination compute node. Once the - move operation is complete, the resource tracker in the *nova-compute* - service will adjust the allocations in Placement appropriately. - - For a resize to the same host, allocations are summed on the single compute - node. This could pose a problem if the compute node has limited capacity. - Since resizing to the same host is disabled by default, and generally only - used in testing, this is mentioned for completeness but should not be a - concern for production deployments. - -Queens (17.0.0) -~~~~~~~~~~~~~~~ - -* The minimum Placement API microversion required by the *nova-scheduler* - service is ``1.17`` in order to support `Request Traits During Scheduling`_. - This means you must upgrade the placement service before upgrading any - *nova-scheduler* services to Queens. - -Rocky (18.0.0) -~~~~~~~~~~~~~~ - -* The ``nova-api`` service now requires the ``[placement]`` section to be - configured in nova.conf if you are using a separate config file just for - that service. This is because the ``nova-api`` service now needs to talk - to the placement service in order to (1) delete resource provider allocations - when deleting an instance and the ``nova-compute`` service on which that - instance is running is down (2) delete a ``nova-compute`` service record via - the ``DELETE /os-services/{service_id}`` API and (3) mirror aggregate host - associations to the placement service. This change is idempotent if - ``[placement]`` is not configured in ``nova-api`` but it will result in new - warnings in the logs until configured. -* As described above, before Rocky, the placement service used the nova api - database to store placement data. In Rocky, if the ``connection`` setting in - a ``[placement_database]`` group is set in configuration, that group will be - used to describe where and how placement data is stored. - -REST API -======== - -The placement API service has its own `REST API`_ and data model. 
One -can get a sample of the REST API via the functional test `gabbits`_. - -.. _`REST API`: https://developer.openstack.org/api-ref/placement/ -.. _gabbits: http://git.openstack.org/cgit/openstack/nova/tree/nova/tests/functional/api/openstack/placement/gabbits - -Microversions -~~~~~~~~~~~~~ - -The placement API uses microversions for making incremental changes to the -API which client requests must opt into. - -It is especially important to keep in mind that nova-compute is a client of -the placement REST API and based on how Nova supports rolling upgrades the -nova-compute service could be Newton level code making requests to an Ocata -placement API, and vice-versa, an Ocata compute service in a cells v2 cell -could be making requests to a Newton placement API. - -.. _placement-api-microversion-history: - -.. include:: ../../../nova/api/openstack/placement/rest_api_version_history.rst diff --git a/doc/source/user/quotas.rst b/doc/source/user/quotas.rst index e1b54484af6..7377e0f1a24 100644 --- a/doc/source/user/quotas.rst +++ b/doc/source/user/quotas.rst @@ -1,78 +1,235 @@ -.. - Licensed under the Apache License, Version 2.0 (the "License"); you may - not use this file except in compliance with the License. You may obtain - a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - License for the specific language governing permissions and limitations - under the License. - -======== - Quotas -======== +====== +Quotas +====== Nova uses a quota system for setting limits on resources such as number of instances or amount of CPU that a specific project or user can use. -Quotas are enforced by making a claim, or reservation, on resources when a -request is made, such as creating a new server. If the claim fails, the request -is rejected. If the reservation succeeds then the operation progresses until -such a point that the reservation is either converted into usage (the operation -was successful) or rolled back (the operation failed). - -Typically the quota reservation is made in the nova-api service and the usage -or rollback is performed in the nova-compute service, at least when dealing -with a server creation or move operation. - -Quota limits and usage can be retrieved via the ``limits`` REST API. - -Checking quota -============== - -When calculating limits for a given resource and tenant, the following -checks are made in order: - -* Depending on the resource, is there a tenant-specific limit on the resource - in either the `quotas` or `project_user_quotas` tables in the database? If - so, use that as the limit. You can create these resources by doing:: - - openstack quota set --instances 5 - -* Check to see if there is a hard limit for the given resource in the - `quota_classes` table in the database for the `default` quota class. If so, - use that as the limit. You can modify the default quota limit for a resource - by doing:: - - openstack quota set --class --instances 5 default - -* If the above does not provide a resource limit, then rely on the ``quota_*`` - configuration options for the default limit. - -.. note:: The API sets the limit in the `quota_classes` table. Once a default - limit is set via the `default` quota class, that takes precedence over - any changes to that resource limit in the configuration options. 
In other - words, once you've changed things via the API, you either have to keep those - synchronized with the configuration values or remove the default limit from - the database manually as there is no REST API for removing quota class - values from the database. - - -Known issues -============ - -TODO: talk about quotas getting out of sync and `how to recover`_ - -.. _how to recover: https://specs.openstack.org/openstack/nova-specs/specs/newton/implemented/refresh-quotas-usage.html - - -Future plans -============ - -TODO: talk about quotas in the `resource counting spec`_ and `nested quotas`_ - -.. _resource counting spec: https://specs.openstack.org/openstack/nova-specs/specs/ocata/approved/cells-count-resources-to-check-quota-in-api.html -.. _nested quotas: https://specs.openstack.org/openstack/nova-specs/specs/mitaka/approved/nested-quota-driver-api.html +Quota limits and usage can be retrieved using the command-line interface. + + +Types of quota +-------------- + +.. list-table:: + :header-rows: 1 + :widths: 10 40 + + * - Quota name + - Description + * - cores + - Number of instance cores (VCPUs) allowed per project. + * - instances + - Number of instances allowed per project. + * - key_pairs + - Number of key pairs allowed per user. + * - metadata_items + - Number of metadata items allowed per instance. + * - ram + - Megabytes of instance ram allowed per project. + * - server_groups + - Number of server groups per project. + * - server_group_members + - Number of servers per server group. + +The following quotas were previously available but were removed in microversion +2.36 as they proxied information available from the networking service. + +.. list-table:: + :header-rows: 1 + :widths: 10 40 + + * - Quota name + - Description + * - fixed_ips + - Number of fixed IP addresses allowed per project. This number + must be equal to or greater than the number of allowed + instances. + * - floating_ips + - Number of floating IP addresses allowed per project. + * - networks + - Number of networks allowed per project (no longer used). + * - security_groups + - Number of security groups per project. + * - security_group_rules + - Number of security group rules per project. + +Similarly, the following quotas were previously available but were removed in +microversion 2.57 as the personality files feature was deprecated. + +.. list-table:: + :header-rows: 1 + :widths: 10 40 + + * - Quota name + - Description + * - injected_files + - Number of injected files allowed per project. + * - injected_file_content_bytes + - Number of content bytes allowed per injected file. + * - injected_file_path_bytes + - Length of injected file path. + + +Usage +----- + +Project quotas +~~~~~~~~~~~~~~ + +To list all default quotas for projects, run: + +.. code-block:: console + + $ openstack quota show --default + +.. note:: + + This lists default quotas for all services and not just nova. + +For example: + +.. 
code-block:: console + + $ openstack quota show --default + +----------------------+----------+ + | Field | Value | + +----------------------+----------+ + | backup-gigabytes | 1000 | + | backups | 10 | + | cores | 20 | + | fixed-ips | -1 | + | floating-ips | 50 | + | gigabytes | 1000 | + | health_monitors | None | + | injected-file-size | 10240 | + | injected-files | 5 | + | injected-path-size | 255 | + | instances | 10 | + | key-pairs | 100 | + | l7_policies | None | + | listeners | None | + | load_balancers | None | + | location | None | + | name | None | + | networks | 10 | + | per-volume-gigabytes | -1 | + | pools | None | + | ports | 50 | + | project | None | + | project_name | project | + | properties | 128 | + | ram | 51200 | + | rbac_policies | 10 | + | routers | 10 | + | secgroup-rules | 100 | + | secgroups | 10 | + | server-group-members | 10 | + | server-groups | 10 | + | snapshots | 10 | + | subnet_pools | -1 | + | subnets | 10 | + | volumes | 10 | + +----------------------+----------+ + +To list the currently set quota values for your project, run: + +.. code-block:: console + + $ openstack quota show PROJECT + +where ``PROJECT`` is the ID or name of your project. For example: + +.. code-block:: console + + $ openstack quota show $OS_PROJECT_ID + +----------------------+----------------------------------+ + | Field | Value | + +----------------------+----------------------------------+ + | backup-gigabytes | 1000 | + | backups | 10 | + | cores | 32 | + | fixed-ips | -1 | + | floating-ips | 10 | + | gigabytes | 1000 | + | health_monitors | None | + | injected-file-size | 10240 | + | injected-files | 5 | + | injected-path-size | 255 | + | instances | 10 | + | key-pairs | 100 | + | l7_policies | None | + | listeners | None | + | load_balancers | None | + | location | None | + | name | None | + | networks | 20 | + | per-volume-gigabytes | -1 | + | pools | None | + | ports | 60 | + | project | c8156b55ec3b486193e73d2974196993 | + | project_name | project | + | properties | 128 | + | ram | 65536 | + | rbac_policies | 10 | + | routers | 10 | + | secgroup-rules | 50 | + | secgroups | 50 | + | server-group-members | 10 | + | server-groups | 10 | + | snapshots | 10 | + | subnet_pools | -1 | + | subnets | 20 | + | volumes | 10 | + +----------------------+----------------------------------+ + +To view a list of options for the :command:`openstack quota show` command, run: + +.. code-block:: console + + $ openstack quota show --help + +User quotas +~~~~~~~~~~~ + +.. note:: + + User-specific quotas are legacy and will be removed when migration to + :keystone-doc:`unified limits ` is complete. + User-specific quotas were added as a way to provide two-level hierarchical + quotas and this feature is already being offered in unified limits. For + this reason, the below commands have not and will not be ported to + openstackclient. + +To list the quotas for your user, run: + +.. code-block:: console + + $ nova quota-show --user USER --tenant PROJECT + +where ``USER`` is the ID or name of your user and ``PROJECT`` is the ID or name +of your project. For example: + +.. 
code-block:: console + + $ nova quota-show --user $OS_USERNAME --tenant $OS_PROJECT_ID + +-----------------------------+-------+ + | Quota | Limit | + +-----------------------------+-------+ + | instances | 10 | + | cores | 32 | + | ram | 65536 | + | metadata_items | 128 | + | injected_files | 5 | + | injected_file_content_bytes | 10240 | + | injected_file_path_bytes | 255 | + | key_pairs | 100 | + | server_groups | 10 | + | server_group_members | 10 | + +-----------------------------+-------+ + +To view a list of options for the :command:`nova quota-show` command, run: + +.. code-block:: console + + $ nova help quota-show diff --git a/doc/source/user/reboot.rst b/doc/source/user/reboot.rst new file mode 100644 index 00000000000..7f642869ef4 --- /dev/null +++ b/doc/source/user/reboot.rst @@ -0,0 +1,24 @@ +================== +Reboot an instance +================== + +You can soft or hard reboot a running instance. A soft reboot attempts a +graceful shut down and restart of the instance. A hard reboot power +cycles the instance. + +To reboot a server, use the :command:`openstack server reboot` command: + +.. code-block:: console + + $ openstack server reboot SERVER + +By default, when you reboot an instance it is a soft reboot. +To perform a hard reboot, pass the ``--hard`` parameter as follows: + +.. code-block:: console + + $ openstack server reboot --hard SERVER + +It is also possible to reboot a running instance into rescue mode. For example, +this operation may be required if a filesystem of an instance becomes corrupted +with prolonged use. See :doc:`rescue` for more details. diff --git a/doc/source/user/rescue.rst b/doc/source/user/rescue.rst new file mode 100644 index 00000000000..55a1b71dae5 --- /dev/null +++ b/doc/source/user/rescue.rst @@ -0,0 +1,101 @@ +================== +Rescue an instance +================== + +Instance rescue provides a mechanism for access, even if an image renders the +instance inaccessible. Two rescue modes are currently provided. + +Instance rescue +--------------- + +By default the instance is booted from the provided rescue image or a fresh +copy of the original instance image if a rescue image is not provided. The root +disk and optional regenerated config drive are also attached to the instance +for data recovery. + +.. note:: + + Rescuing a volume-backed instance is not supported with this mode. + +Stable device instance rescue +----------------------------- + +As of 21.0.0 (Ussuri) an additional stable device rescue mode is available. +This mode now supports the rescue of volume-backed instances. + +This mode keeps all devices both local and remote attached in their original +order to the instance during the rescue while booting from the provided rescue +image. This mode is enabled and controlled by the presence of +``hw_rescue_device`` or ``hw_rescue_bus`` image properties on the provided +rescue image. + +As their names suggest these properties control the rescue device type +(``cdrom``, ``disk`` or ``floppy``) and bus type (``scsi``, ``virtio``, +``ide``, or ``usb``) used when attaching the rescue image to the instance. + +Support for each combination of the ``hw_rescue_device`` and ``hw_rescue_bus`` +image properties is dependent on the underlying hypervisor and platform being +used. For example the ``IDE`` bus is not available on POWER KVM based compute +hosts. + +.. note:: + + This mode is only supported when using the Libvirt virt driver. 
+ + This mode is not supported when using the LXC hypervisor as enabled by + the :oslo.config:option:`libvirt.virt_type` configurable on the computes. + +Usage +----- + +.. note:: + + Pause, suspend, and stop operations are not allowed when an instance + is running in rescue mode, as triggering these actions causes the + loss of the original instance state and makes it impossible to + unrescue the instance. + +To perform an instance rescue, use the :command:`openstack server rescue` +command: + +.. code-block:: console + + $ openstack server rescue SERVER + +.. note:: + + On running the :command:`openstack server rescue` command, + an instance performs a soft shutdown first. This means that + the guest operating system has a chance to perform + a controlled shutdown before the instance is powered off. + The shutdown behavior is configured by the + :oslo.config:option:`shutdown_timeout` parameter that can be set in the + ``nova.conf`` file. + Its value stands for the overall period (in seconds) + a guest operating system is allowed to complete the shutdown. + + The timeout value can be overridden on a per image basis + by means of ``os_shutdown_timeout`` that is an image metadata + setting allowing different types of operating systems to specify + how much time they need to shut down cleanly. + +To rescue an instance that boots from a volume you need to use the +:ref:`2.87 microversion or later `. + +.. code-block:: console + + $ openstack --os-compute-api-version 2.87 server rescue SERVER + +If you want to rescue an instance with a specific image, rather than the +default one, use the ``--image`` parameter: + +.. code-block:: console + + $ openstack server rescue --image IMAGE_ID SERVER + +To restart the instance from the normal boot disk, run the following +command: + +.. code-block:: console + + $ openstack server unrescue SERVER diff --git a/doc/source/user/resize.rst b/doc/source/user/resize.rst index 10aa3660b80..53b4a133b49 100644 --- a/doc/source/user/resize.rst +++ b/doc/source/user/resize.rst @@ -1,119 +1,61 @@ -============================== -Change the size of your server -============================== - -Change the size of a server by changing its flavor. - -#. Show information about your server, including its size, which is shown - as the value of the flavor property: - - .. 
code-block:: console - - $ openstack server show myCirrosServer - +--------------------------------------+----------------------------------------------------------+ - | Field | Value | - +--------------------------------------+----------------------------------------------------------+ - | OS-DCF:diskConfig | AUTO | - | OS-EXT-AZ:availability_zone | nova | - | OS-EXT-SRV-ATTR:host | node-7.domain.tld | - | OS-EXT-SRV-ATTR:hypervisor_hostname | node-7.domain.tld | - | OS-EXT-SRV-ATTR:instance_name | instance-000000f3 | - | OS-EXT-STS:power_state | 1 | - | OS-EXT-STS:task_state | None | - | OS-EXT-STS:vm_state | active | - | OS-SRV-USG:launched_at | 2016-10-26T01:13:15.000000 | - | OS-SRV-USG:terminated_at | None | - | accessIPv4 | | - | accessIPv6 | | - | addresses | admin_internal_net=192.168.111.139 | - | config_drive | True | - | created | 2016-10-26T01:12:38Z | - | flavor | m1.small (2) | - | hostId | d815539ce1a8fad3d597c3438c13f1229d3a2ed66d1a75447845a2f3 | - | id | 67bc9a9a-5928-47c4-852c-3631fef2a7e8 | - | image | cirros-test (dc5ec4b8-5851-4be8-98aa-df7a9b8f538f) | - | key_name | None | - | name | myCirrosServer | - | os-extended-volumes:volumes_attached | [] | - | progress | 0 | - | project_id | c08367f25666480f9860c6a0122dfcc4 | - | properties | | - | security_groups | [{u'name': u'default'}] | - | status | ACTIVE | - | updated | 2016-10-26T01:13:00Z | - | user_id | 0209430e30924bf9b5d8869990234e44 | - +--------------------------------------+----------------------------------------------------------+ - - The size (flavor) of the server is ``m1.small (2)``. - -#. List the available flavors with the following command: - - .. code-block:: console - - $ openstack flavor list - +-----+-----------+-------+------+-----------+-------+-----------+ - | ID | Name | RAM | Disk | Ephemeral | VCPUs | Is_Public | - +-----+-----------+-------+------+-----------+-------+-----------+ - | 1 | m1.tiny | 512 | 1 | 0 | 1 | True | - | 2 | m1.small | 2048 | 20 | 0 | 1 | True | - | 3 | m1.medium | 4096 | 40 | 0 | 2 | True | - | 4 | m1.large | 8192 | 80 | 0 | 4 | True | - | 5 | m1.xlarge | 16384 | 160 | 0 | 8 | True | - +-----+-----------+-------+------+-----------+-------+-----------+ - -#. To resize the server, use the :command:`openstack server resize` command and - add the server ID or name and the new flavor. For example: - - .. code-block:: console - - $ openstack server resize --flavor 4 myCirrosServer - - - .. note:: - - By default, the :command:`openstack server resize` command gives - the guest operating - system a chance to perform a controlled shutdown before the instance - is powered off and the instance is resized. - The shutdown behavior is configured by the - :oslo.config:option:`shutdown_timeout` parameter that can be set in the - ``nova.conf`` file. Its value stands for the overall - period (in seconds) a guest operating system is allowed - to complete the shutdown. The default timeout is 60 seconds. - - The timeout value can be overridden on a per image basis - by means of ``os_shutdown_timeout`` that is an image metadata - setting allowing different types of operating systems to specify - how much time they need to shut down cleanly. See - :glance-doc:`Useful image properties ` - for details. - -#. Show the status for your server. - - .. 
code-block:: console - - $ openstack server list - +----------------------+----------------+--------+-----------------------------------------+ - | ID | Name | Status | Networks | - +----------------------+----------------+--------+-----------------------------------------+ - | 67bc9a9a-5928-47c... | myCirrosServer | RESIZE | admin_internal_net=192.168.111.139 | - +----------------------+----------------+--------+-----------------------------------------+ - - When the resize completes, the status becomes ``VERIFY_RESIZE``. - -#. Confirm the resize. For example: - - .. code-block:: console - - $ openstack server resize --confirm 67bc9a9a-5928-47c4-852c-3631fef2a7e8 - - The server status becomes ``ACTIVE``. - -#. If the resize fails or does not work as expected, you can revert the - resize. For example: - - .. code-block:: console - - $ openstack server resize --revert 67bc9a9a-5928-47c4-852c-3631fef2a7e8 - - The server status becomes ``ACTIVE``. +================== +Resize an instance +================== + +You can change the size of an instance by changing its flavor. This rebuilds +the instance and therefore results in a restart. + +To list the VMs you want to resize, run: + +.. code-block:: console + + $ openstack server list + +Once you have the name or UUID of the server you wish to resize, resize it +using the :command:`openstack server resize` command: + +.. code-block:: console + + $ openstack server resize --flavor FLAVOR SERVER + +.. note:: + + By default, the :command:`openstack server resize` command gives the guest + operating system a chance to perform a controlled shutdown before the + instance is powered off and the instance is resized. This behavior can be + configured by the administrator but it can also be overridden on a per image + basis using the ``os_shutdown_timeout`` image metadata setting. This allows + different types of operating systems to specify how much time they need to + shut down cleanly. See :glance-doc:`Useful image properties + ` for details. + +Resizing can take some time. During this time, the instance status will be +``RESIZE``: + +.. code-block:: console + + $ openstack server list + +----------------------+----------------+--------+-----------------------------------------+ + | ID | Name | Status | Networks | + +----------------------+----------------+--------+-----------------------------------------+ + | 67bc9a9a-5928-47c... | myCirrosServer | RESIZE | admin_internal_net=192.168.111.139 | + +----------------------+----------------+--------+-----------------------------------------+ + +When the resize completes, the instance status will be ``VERIFY_RESIZE``. +You can now confirm the resize to change the status to ``ACTIVE``: + +.. code-block:: console + + $ openstack server resize confirm SERVER + +.. note:: + + The resized server may be automatically confirmed based on the + administrator's configuration of the deployment. + +If the resize does not work as expected, you can revert the resize. This will +revert the instance to the old flavor and change the status to ``ACTIVE``: + +.. code-block:: console + + $ openstack server resize revert SERVER diff --git a/doc/source/user/support-matrix.ini b/doc/source/user/support-matrix.ini index e8bc8783959..7ed837787f6 100644 --- a/doc/source/user/support-matrix.ini +++ b/doc/source/user/support-matrix.ini @@ -71,9 +71,6 @@ # document, and merge it with this when their code merges into # Nova core. 
-[driver.xenserver] -title=XenServer - [driver.libvirt-kvm-x86] title=Libvirt KVM (x86) @@ -98,9 +95,6 @@ title=Libvirt Virtuozzo VM [driver.libvirt-vz-ct] title=Libvirt Virtuozzo CT -[driver.libvirt-xen] -title=Libvirt Xen - [driver.vmware] title=VMware vCenter @@ -113,6 +107,9 @@ title=Ironic [driver.powervm] title=PowerVM +[driver.zvm] +title=zVM + [operation.attach-volume] title=Attach block volume to instance status=optional @@ -125,14 +122,12 @@ notes=The attach volume operation provides a means to hotplug is considered to be more of a pet than cattle. Therefore this operation is not considered to be mandatory to support. cli=nova volume-attach -driver.xenserver=complete driver.libvirt-kvm-x86=complete -driver.libvirt-kvm-aarch64=unknown +driver.libvirt-kvm-aarch64=complete driver.libvirt-kvm-ppc64=complete driver.libvirt-kvm-s390x=complete driver.libvirt-qemu-x86=complete driver.libvirt-lxc=missing -driver.libvirt-xen=complete driver.vmware=complete driver.hyperv=complete driver.ironic=missing @@ -141,6 +136,7 @@ driver.libvirt-vz-ct=missing driver.powervm=complete driver-notes.powervm=This is not tested for every CI run. Add a "powervm:volume-check" comment to trigger a CI job running volume tests. +driver.zvm=missing [operation.attach-tagged-volume] title=Attach tagged block device to instance @@ -148,34 +144,31 @@ status=optional notes=Attach a block device with a tag to an existing server instance. See "Device tags" for more information. cli=nova volume-attach [--tag ] -driver.xenserver=missing driver.libvirt-kvm-x86=complete -driver.libvirt-kvm-aarch64=unknown +driver.libvirt-kvm-aarch64=complete driver.libvirt-kvm-ppc64=complete driver.libvirt-kvm-s390x=complete driver.libvirt-qemu-x86=complete driver.libvirt-lxc=missing -driver.libvirt-xen=complete driver.vmware=missing driver.hyperv=missing driver.ironic=missing driver.libvirt-vz-vm=complete driver.libvirt-vz-ct=missing driver.powervm=missing +driver.zvm=missing [operation.detach-volume] title=Detach block volume from instance status=optional notes=See notes for attach volume operation. cli=nova volume-detach -driver.xenserver=complete driver.libvirt-kvm-x86=complete -driver.libvirt-kvm-aarch64=unknown +driver.libvirt-kvm-aarch64=complete driver.libvirt-kvm-ppc64=complete driver.libvirt-kvm-s390x=complete driver.libvirt-qemu-x86=complete driver.libvirt-lxc=missing -driver.libvirt-xen=complete driver.vmware=complete driver.hyperv=complete driver.ironic=missing @@ -184,6 +177,7 @@ driver.libvirt-vz-ct=missing driver.powervm=complete driver-notes.powervm=This is not tested for every CI run. Add a "powervm:volume-check" comment to trigger a CI job running volume tests. +driver.zvm=missing [operation.extend-volume] title=Extend block volume attached to instance @@ -197,14 +191,12 @@ notes=The extend volume operation provides a means to extend where the instance is considered to be more of a pet than cattle. Therefore this operation is not considered to be mandatory to support. cli=cinder extend -driver.xenserver=missing driver.libvirt-kvm-x86=complete -driver.libvirt-kvm-aarch64=unknown +driver.libvirt-kvm-aarch64=complete driver.libvirt-kvm-ppc64=unknown driver.libvirt-kvm-s390x=unknown driver.libvirt-qemu-x86=complete driver.libvirt-lxc=missing -driver.libvirt-xen=unknown driver.vmware=missing driver.hyperv=missing driver.ironic=missing @@ -213,6 +205,7 @@ driver.libvirt-vz-ct=missing driver.powervm=complete driver-notes.powervm=This is not tested for every CI run. 
Add a "powervm:volume-check" comment to trigger a CI job running volume tests. +driver.zvm=missing [operation.attach-interface] title=Attach virtual network interface to instance @@ -225,14 +218,12 @@ notes=The attach interface operation provides a means to hotplug In a cloud model it would be more typical to just spin up a new instance with more interfaces. cli=nova interface-attach -driver.xenserver=complete driver.libvirt-kvm-x86=complete driver.libvirt-kvm-aarch64=complete driver.libvirt-kvm-ppc64=complete driver.libvirt-kvm-s390x=complete driver.libvirt-qemu-x86=complete driver.libvirt-lxc=missing -driver.libvirt-xen=complete driver.vmware=complete driver.hyperv=partial driver-notes.hyperv=Works without issue if instance is off. When @@ -242,6 +233,7 @@ driver.ironic=complete driver.libvirt-vz-vm=complete driver.libvirt-vz-ct=complete driver.powervm=complete +driver.zvm=missing [operation.attach-tagged-interface] title=Attach tagged virtual network interface to instance @@ -249,34 +241,31 @@ status=optional notes=Attach a virtual network interface with a tag to an existing server instance. See "Device tags" for more information. cli=nova interface-attach [--tag ] -driver.xenserver=missing driver.libvirt-kvm-x86=complete driver.libvirt-kvm-aarch64=unknown driver.libvirt-kvm-ppc64=complete driver.libvirt-kvm-s390x=complete driver.libvirt-qemu-x86=complete driver.libvirt-lxc=missing -driver.libvirt-xen=complete driver.vmware=missing -driver.hyperv=missing +driver.hyperv=complete driver.ironic=missing driver.libvirt-vz-vm=complete driver.libvirt-vz-ct=missing driver.powervm=missing +driver.zvm=missing [operation.detach-interface] title=Detach virtual network interface from instance status=optional notes=See notes for attach-interface operation. cli=nova interface-detach -driver.xenserver=complete driver.libvirt-kvm-x86=complete driver.libvirt-kvm-aarch64=complete driver.libvirt-kvm-ppc64=complete driver.libvirt-kvm-s390x=complete driver.libvirt-qemu-x86=complete driver.libvirt-lxc=missing -driver.libvirt-xen=complete driver.vmware=complete driver.hyperv=complete driver-notes.hyperv=Works without issue if instance is off. When @@ -286,6 +275,7 @@ driver.ironic=complete driver.libvirt-vz-vm=complete driver.libvirt-vz-ct=complete driver.powervm=complete +driver.zvm=missing [operation.maintenance-mode] title=Set the host in a maintenance mode @@ -298,20 +288,19 @@ notes=This operation allows a host to be placed into maintenance The driver methods to implement are "host_maintenance_mode" and "set_host_enabled". cli=nova host-update -driver.xenserver=complete driver.libvirt-kvm-x86=missing driver.libvirt-kvm-aarch64=missing driver.libvirt-kvm-ppc64=missing driver.libvirt-kvm-s390x=missing driver.libvirt-qemu-x86=missing driver.libvirt-lxc=missing -driver.libvirt-xen=missing driver.vmware=missing driver.hyperv=missing driver.ironic=missing driver.libvirt-vz-vm=missing driver.libvirt-vz-ct=missing driver.powervm=missing +driver.zvm=missing [operation.evacuate] title=Evacuate instances from a host @@ -325,20 +314,19 @@ notes=A possible failure scenario in a cloud environment is the outage dropped. That happens in the same way as a rebuild. This is not considered to be a mandatory operation to support. 
cli=nova evacuate ;nova host-evacuate -driver.xenserver=unknown driver.libvirt-kvm-x86=complete driver.libvirt-kvm-aarch64=complete driver.libvirt-kvm-ppc64=unknown driver.libvirt-kvm-s390x=complete driver.libvirt-qemu-x86=unknown driver.libvirt-lxc=unknown -driver.libvirt-xen=unknown driver.vmware=unknown driver.hyperv=unknown driver.ironic=unknown driver.libvirt-vz-vm=missing driver.libvirt-vz-ct=missing driver.powervm=missing +driver.zvm=unknown [operation.rebuild] title=Rebuild instance @@ -349,20 +337,19 @@ notes=A possible use case is additional attributes need to be set 'personalities'. Though this is not considered to be a mandatory operation to support. cli=nova rebuild -driver.xenserver=complete driver.libvirt-kvm-x86=complete driver.libvirt-kvm-aarch64=complete driver.libvirt-kvm-ppc64=complete driver.libvirt-kvm-s390x=complete driver.libvirt-qemu-x86=complete driver.libvirt-lxc=complete -driver.libvirt-xen=complete driver.vmware=complete driver.hyperv=complete driver.ironic=complete driver.libvirt-vz-vm=complete driver.libvirt-vz-ct=complete driver.powervm=missing +driver.zvm=unknown [operation.get-guest-info] title=Guest instance status @@ -372,20 +359,19 @@ notes=Provides realtime information about the power state of the guest tracking changes in guests, this operation is considered mandatory to support. cli= -driver.xenserver=complete driver.libvirt-kvm-x86=complete driver.libvirt-kvm-aarch64=complete driver.libvirt-kvm-ppc64=complete driver.libvirt-kvm-s390x=complete driver.libvirt-qemu-x86=complete driver.libvirt-lxc=complete -driver.libvirt-xen=complete driver.vmware=complete driver.hyperv=complete driver.ironic=complete driver.libvirt-vz-vm=complete driver.libvirt-vz-ct=complete driver.powervm=complete +driver.zvm=complete [operation.get-host-uptime] title=Guest host uptime @@ -393,20 +379,19 @@ status=optional notes=Returns the result of host uptime since power on, it's used to report hypervisor status. cli= -driver.xenserver=complete driver.libvirt-kvm-x86=complete driver.libvirt-kvm-aarch64=complete driver.libvirt-kvm-ppc64=complete driver.libvirt-kvm-s390x=complete driver.libvirt-qemu-x86=complete driver.libvirt-lxc=complete -driver.libvirt-xen=complete driver.vmware=missing driver.hyperv=complete driver.ironic=missing driver.libvirt-vz-vm=complete driver.libvirt-vz-ct=complete driver.powervm=complete +driver.zvm=complete [operation.get-host-ip] title=Guest host ip @@ -414,20 +399,19 @@ status=optional notes=Returns the ip of this host, it's used when doing resize and migration. cli= -driver.xenserver=complete driver.libvirt-kvm-x86=complete driver.libvirt-kvm-aarch64=complete driver.libvirt-kvm-ppc64=complete driver.libvirt-kvm-s390x=complete driver.libvirt-qemu-x86=complete driver.libvirt-lxc=complete -driver.libvirt-xen=complete driver.vmware=complete driver.hyperv=complete driver.ironic=missing driver.libvirt-vz-vm=complete driver.libvirt-vz-ct=complete driver.powervm=complete +driver.zvm=complete [operation.live-migrate] title=Live migrate instance across hosts @@ -444,21 +428,19 @@ notes=Live migration provides a way to move an instance off one built on the container based virtualization. Therefore this operation is not considered mandatory to support. 
 cli=nova live-migration ;nova host-evacuate-live
-driver.xenserver=complete
 driver.libvirt-kvm-x86=complete
 driver.libvirt-kvm-aarch64=missing
 driver.libvirt-kvm-ppc64=complete
 driver.libvirt-kvm-s390x=complete
 driver.libvirt-qemu-x86=complete
 driver.libvirt-lxc=missing
-driver.libvirt-xen=complete
-driver.vmware=missing
-driver-notes.vmware=https://bugs.launchpad.net/nova/+bug/1192192
+driver.vmware=complete
 driver.hyperv=complete
 driver.ironic=missing
 driver.libvirt-vz-vm=complete
 driver.libvirt-vz-ct=complete
 driver.powervm=missing
+driver.zvm=missing
 
 [operation.force-live-migration-to-complete]
 title=Force live migration to complete
@@ -474,7 +456,6 @@ notes=Live migration provides a way to move a running instance to another
   a switch to post-copy mode. Otherwise the instance will be suspended
   until the migration is completed or aborted.
 cli=nova live-migration-force-complete
-driver.xenserver=missing
 driver.libvirt-kvm-x86=complete
 driver.libvirt-kvm-aarch64=missing
 driver-notes.libvirt-kvm-x86=Requires libvirt>=1.3.3, qemu>=2.5.0
@@ -485,13 +466,42 @@ driver-notes.libvirt-kvm-s390x=Requires libvirt>=1.3.3, qemu>=2.5.0
 driver.libvirt-qemu-x86=complete
 driver-notes.libvirt-qemu-x86=Requires libvirt>=1.3.3, qemu>=2.5.0
 driver.libvirt-lxc=missing
-driver.libvirt-xen=missing
 driver.vmware=missing
 driver.hyperv=missing
 driver.ironic=missing
 driver.libvirt-vz-vm=missing
 driver.libvirt-vz-ct=missing
 driver.powervm=missing
+driver.zvm=missing
+
+[operation.abort-in-progress-live-migration]
+title=Abort an in-progress or queued live migration
+status=optional
+notes=Live migration provides a way to move a running instance to another
+  compute host. It can sometimes take a large amount of time to complete
+  if an instance has a high rate of memory or disk page access, or it can
+  remain stuck in the "queued" status if there are too many in-progress
+  live migration jobs in the queue.
+  This operation provides the user with an option to abort in-progress
+  live migrations.
+  When the live migration job is still in "queued" or "preparing" status,
+  it can be aborted regardless of the underlying hypervisor, but once the
+  job status changes to "running", only some hypervisors support this
+  feature.
+cli=nova live-migration-abort
+driver.libvirt-kvm-x86=complete
+driver.libvirt-kvm-aarch64=missing
+driver.libvirt-kvm-ppc64=complete
+driver.libvirt-kvm-s390x=complete
+driver.libvirt-qemu-x86=complete
+driver.libvirt-lxc=missing
+driver.vmware=missing
+driver.hyperv=missing
+driver.ironic=missing
+driver.libvirt-vz-vm=unknown
+driver.libvirt-vz-ct=unknown
+driver.powervm=missing
+driver.zvm=missing
 
 [operation.launch]
 title=Launch instance
@@ -500,20 +510,19 @@ notes=Importing pre-existing running virtual machines on a host
   is considered out of scope of the cloud paradigm. Therefore this
   operation is mandatory to support in drivers.
 cli=
-driver.xenserver=complete
 driver.libvirt-kvm-x86=complete
 driver.libvirt-kvm-aarch64=complete
 driver.libvirt-kvm-ppc64=complete
 driver.libvirt-kvm-s390x=complete
 driver.libvirt-qemu-x86=complete
 driver.libvirt-lxc=complete
-driver.libvirt-xen=complete
 driver.vmware=complete
 driver.hyperv=complete
 driver.ironic=complete
 driver.libvirt-vz-vm=complete
 driver.libvirt-vz-ct=complete
 driver.powervm=complete
+driver.zvm=complete
 
 [operation.pause]
 title=Stop instance CPUs (pause)
@@ -528,20 +537,19 @@ notes=Stopping an instances CPUs can be thought of as roughly
   implement it. Therefore this operation is considered optional to
   support in drivers.
cli=nova pause -driver.xenserver=complete driver.libvirt-kvm-x86=complete driver.libvirt-kvm-aarch64=complete driver.libvirt-kvm-ppc64=complete driver.libvirt-kvm-s390x=complete driver.libvirt-qemu-x86=complete driver.libvirt-lxc=complete -driver.libvirt-xen=complete driver.vmware=missing driver.hyperv=complete driver.ironic=missing driver.libvirt-vz-vm=complete driver.libvirt-vz-ct=missing driver.powervm=missing +driver.zvm=complete [operation.reboot] title=Reboot instance @@ -552,20 +560,19 @@ notes=It is reasonable for a guest OS administrator to trigger a reboot can be achieved by a combination of stop+start. Therefore this operation is considered optional. cli=nova reboot -driver.xenserver=complete driver.libvirt-kvm-x86=complete driver.libvirt-kvm-aarch64=complete driver.libvirt-kvm-ppc64=complete driver.libvirt-kvm-s390x=complete driver.libvirt-qemu-x86=complete driver.libvirt-lxc=complete -driver.libvirt-xen=complete driver.vmware=complete driver.hyperv=complete driver.ironic=complete driver.libvirt-vz-vm=complete driver.libvirt-vz-ct=complete driver.powervm=complete +driver.zvm=complete [operation.rescue] title=Rescue instance @@ -579,20 +586,19 @@ notes=The rescue operation starts an instance in a special thrown away and a new instance created. Therefore this operation is considered optional to support in drivers. cli=nova rescue -driver.xenserver=complete driver.libvirt-kvm-x86=complete driver.libvirt-kvm-aarch64=unknown driver.libvirt-kvm-ppc64=complete driver.libvirt-kvm-s390x=complete driver.libvirt-qemu-x86=complete driver.libvirt-lxc=missing -driver.libvirt-xen=complete driver.vmware=complete driver.hyperv=complete driver.ironic=complete driver.libvirt-vz-vm=complete driver.libvirt-vz-ct=complete driver.powervm=missing +driver.zvm=missing [operation.resize] title=Resize instance @@ -606,14 +612,12 @@ notes=The resize operation allows the user to change a running running instance. Therefore this operation is considered optional to support in drivers. cli=nova resize -driver.xenserver=complete driver.libvirt-kvm-x86=complete driver.libvirt-kvm-aarch64=complete driver.libvirt-kvm-ppc64=complete driver.libvirt-kvm-s390x=complete driver.libvirt-qemu-x86=complete driver.libvirt-lxc=missing -driver.libvirt-xen=complete driver.vmware=complete driver.hyperv=complete driver.ironic=missing @@ -622,26 +626,26 @@ driver-notes.vz-vm=Resizing Virtuozzo instances implies guest filesystem resize driver.libvirt-vz-ct=complete driver-notes.vz-ct=Resizing Virtuozzo instances implies guest filesystem resize also driver.powervm=missing +driver.zvm=missing [operation.resume] title=Restore instance status=optional notes=See notes for the suspend operation cli=nova resume -driver.xenserver=complete driver.libvirt-kvm-x86=complete driver.libvirt-kvm-aarch64=complete driver.libvirt-kvm-ppc64=complete driver.libvirt-kvm-s390x=complete driver.libvirt-qemu-x86=complete driver.libvirt-lxc=missing -driver.libvirt-xen=complete driver.vmware=complete driver.hyperv=complete driver.ironic=missing driver.libvirt-vz-vm=complete driver.libvirt-vz-ct=complete driver.powervm=missing +driver.zvm=missing [operation.set-admin-password] title=Set instance admin password @@ -658,8 +662,6 @@ notes=Provides a mechanism to (re)set the password of the administrator this is just a convenient optimization. Therefore this operation is not considered mandatory for drivers to support. cli=nova set-password -driver.xenserver=complete -driver-notes.xenserver=Requires XenAPI agent on the guest. 
driver.libvirt-kvm-x86=complete driver.libvirt-kvm-aarch64=unknown driver-notes.libvirt-kvm-x86=Requires libvirt>=1.2.16 and hw_qemu_guest_agent. @@ -668,7 +670,6 @@ driver.libvirt-kvm-s390x=missing driver.libvirt-qemu-x86=complete driver-notes.libvirt-qemu-x86=Requires libvirt>=1.2.16 and hw_qemu_guest_agent. driver.libvirt-lxc=missing -driver.libvirt-xen=missing driver.vmware=missing driver.hyperv=missing driver.ironic=missing @@ -677,6 +678,7 @@ driver-notes.libvirt-vz-vm=Requires libvirt>=2.0.0 driver.libvirt-vz-ct=complete driver-notes.libvirt-vz-ct=Requires libvirt>=2.0.0 driver.powervm=missing +driver.zvm=missing [operation.snapshot] title=Save snapshot of instance disk @@ -692,15 +694,12 @@ notes=The snapshot operation allows the current state of the snapshot cannot be assumed. Therefore this operation is not considered mandatory to support. cli=nova image-create -driver.xenserver=complete driver.libvirt-kvm-x86=complete driver.libvirt-kvm-aarch64=complete driver.libvirt-kvm-ppc64=complete driver.libvirt-kvm-s390x=complete driver.libvirt-qemu-x86=complete driver.libvirt-lxc=missing -driver.libvirt-xen=partial -driver-notes.libvirt-xen=Only cold snapshots (pause + snapshot) supported driver.vmware=complete driver.hyperv=complete driver.ironic=missing @@ -711,6 +710,7 @@ driver-notes.powervm=When using the localdisk disk driver, snapshot is only supported if I/O is being hosted by the management partition. If hosting I/O on traditional VIOS, we are limited by the fact that a VSCSI device can't be mapped to two partitions (the VIOS and the management) at once. +driver.zvm=complete [operation.suspend] title=Suspend instance @@ -731,20 +731,19 @@ notes=Suspending an instance can be thought of as roughly the instance instead of suspending. Therefore this operation is considered optional to support. cli=nova suspend -driver.xenserver=complete driver.libvirt-kvm-x86=complete driver.libvirt-kvm-aarch64=complete driver.libvirt-kvm-ppc64=complete driver.libvirt-kvm-s390x=complete driver.libvirt-qemu-x86=complete driver.libvirt-lxc=missing -driver.libvirt-xen=complete driver.vmware=complete driver.hyperv=complete driver.ironic=missing driver.libvirt-vz-vm=complete driver.libvirt-vz-ct=complete driver.powervm=missing +driver.zvm=missing [operation.swap-volume] title=Swap block volumes @@ -758,20 +757,19 @@ notes=The swap volume operation is a mechanism for changing a running migration to work in the volume service. This is considered optional to support. cli=nova volume-update -driver.xenserver=missing driver.libvirt-kvm-x86=complete driver.libvirt-kvm-aarch64=unknown driver.libvirt-kvm-ppc64=complete driver.libvirt-kvm-s390x=complete driver.libvirt-qemu-x86=complete driver.libvirt-lxc=missing -driver.libvirt-xen=complete driver.vmware=missing driver.hyperv=missing driver.ironic=missing driver.libvirt-vz-vm=complete driver.libvirt-vz-ct=missing driver.powervm=missing +driver.zvm=missing [operation.terminate] title=Shutdown instance @@ -781,7 +779,6 @@ notes=The ability to terminate a virtual machine is required in avoid indefinitely ongoing billing. Therefore this operation is mandatory to support in drivers. cli=nova delete -driver.xenserver=complete driver.libvirt-kvm-x86=complete driver.libvirt-kvm-aarch64=complete driver.libvirt-kvm-ppc64=complete @@ -792,13 +789,13 @@ driver-notes.libvirt-lxc=Fails in latest Ubuntu Trusty kernel from security repository (3.13.0-76-generic), but works in upstream 3.13.x kernels as well as default Ubuntu Trusty latest kernel (3.13.0-58-generic). 
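For the set-admin-password rows above, the libvirt drivers rely on the QEMU guest agent inside the guest, requested via the ``hw_qemu_guest_agent`` image property noted in the matrix. A sketch, assuming an image that actually ships the agent::

    $ openstack image set --property hw_qemu_guest_agent=yes agent-image
    $ nova boot --image agent-image --flavor m1.small myserver
    $ nova set-password myserver    # prompts for the new admin password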
-driver.libvirt-xen=complete driver.vmware=complete driver.hyperv=complete driver.ironic=complete driver.libvirt-vz-vm=complete driver.libvirt-vz-ct=complete driver.powervm=complete +driver.zvm=complete [operation.trigger-crash-dump] title=Trigger crash dump @@ -809,40 +806,38 @@ notes=The trigger crash dump operation is a mechanism for triggering a means to dump the production memory image as a dump file which is useful for users. Therefore this operation is considered optional to support. cli=nova trigger-crash-dump -driver.xenserver=missing driver.libvirt-kvm-x86=complete driver.libvirt-kvm-aarch64=unknown driver.libvirt-kvm-ppc64=complete driver.libvirt-kvm-s390x=complete driver.libvirt-qemu-x86=complete driver.libvirt-lxc=missing -driver.libvirt-xen=missing driver.vmware=missing driver.hyperv=missing driver.ironic=complete driver.libvirt-vz-vm=missing driver.libvirt-vz-ct=missing driver.powervm=missing +driver.zvm=missing [operation.unpause] title=Resume instance CPUs (unpause) status=optional notes=See notes for the "Stop instance CPUs" operation cli=nova unpause -driver.xenserver=complete driver.libvirt-kvm-x86=complete driver.libvirt-kvm-aarch64=complete driver.libvirt-kvm-ppc64=complete driver.libvirt-kvm-s390x=complete driver.libvirt-qemu-x86=complete driver.libvirt-lxc=complete -driver.libvirt-xen=complete driver.vmware=missing driver.hyperv=complete driver.ironic=missing driver.libvirt-vz-vm=complete driver.libvirt-vz-ct=complete driver.powervm=missing +driver.zvm=complete [guest.disk.autoconfig] title=Auto configure disk @@ -851,20 +846,19 @@ notes=Partition and resize FS to match the size specified by flavors.root_gb, As this is hypervisor specific feature. Therefore this operation is considered optional to support. cli= -driver.xenserver=complete driver.libvirt-kvm-x86=missing driver.libvirt-kvm-aarch64=missing driver.libvirt-kvm-ppc64=missing driver.libvirt-kvm-s390x=missing driver.libvirt-qemu-x86=missing driver.libvirt-lxc=missing -driver.libvirt-xen=missing driver.vmware=missing driver.hyperv=missing driver.ironic=missing driver.libvirt-vz-vm=missing driver.libvirt-vz-ct=missing driver.powervm=missing +driver.zvm=complete [guest.disk.rate-limit] title=Instance disk I/O limits @@ -876,20 +870,19 @@ notes=The ability to set rate limits on virtual disks allows for of doing fine grained tuning. Therefore this is not considered to be an mandatory configuration to support. cli=nova limits -driver.xenserver=missing driver.libvirt-kvm-x86=complete -driver.libvirt-kvm-aarch64=unknown +driver.libvirt-kvm-aarch64=complete driver.libvirt-kvm-ppc64=complete driver.libvirt-kvm-s390x=complete driver.libvirt-qemu-x86=complete driver.libvirt-lxc=missing -driver.libvirt-xen=missing driver.vmware=missing driver.hyperv=missing driver.ironic=missing driver.libvirt-vz-vm=missing driver.libvirt-vz-ct=missing driver.powervm=missing +driver.zvm=missing [guest.setup.configdrive] title=Config drive support @@ -904,7 +897,6 @@ notes=The config drive provides an information channel into of the guest setup mechanisms is required to be supported by drivers, in order to enable login access. 
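The instance disk I/O limits rows above are typically driven through libvirt front-end quotas set as flavor extra specs; the ``quota:*`` keys below are the standard spec names, values illustrative::

    $ openstack flavor set m1.small \
        --property quota:disk_read_bytes_sec=10240000 \
        --property quota:disk_write_bytes_sec=10240000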
cli= -driver.xenserver=complete driver.libvirt-kvm-x86=complete driver.libvirt-kvm-aarch64=complete driver-notes.libvirt-kvm-aarch64=Requires kernel with proper config (oldest known: Ubuntu 4.13 HWE) @@ -912,13 +904,13 @@ driver.libvirt-kvm-ppc64=missing driver.libvirt-kvm-s390x=complete driver.libvirt-qemu-x86=complete driver.libvirt-lxc=complete -driver.libvirt-xen=complete driver.vmware=complete driver.hyperv=complete driver.ironic=complete driver.libvirt-vz-vm=complete driver.libvirt-vz-ct=missing driver.powervm=complete +driver.zvm=complete [guest.setup.inject.file] title=Inject files into disk image @@ -933,20 +925,19 @@ notes=This allows for the end user to provide data for multiple service or config drive. Therefore this operation is considered optional to support. cli= -driver.xenserver=complete driver.libvirt-kvm-x86=complete driver.libvirt-kvm-aarch64=unknown driver.libvirt-kvm-ppc64=missing driver.libvirt-kvm-s390x=missing driver.libvirt-qemu-x86=complete driver.libvirt-lxc=missing -driver.libvirt-xen=missing driver.vmware=missing driver.hyperv=missing driver.ironic=missing driver.libvirt-vz-vm=missing driver.libvirt-vz-ct=missing driver.powervm=missing +driver.zvm=missing [guest.setup.inject.networking] title=Inject guest networking config @@ -962,8 +953,6 @@ notes=This allows for static networking configuration (IP config drive. Therefore this operation is considered optional to support. cli= -driver.xenserver=partial -driver-notes.xenserver=Only for Debian derived guests driver.libvirt-kvm-x86=partial driver-notes.libvirt-kvm-x86=Only for Debian derived guests driver.libvirt-kvm-aarch64=unknown @@ -972,7 +961,6 @@ driver.libvirt-kvm-s390x=missing driver.libvirt-qemu-x86=partial driver-notes.libvirt-qemu-x86=Only for Debian derived guests driver.libvirt-lxc=missing -driver.libvirt-xen=missing driver.vmware=partial driver-notes.vmware=requires vmware tools installed driver.hyperv=missing @@ -980,6 +968,7 @@ driver.ironic=missing driver.libvirt-vz-vm=missing driver.libvirt-vz-ct=missing driver.powervm=missing +driver.zvm=missing [console.rdp] title=Remote desktop over RDP @@ -993,20 +982,19 @@ notes=This allows the administrator to interact with the graphical mandatory, however, a driver is required to support at least one of the listed console access operations. cli=nova get-rdp-console -driver.xenserver=missing driver.libvirt-kvm-x86=missing driver.libvirt-kvm-aarch64=missing driver.libvirt-kvm-ppc64=missing driver.libvirt-kvm-s390x=missing driver.libvirt-qemu-x86=missing driver.libvirt-lxc=missing -driver.libvirt-xen=missing driver.vmware=missing driver.hyperv=complete driver.ironic=missing driver.libvirt-vz-vm=missing driver.libvirt-vz-ct=missing driver.powervm=missing +driver.zvm=missing [console.serial.log] title=View serial console logs @@ -1021,20 +1009,19 @@ notes=This allows the administrator to query the logs of data operation is not mandatory, however, a driver is required to support at least one of the listed console access operations. 
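A config drive (see the rows above) can be requested per instance at boot time; deployers can instead force one for every instance with the ``force_config_drive`` option in nova.conf. Sketch with placeholder names::

    $ openstack server create --image cirros --flavor m1.tiny \
        --config-drive True myserver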
cli=nova console-log -driver.xenserver=complete driver.libvirt-kvm-x86=complete driver.libvirt-kvm-aarch64=complete driver.libvirt-kvm-ppc64=missing driver.libvirt-kvm-s390x=complete driver.libvirt-qemu-x86=complete driver.libvirt-lxc=missing -driver.libvirt-xen=complete driver.vmware=complete driver.hyperv=complete driver.ironic=missing driver.libvirt-vz-vm=missing driver.libvirt-vz-ct=missing driver.powervm=missing +driver.zvm=complete [console.serial.interactive] title=Remote interactive serial console @@ -1050,20 +1037,19 @@ notes=This allows the administrator to interact with the serial This feature was introduced in the Juno release with blueprint https://blueprints.launchpad.net/nova/+spec/serial-ports cli=nova get-serial-console -driver.xenserver=missing driver.libvirt-kvm-x86=complete driver.libvirt-kvm-aarch64=unknown driver.libvirt-kvm-ppc64=unknown driver.libvirt-kvm-s390x=complete driver.libvirt-qemu-x86=unknown driver.libvirt-lxc=unknown -driver.libvirt-xen=unknown driver.vmware=missing driver.hyperv=complete driver.ironic=complete driver.libvirt-vz-vm=missing driver.libvirt-vz-ct=missing driver.powervm=missing +driver.zvm=missing [console.spice] title=Remote desktop over SPICE @@ -1077,20 +1063,19 @@ notes=This allows the administrator to interact with the graphical mandatory, however, a driver is required to support at least one of the listed console access operations. cli=nova get-spice-console -driver.xenserver=missing driver.libvirt-kvm-x86=complete driver.libvirt-kvm-aarch64=unknown driver.libvirt-kvm-ppc64=missing driver.libvirt-kvm-s390x=missing driver.libvirt-qemu-x86=complete driver.libvirt-lxc=missing -driver.libvirt-xen=missing driver.vmware=missing driver.hyperv=missing driver.ironic=missing driver.libvirt-vz-vm=missing driver.libvirt-vz-ct=missing driver.powervm=missing +driver.zvm=missing [console.vnc] title=Remote desktop over VNC @@ -1104,20 +1089,19 @@ notes=This allows the administrator to interact with the graphical mandatory, however, a driver is required to support at least one of the listed console access operations. cli=nova get-vnc-console -driver.xenserver=complete driver.libvirt-kvm-x86=complete driver.libvirt-kvm-aarch64=complete driver.libvirt-kvm-ppc64=missing driver.libvirt-kvm-s390x=missing driver.libvirt-qemu-x86=complete driver.libvirt-lxc=missing -driver.libvirt-xen=complete driver.vmware=complete driver.hyperv=missing driver.ironic=missing driver.libvirt-vz-vm=complete driver.libvirt-vz-ct=complete driver.powervm=complete +driver.zvm=missing [storage.block] title=Block storage support @@ -1133,14 +1117,12 @@ notes=Block storage provides instances with direct attached the network. Therefore support for this configuration is not considered mandatory for drivers to support. cli= -driver.xenserver=complete driver.libvirt-kvm-x86=complete driver.libvirt-kvm-aarch64=complete driver.libvirt-kvm-ppc64=complete driver.libvirt-kvm-s390x=complete driver.libvirt-qemu-x86=complete driver.libvirt-lxc=complete -driver.libvirt-xen=complete driver.vmware=complete driver.hyperv=complete driver.ironic=complete @@ -1149,6 +1131,7 @@ driver.libvirt-vz-ct=missing driver.powervm=complete driver-notes.powervm=This is not tested for every CI run. Add a "powervm:volume-check" comment to trigger a CI job running volume tests. +driver.zvm=missing [storage.block.backend.fibrechannel] title=Block storage over fibre channel @@ -1158,14 +1141,12 @@ notes=To maximise performance of the block storage, it may be desirable technology on the compute hosts. 
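The console operations above map onto the CLI entries shown in the matrix, for example (placeholder server name)::

    $ nova console-log myserver              # view serial console logs
    $ nova get-vnc-console myserver novnc    # URL for the noVNC remote desktop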
Since this is just a performance optimization of the I/O path it is not considered mandatory to support. cli= -driver.xenserver=missing driver.libvirt-kvm-x86=complete driver.libvirt-kvm-aarch64=unknown driver.libvirt-kvm-ppc64=missing driver.libvirt-kvm-s390x=complete driver.libvirt-qemu-x86=complete driver.libvirt-lxc=complete -driver.libvirt-xen=complete driver.vmware=missing driver.hyperv=complete driver.ironic=missing @@ -1174,6 +1155,7 @@ driver.libvirt-vz-ct=missing driver.powervm=complete driver-notes.powervm=This is not tested for every CI run. Add a "powervm:volume-check" comment to trigger a CI job running volume tests. +driver.zvm=missing [storage.block.backend.iscsi] title=Block storage over iSCSI @@ -1186,20 +1168,19 @@ notes=If the driver wishes to support block storage, it is common to block storage, then this is considered mandatory to support, otherwise it is considered optional. cli= -driver.xenserver=complete driver.libvirt-kvm-x86=complete driver.libvirt-kvm-aarch64=unknown driver.libvirt-kvm-ppc64=complete driver.libvirt-kvm-s390x=complete driver.libvirt-qemu-x86=complete driver.libvirt-lxc=complete -driver.libvirt-xen=complete driver.vmware=complete driver.hyperv=complete driver.ironic=complete driver.libvirt-vz-vm=complete driver.libvirt-vz-ct=missing driver.powervm=missing +driver.zvm=missing [storage.block.backend.iscsi.auth.chap] title=CHAP authentication for iSCSI @@ -1209,20 +1190,19 @@ notes=If accessing the cinder iSCSI service over an untrusted LAN it protocol. CHAP is the commonly used authentication protocol for iSCSI. This is not considered mandatory to support. (?) cli= -driver.xenserver=complete driver.libvirt-kvm-x86=complete driver.libvirt-kvm-aarch64=unknown driver.libvirt-kvm-ppc64=complete driver.libvirt-kvm-s390x=complete driver.libvirt-qemu-x86=complete driver.libvirt-lxc=complete -driver.libvirt-xen=complete driver.vmware=missing driver.hyperv=complete driver.ironic=complete driver.libvirt-vz-vm=complete driver.libvirt-vz-ct=missing driver.powervm=missing +driver.zvm=missing [storage.image] title=Image storage support @@ -1234,153 +1214,41 @@ notes=This refers to the ability to boot an instance from an image on external PXE servers is out of scope. Therefore this is considered a mandatory storage feature to support. 
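The block storage rows above are exercised through the volume attach path; which transport is used (iSCSI, fibre channel, ...) is decided by the cinder backend rather than by nova. A minimal sketch::

    $ openstack volume create --size 1 myvol
    $ openstack server add volume myserver myvol   # same path as nova volume-attach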
cli=nova boot --image -driver.xenserver=complete -driver.libvirt-kvm-x86=complete -driver.libvirt-kvm-aarch64=complete -driver.libvirt-kvm-ppc64=complete -driver.libvirt-kvm-s390x=complete -driver.libvirt-qemu-x86=complete -driver.libvirt-lxc=complete -driver.libvirt-xen=complete -driver.vmware=complete -driver.hyperv=complete -driver.ironic=complete -driver.libvirt-vz-vm=complete -driver.libvirt-vz-ct=complete -driver.powervm=complete - -[networking.firewallrules] -title=Network firewall rules -status=optional -notes=Unclear how this is different from security groups -cli= -driver.xenserver=complete driver.libvirt-kvm-x86=complete driver.libvirt-kvm-aarch64=complete driver.libvirt-kvm-ppc64=complete driver.libvirt-kvm-s390x=complete driver.libvirt-qemu-x86=complete driver.libvirt-lxc=complete -driver.libvirt-xen=complete -driver.vmware=missing -driver.hyperv=missing -driver.ironic=missing -driver.libvirt-vz-vm=complete -driver.libvirt-vz-ct=complete -driver.powervm=complete - -[networking.routing] -title=Network routing -status=optional -notes=Unclear what this refers to -cli= -driver.xenserver=complete -driver.libvirt-kvm-x86=complete -driver.libvirt-kvm-aarch64=unknown -driver.libvirt-kvm-ppc64=missing -driver.libvirt-kvm-s390x=complete -driver.libvirt-qemu-x86=complete -driver.libvirt-lxc=complete -driver.libvirt-xen=complete -driver.vmware=complete -driver.hyperv=missing -driver.ironic=complete -driver.libvirt-vz-vm=complete -driver.libvirt-vz-ct=complete -driver.powervm=complete - -[networking.securitygroups] -title=Network security groups -status=optional -notes=The security groups feature provides a way to define rules - to isolate the network traffic of different instances running - on a compute host. This would prevent actions such as MAC and - IP address spoofing, or the ability to setup rogue DHCP servers. - In a private cloud environment this may be considered to be a - superfluous requirement. Therefore this is considered to be an - optional configuration to support. -cli= -driver.xenserver=complete -driver.libvirt-kvm-x86=complete -driver.libvirt-kvm-aarch64=complete -driver.libvirt-kvm-ppc64=complete -driver.libvirt-kvm-s390x=complete -driver.libvirt-qemu-x86=complete -driver.libvirt-lxc=complete -driver.libvirt-xen=complete -driver.vmware=partial -driver-notes.vmware=This is supported by the Neutron NSX plugins -driver.hyperv=missing -driver.ironic=missing -driver.libvirt-vz-vm=complete -driver.libvirt-vz-ct=complete -driver.powervm=complete - -[networking.topology.flat] -title=Flat networking -status=choice(networking.topology) -notes=Provide network connectivity to guests using a - flat topology across all compute nodes. At least one - of the networking configurations is mandatory to - support in the drivers. -cli= -driver.xenserver=complete -driver.libvirt-kvm-x86=complete -driver.libvirt-kvm-aarch64=unknown -driver.libvirt-kvm-ppc64=complete -driver.libvirt-kvm-s390x=complete -driver.libvirt-qemu-x86=complete -driver.libvirt-lxc=complete -driver.libvirt-xen=complete driver.vmware=complete driver.hyperv=complete driver.ironic=complete driver.libvirt-vz-vm=complete driver.libvirt-vz-ct=complete driver.powervm=complete - -[networking.topology.vlan] -title=VLAN networking -status=choice(networking.topology) -notes=Provide network connectivity to guests using VLANs to define the - topology when using nova-network. At least one of the networking - configurations is mandatory to support in the drivers. 
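The mandatory image storage feature above corresponds to the matrix's ``nova boot --image`` entry, e.g. with placeholder names::

    $ nova boot --image cirros --flavor m1.tiny myserver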
-cli= -driver.xenserver=complete -driver.libvirt-kvm-x86=complete -driver.libvirt-kvm-aarch64=unknown -driver.libvirt-kvm-ppc64=complete -driver.libvirt-kvm-s390x=complete -driver.libvirt-qemu-x86=complete -driver.libvirt-lxc=complete -driver.libvirt-xen=complete -driver.vmware=complete -driver.hyperv=missing -driver.ironic=missing -driver.libvirt-vz-vm=complete -driver.libvirt-vz-ct=complete -driver.powervm=complete +driver.zvm=complete [operation.uefi-boot] title=uefi boot status=optional notes=This allows users to boot a guest with uefi firmware. cli= -driver.xenserver=missing driver.libvirt-kvm-x86=complete driver.libvirt-kvm-aarch64=complete driver.libvirt-kvm-ppc64=missing driver.libvirt-kvm-s390x=missing driver.libvirt-qemu-x86=complete driver.libvirt-lxc=missing -driver.libvirt-xen=missing -driver.vmware=missing -driver.hyperv=missing +driver.vmware=complete +driver.hyperv=complete +driver-notes.hyperv=In order to use uefi, a second generation Hyper-V vm must + be requested. driver.ironic=partial driver-notes.ironic=depends on hardware support driver.libvirt-vz-vm=missing driver.libvirt-vz-ct=missing driver.powervm=missing +driver.zvm=missing [operation.device-tags] title=Device tags @@ -1398,20 +1266,19 @@ notes=This allows users to set tags on virtual devices when creating a Instead, device role tags should be used. Device tags can be applied to virtual network interfaces and block devices. cli=nova boot -driver.xenserver=complete driver.libvirt-kvm-x86=complete -driver.libvirt-kvm-aarch64=unknown +driver.libvirt-kvm-aarch64=complete driver.libvirt-kvm-ppc64=complete driver.libvirt-kvm-s390x=complete driver.libvirt-qemu-x86=complete driver.libvirt-lxc=unknown -driver.libvirt-xen=complete driver.vmware=missing driver.hyperv=complete driver.ironic=missing driver.libvirt-vz-vm=complete driver.libvirt-vz-ct=unknown driver.powervm=missing +driver.zvm=missing [operation.quiesce] title=quiesce @@ -1420,40 +1287,38 @@ notes=Quiesce the specified instance to prepare for snapshots. For libvirt, guest filesystems will be frozen through qemu agent. cli= -driver.xenserver=missing driver.libvirt-kvm-x86=complete driver.libvirt-kvm-aarch64=unknown driver.libvirt-kvm-ppc64=complete driver.libvirt-kvm-s390x=complete driver.libvirt-qemu-x86=complete driver.libvirt-lxc=missing -driver.libvirt-xen=missing driver.vmware=missing driver.hyperv=missing driver.ironic=missing driver.libvirt-vz-vm=missing driver.libvirt-vz-ct=missing driver.powervm=missing +driver.zvm=missing [operation.unquiesce] title=unquiesce status=optional notes=See notes for the quiesce operation cli= -driver.xenserver=missing driver.libvirt-kvm-x86=complete driver.libvirt-kvm-aarch64=unknown driver.libvirt-kvm-ppc64=complete driver.libvirt-kvm-s390x=complete driver.libvirt-qemu-x86=complete driver.libvirt-lxc=missing -driver.libvirt-xen=missing driver.vmware=missing driver.hyperv=missing driver.ironic=missing driver.libvirt-vz-vm=missing driver.libvirt-vz-ct=missing driver.powervm=missing +driver.zvm=missing [operation.multiattach-volume] title=Attach block volume to multiple instances @@ -1465,20 +1330,49 @@ notes=The multiattach volume operation is an extension to Note that for the libvirt driver, this is only supported if qemu<2.10 or libvirt>=3.10. 
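For the uefi boot rows above, the libvirt drivers typically select UEFI firmware via an image property; this sketch assumes an OVMF/AAVMF firmware package is present on the compute host::

    $ openstack image set --property hw_firmware_type=uefi uefi-image
    $ nova boot --image uefi-image --flavor m1.small uefi-server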
cli=nova volume-attach -driver.xenserver=missing driver.libvirt-kvm-x86=complete driver.libvirt-kvm-aarch64=unknown driver.libvirt-kvm-ppc64=complete driver.libvirt-kvm-s390x=complete driver.libvirt-qemu-x86=complete driver.libvirt-lxc=missing -driver.libvirt-xen=complete driver.vmware=missing driver.hyperv=missing driver.ironic=missing driver.libvirt-vz-vm=complete driver.libvirt-vz-ct=missing driver.powervm=missing +driver.zvm=missing + +[operation.encrypted-volume] +title=Attach encrypted block volume to server +status=optional +notes=This is the same as the attach volume operation + except with an encrypted block device. Encrypted + volumes are controlled via admin-configured volume + types in the block storage service. Since attach + volume is optional this feature is also optional for + compute drivers to support. +cli=nova volume-attach +driver.libvirt-kvm-x86=complete +driver-notes.libvirt-kvm-x86=For native QEMU decryption of the + encrypted volume (and rbd support), QEMU>=2.6.0 and libvirt>=2.2.0 + are required and only the "luks" type provider is supported. Otherwise + both "luks" and "cryptsetup" types are supported but not natively, i.e. + not all volume types are supported. +driver.libvirt-kvm-aarch64=unknown +driver.libvirt-kvm-ppc64=unknown +driver.libvirt-kvm-s390x=unknown +driver.libvirt-qemu-x86=complete +driver-notes.libvirt-qemu-x86=The same restrictions apply as KVM x86. +driver.libvirt-lxc=missing +driver.vmware=missing +driver.hyperv=missing +driver.ironic=missing +driver.libvirt-vz-vm=unknown +driver.libvirt-vz-ct=missing +driver.powervm=missing +driver.zvm=missing [operation.trusted-certs] title=Validate image with trusted certificates @@ -1489,20 +1383,19 @@ notes=Since trusted image certification validation is configurable drivers cannot support the feature since it is mostly just plumbing user requests through the virt driver when downloading images. cli=nova boot --trusted-image-certificate-id ... -driver.xenserver=missing driver.libvirt-kvm-x86=complete driver.libvirt-kvm-aarch64=complete driver.libvirt-kvm-ppc64=complete driver.libvirt-kvm-s390x=complete driver.libvirt-qemu-x86=complete driver.libvirt-lxc=complete -driver.libvirt-xen=complete driver.vmware=missing driver.hyperv=missing driver.ironic=missing driver.libvirt-vz-vm=complete driver.libvirt-vz-ct=complete driver.powervm=missing +driver.zvm=missing [operation.file-backed-memory] title=File backed memory @@ -1511,22 +1404,21 @@ notes=The file backed memory feature in Openstack allows a Nova node to serve guest memory from a file backing store. This mechanism uses the libvirt file memory source, causing guest instance memory to be allocated as files within the libvirt memory backing directory. This is only supported if - qemu>2.6 and libivrt>4.0.0 + qemu>2.6 and libvirt>4.0.0 cli= -driver.xenserver=missing driver.libvirt-kvm-x86=complete driver.libvirt-kvm-aarch64=unknown driver.libvirt-kvm-ppc64=unknown driver.libvirt-kvm-s390x=unknown driver.libvirt-qemu-x86=complete driver.libvirt-lxc=missing -driver.libvirt-xen=missing driver.vmware=missing driver.hyperv=missing driver.ironic=missing driver.libvirt-vz-vm=missing driver.libvirt-vz-ct=missing driver.powervm=missing +driver.zvm=missing [operation.report-cpu-traits] title=Report CPU traits @@ -1535,17 +1427,115 @@ notes=The report CPU traits feature in OpenStack allows a Nova node to report its CPU traits according to CPU mode configuration. This gives users the ability to boot instances based on desired CPU traits. 
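The new encrypted-volume rows are exercised by attaching a volume of an encrypted type; the type itself is admin-configured in the block storage service. The flags below are the standard client encryption options, with illustrative values and a placeholder volume UUID::

    $ openstack volume type create --encryption-provider luks \
        --encryption-cipher aes-xts-plain64 --encryption-key-size 256 \
        --encryption-control-location front-end LUKS
    $ openstack volume create --size 1 --type LUKS encvol
    $ nova volume-attach myserver <volume-uuid>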
cli= -driver.xenserver=missing driver.libvirt-kvm-x86=complete driver.libvirt-kvm-aarch64=unknown driver.libvirt-kvm-ppc64=complete driver.libvirt-kvm-s390x=missing driver.libvirt-qemu-x86=complete driver.libvirt-lxc=missing -driver.libvirt-xen=missing driver.vmware=missing driver.hyperv=missing driver.ironic=missing driver.libvirt-vz-vm=missing driver.libvirt-vz-ct=missing driver.powervm=missing +driver.zvm=missing + +[operation.port-with-resource-request] +title=SR-IOV ports with resource request +status=optional +notes=To support neutron SR-IOV ports (vnic_type=direct or vnic_type=macvtap) + with resource request the virt driver needs to include the 'parent_ifname' + key in each subdict which represents a VF under the 'pci_passthrough_devices' + key in the dict returned from the ComputeDriver.get_available_resource() + call. +cli=nova boot --nic port-id ... +driver.libvirt-kvm-x86=complete +driver.libvirt-kvm-aarch64=missing +driver.libvirt-kvm-ppc64=missing +driver.libvirt-kvm-s390x=missing +driver.libvirt-qemu-x86=complete +driver.libvirt-lxc=missing +driver.vmware=missing +driver.hyperv=missing +driver.ironic=missing +driver.libvirt-vz-vm=missing +driver.libvirt-vz-ct=missing +driver.powervm=missing +driver.zvm=missing + +[operation.boot-encrypted-vm] +title=Boot instance with secure encrypted memory +status=optional +notes=The feature allows VMs to be booted with their memory + hardware-encrypted with a key specific to the VM, to help + protect the data residing in the VM against access from anyone + other than the user of the VM. The Configuration and Security + Guides specify usage of this feature. +cli=openstack server create +driver.libvirt-kvm-x86=partial +driver-notes.libvirt-kvm-x86=This feature is currently only + available with hosts which support the SEV (Secure Encrypted + Virtualization) technology from AMD. +driver.libvirt-kvm-aarch64=missing +driver.libvirt-kvm-ppc64=missing +driver.libvirt-kvm-s390x=missing +driver.libvirt-qemu-x86=missing +driver.libvirt-lxc=missing +driver.vmware=missing +driver.hyperv=missing +driver.ironic=missing +driver.libvirt-vz-vm=missing +driver.libvirt-vz-ct=missing +driver.powervm=missing +driver.zvm=missing + +[operation.cache-images] +title=Cache base images for faster instance boot +status=optional +notes=Drivers supporting this feature cache base images on the compute host so + that subsequent boots need not incur the expense of downloading them. Partial + support entails caching an image after the first boot that uses it. Complete + support allows priming the cache so that the first boot also benefits. Image + caching support is tunable via config options in the [image_cache] group. +cli=openstack server create +driver.libvirt-kvm-x86=complete +driver.libvirt-kvm-aarch64=complete +driver.libvirt-kvm-ppc64=complete +driver.libvirt-kvm-s390x=complete +driver.libvirt-qemu-x86=complete +driver.libvirt-lxc=unknown +driver.vmware=partial +driver.hyperv=partial +driver.ironic=missing +driver.libvirt-vz-vm=complete +driver.libvirt-vz-ct=complete +driver.powervm=partial +driver-notes.powervm=The PowerVM driver does image caching natively when using + the SSP disk driver. It does not use the config options in the [image_cache] + group. +driver.zvm=missing + +[operation.boot-emulated-tpm] +title=Boot instance with an emulated trusted platform module (TPM) +status=optional +notes=Allows VMs to be booted with an emulated trusted platform module (TPM) + device. 
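For the new boot-encrypted-vm rows, SEV is requested through a flavor extra spec (or image property); per the feature's documentation the guest must also use UEFI firmware and the q35 machine type, so this sketch sets those as well (names are placeholders)::

    $ openstack flavor set --property hw:mem_encryption=True sev-flavor
    $ openstack image set --property hw_firmware_type=uefi \
        --property hw_machine_type=q35 sev-image
    $ openstack server create --flavor sev-flavor --image sev-image sev-server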
Only lifecycle operations performed by the VM owner are supported, as + the user's credentials are required to unlock the virtual device files on the + host. +cli=openstack server create +driver.libvirt-kvm-x86=partial +driver-notes.libvirt-kvm-x86=Move operations are not yet supported. +driver.libvirt-kvm-aarch64=missing +driver.libvirt-kvm-ppc64=missing +driver.libvirt-kvm-s390x=missing +driver.libvirt-qemu-x86=partial +driver-notes.libvirt-qemu-x86=Move operations are not yet supported. +driver.libvirt-lxc=missing +driver.vmware=missing +driver.hyperv=missing +driver.ironic=missing +driver.libvirt-vz-vm=missing +driver.libvirt-vz-ct=missing +driver.powervm=missing +driver.zvm=missing diff --git a/doc/source/user/support-matrix.rst b/doc/source/user/support-matrix.rst index 1e3488360d0..89094ba7475 100644 --- a/doc/source/user/support-matrix.rst +++ b/doc/source/user/support-matrix.rst @@ -1,4 +1,3 @@ - Feature Support Matrix ====================== @@ -7,7 +6,7 @@ following general guiding principles were applied * **Inclusivity** - people have shown ability to make effective use of a wide range of virtualization technologies with broadly - varying featuresets. Aiming to keep the requirements as inclusive + varying feature sets. Aiming to keep the requirements as inclusive as possible, avoids second-guessing what a user may wish to use the cloud compute service for. @@ -21,7 +20,7 @@ following general guiding principles were applied * **Competition** - an early leader in the cloud compute service space was Amazon EC2. A sanity check for whether a feature should be mandatory is to consider whether it was available in the first - public release of EC2. This had quite a narrow featureset, but + public release of EC2. This had quite a narrow feature set, but none the less found very high usage in many use cases. So it serves to illustrate that many features need not be considered mandatory in order to get useful work done. diff --git a/doc/source/user/upgrade.rst b/doc/source/user/upgrade.rst deleted file mode 100644 index 4cb77c9b7ec..00000000000 --- a/doc/source/user/upgrade.rst +++ /dev/null @@ -1,358 +0,0 @@ -.. - Copyright 2014 Rackspace - All Rights Reserved. - - Licensed under the Apache License, Version 2.0 (the "License"); you may - not use this file except in compliance with the License. You may obtain - a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - License for the specific language governing permissions and limitations - under the License. - -Upgrades -======== - -Nova aims to provide upgrades with minimal downtime. - -Firstly, the data plane. There should be no VM downtime when you upgrade -Nova. Nova has had this since the early days, with the exception of -some nova-network related services. - -Secondly, we want no downtime during upgrades of the Nova control plane. -This document is trying to describe how we can achieve that. - -Once we have introduced the key concepts relating to upgrade, we will -introduce the process needed for a no downtime upgrade of nova. - -.. _minimal_downtime_upgrade: - -Minimal Downtime Upgrade Process --------------------------------- - - -Plan your upgrade -''''''''''''''''' - -* Read and ensure you understand the release notes for the next release. 
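The emulated TPM rows above are driven by image properties on the libvirt drivers; a vTPM additionally depends on host support (swtpm) and a configured key manager, so treat this as a sketch with placeholder names::

    $ openstack image set --property hw_tpm_model=tpm-crb \
        --property hw_tpm_version=2.0 tpm-image
    $ openstack server create --image tpm-image --flavor m1.small tpm-server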
- -* You should ensure all required steps from the previous upgrade have been - completed, such as data migrations. - -* Make a backup of your database. Nova does not support downgrading of the - database. Hence, in case of upgrade failure, restoring database from backup - is the only choice. - -* During upgrade be aware that there will be additional load on nova-conductor. - You may find you need to add extra nova-conductor workers to deal with the - additional upgrade related load. - - -Rolling upgrade process -''''''''''''''''''''''' - -To reduce downtime, the compute services can be upgraded in a rolling fashion. It -means upgrading a few services at a time. This results in a condition where -both old (N) and new (N+1) nova-compute services co-exist for a certain time -period. Note that, there is no upgrade of the hypervisor here, this is just -upgrading the nova services. If reduced downtime is not a concern (or lower -complexity is desired), all services may be taken down and restarted at the -same time. - -#. Before maintenance window: - - * Start the process with the controller node. Install the code for the next - version of Nova, either in a venv or a separate control plane node, - including all the python dependencies. - - * Using the newly installed nova code, run the DB sync. - (``nova-manage api_db sync``; ``nova-manage db sync``). These schema - change operations should have minimal or no effect on performance, and - should not cause any operations to fail. - - * At this point, new columns and tables may exist in the database. These - DB schema changes are done in a way that both the N and N+1 release can - perform operations against the same schema. - -#. During maintenance window: - - * Several nova services rely on the external placement service being at the - latest level. Therefore, you must upgrade placement before any nova - services. See the - :ref:`placement upgrade notes ` for more - details on upgrading the placement service. - - * For maximum safety (no failed API operations), gracefully shutdown all - the services (i.e. SIG_TERM) except nova-compute. - - * Before restarting services with new code, perform the release-specific - readiness check with ``nova-status upgrade check``. See the - :ref:`nova-status upgrade check ` for more details - on status check. - - * Start all services on the new code, with - ``[upgrade_levels]compute=auto`` in nova.conf. It is safest to - start nova-conductor first and nova-api last. Note that you may - use a static alias name instead of ``auto``, such as - ``[upgrade_levels]compute=``. Also note that this step is - only required if compute services are not upgraded in lock-step - with the control services. - - * If desired, gracefully shutdown nova-compute (i.e. SIG_TERM) - services in small batches, then start the new version of the code - with: ``[upgrade_levels]compute=auto``. If this batch-based approach - is used, only a few compute nodes will have any delayed API - actions, and to ensure there is enough capacity online to service - any boot requests that happen during this time. - -#. After maintenance window: - - * Once all services are running the new code, double check in the DB that - there are no old orphaned service records using `nova service-list`. - - * Now that all services are upgraded, we need to send the SIG_HUP signal, so all - the services clear any cached service version data. 
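Condensed into the commands the steps above name, the control plane part of the rolling upgrade looks like this when run from the newly installed code::

    $ nova-manage api_db sync
    $ nova-manage db sync
    $ nova-status upgrade check

    # nova.conf for services restarted on the new code:
    [upgrade_levels]
    compute = auto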
When a new service - starts, it automatically detects which version of the compute RPC protocol - to use, and it can decide if it is safe to do any online data migrations. - Note, if you used a static value for the upgrade_level, such as - ``[upgrade_levels]compute=``, you must update nova.conf to remove - that configuration value and do a full service restart. - - * Now all the services are upgraded and signaled, the system is able to use - the latest version of the RPC protocol and can access all of the - features in the new release. - - * Once all the services are running the latest version of the code, and all - the services are aware they all have been upgraded, it is safe to - transform the data in the database into its new format. While some of this - work happens on demand when the system reads a database row that needs - updating, we must get all the data transformed into the current version - before the next upgrade. Additionally, some data may not be transformed - automatically so performing the data migration is necessary to avoid - performance degradation due to compatibility routines. - - * This process can put significant extra write load on the - database. Complete all online data migrations using: - ``nova-manage db online_data_migrations --max-count ``. Note - that you can use the ``--max-count`` argument to reduce the load this - operation will place on the database, which allows you to run a - small chunk of the migrations until all of the work is done. Each - time it is run, it will show a summary of completed and remaining - records. You run this command until you see completed and - remaining records as zeros. The chunk size you should use depend - on your infrastructure and how much additional load you can - impose on the database. To reduce load, perform smaller batches - with delays between chunks. To reduce time to completion, run - larger batches. - - * At this point, you must also ensure you update the configuration, to stop - using any deprecated features or options, and perform any required work - to transition to alternative features. All the deprecated options should - be supported for one cycle, but should be removed before your next - upgrade is performed. - - -Current Database Upgrade Types ------------------------------- - -Currently Nova has 2 types of database upgrades that are in use. - -#. Schema Migrations -#. Data Migrations - - -Schema Migrations -'''''''''''''''''' - -Schema migrations are defined in -``nova/db/sqlalchemy/migrate_repo/versions`` and in -``nova/db/sqlalchemy/api_migrations/migrate_repo/versions``. They are -the routines that transform our database structure, which should be -additive and able to be applied to a running system before service -code has been upgraded. - -.. note:: - - The API database migrations should be assumed to run before the - migrations for the main/cell databases. This is because the former - contains information about how to find and connect to the latter. - Some management commands that operate on multiple cells will attempt - to list and iterate over cell mapping records, which require a - functioning API database schema. - - -Data Migrations -''''''''''''''''' - -Online data migrations occur in two places: - -#. Inline migrations that occur as part of normal run-time - activity as data is read in the old format and written in the - new format -#. 
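The online data migration step described above is run in bounded batches; repeat the command until it reports zero completed and remaining records, e.g.::

    $ nova-manage db online_data_migrations --max-count 1000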
Background online migrations that are performed using - ``nova-manage`` to complete transformations that will not occur - incidentally due to normal runtime activity. - -An example of online data migrations are the flavor migrations done as part -of Nova object version 1.18. This included a transient migration of flavor -storage from one database location to another. - -.. note:: - - Database downgrades are not supported. - -Migration policy: -''''''''''''''''' - -The following guidelines for schema and data migrations are followed in order -to ease upgrades: - -* Additive schema migrations - In general, almost all schema migrations should - be additive. Put simply, they should only create elements like columns, - indices, and tables. - -* Subtractive schema migrations - To remove an element like a column or table - during the N release cycle: - - #. The element must be deprecated and retained for backward compatibility. - (This allows for graceful upgrade from N to N+1.) - - #. Data migration, by the objects layer, must completely migrate data from - the old version of the schema to the new version. - - * `Data migration example - `_ - * `Data migration enforcement example - `_ - (for sqlalchemy migrate/deprecated scripts): - - #. The column can then be removed with a migration at the start of N+2. - -* All schema migrations should be idempotent. (For example, a migration - should check if an element exists in the schema before attempting to add - it.) This logic comes for free in the autogenerated workflow of - the online migrations. - -* Constraints - When adding a foreign or unique key constraint, the schema - migration code needs to handle possible problems with data before applying - the constraint. (Example: A unique constraint must clean up duplicate - records before applying said constraint.) - -* Data migrations - As mentioned above, data migrations will be done in an - online fashion by custom code in the object layer that handles moving data - between the old and new portions of the schema. In addition, for each type - of data migration performed, there should exist a nova-manage option for an - operator to manually request that rows be migrated. - - * See `flavor migration spec - `_ - for an example of data migrations in the object layer. - -*Future* work - - #. Adding plumbing to enforce that relevant data migrations are completed - before running `contract` in the expand/migrate/contract schema migration - workflow. A potential solution would be for `contract` to run a gating - test for each specific subtract operation to determine if the operation - can be completed. - -Concepts --------- - -Here are the key concepts you need to know before reading the section on the -upgrade process: - -RPC version pinning - Through careful RPC versioning, newer nodes are able to talk to older - nova-compute nodes. When upgrading control plane nodes, we can pin them - at an older version of the compute RPC API, until all the compute nodes - are able to be upgraded. - https://wiki.openstack.org/wiki/RpcMajorVersionUpdates - - .. note:: - - This does not apply to cells v1 deployments since cells v1 does not - support rolling upgrades. It is assumed that cells v1 deployments are - upgraded in lockstep so n-1 cells compatibility does not work. - - The procedure for rolling upgrades with multiple cells v2 cells is not - yet determined. - -Online Configuration Reload - During the upgrade, we pin new serves at the older RPC version. 
When all - services are updated to use newer code, we need to unpin them so we are - able to use any new functionality. - To avoid having to restart the service, using the current SIGHUP signal - handling, or otherwise, ideally we need a way to update the currently - running process to use the latest configuration. - -Graceful service shutdown - Many nova services are python processes listening for messages on a - AMQP queue, including nova-compute. When sending the process the SIGTERM - the process stops getting new work from its queue, completes any - outstanding work, then terminates. During this process, messages can be - left on the queue for when the python process starts back up. - This gives us a way to shutdown a service using older code, and start - up a service using newer code with minimal impact. If its a service that - can have multiple workers, like nova-conductor, you can usually add the - new workers before the graceful shutdown of the old workers. In the case - of singleton services, like nova-compute, some actions could be delayed - during the restart, but ideally no actions should fail due to the restart. - - .. note:: - - While this is true for the RabbitMQ RPC backend, we need to confirm - what happens for other RPC backends. - -API load balancer draining - When upgrading API nodes, you can make your load balancer only send new - connections to the newer API nodes, allowing for a seamless update of your - API nodes. - -Expand/Contract DB Migrations - Modern databases are able to make many schema changes while you are still - writing to the database. Taking this a step further, we can make all DB - changes by first adding the new structures, expanding. Then you can slowly - move all the data into a new location and format. Once that is complete, - you can drop bits of the scheme that are no long needed, - i.e. contract. This happens multiple cycles after we have stopped - using a particular piece of schema, and can happen in a schema - migration without affecting runtime code. - -Online Data Migrations using objects - In Kilo we are moving all data migration into the DB objects code. - When trying to migrate data in the database from the old format to the - new format, this is done in the object code when reading or saving things - that are in the old format. For records that are not updated, you need to - run a background process to convert those records into the newer format. - This process must be completed before you contract the database schema. - -DB prune deleted rows - Currently resources are soft deleted in the main database, so users are able - to track instances in the DB that are created and destroyed in production. - However, most people have a data retention policy, of say 30 days or 90 - days after which they will want to delete those entries. Not deleting - those entries affects DB performance as indices grow very large and data - migrations take longer as there is more data to migrate. - -nova-conductor object backports - RPC pinning ensures new services can talk to the older service's method - signatures. But many of the parameters are objects that may well be too - new for the old service to understand, so you are able to send the object - back to the nova-conductor to be downgraded to a version the older service - can understand. - - -Testing -------- - -Once we have all the pieces in place, we hope to move the Grenade testing -to follow this new pattern. 
- -The current tests only cover the existing upgrade process where: - -* old computes can run with new control plane -* but control plane is turned off for DB migrations diff --git a/doc/source/user/user-data.rst b/doc/source/user/user-data.rst deleted file mode 100644 index 7e71482c531..00000000000 --- a/doc/source/user/user-data.rst +++ /dev/null @@ -1,22 +0,0 @@ -============================== -Provide user data to instances -============================== - -*User data* is a blob of data that the user can specify when they launch an -instance. The instance can access this data through the metadata service or -config drive. Commonly used to pass a shell script that the instance runs on -boot. - -For example, one application that uses user data is the -`cloud-init `__ system, -which is an open-source package from Ubuntu that is available on various -Linux distributions and which handles early initialization of a cloud -instance. - -You can place user data in a local file and pass it through the -``--user-data `` parameter at instance creation. - -.. code-block:: console - - $ openstack server create --image ubuntu-cloudimage --flavor 1 \ - --user-data mydata.file VM_INSTANCE diff --git a/doc/source/user/vendordata.rst b/doc/source/user/vendordata.rst deleted file mode 100644 index 1fc264d66c7..00000000000 --- a/doc/source/user/vendordata.rst +++ /dev/null @@ -1,133 +0,0 @@ -Vendordata -========== - -Nova presents configuration information to instances it starts via a mechanism -called metadata. This metadata is made available via either a configdrive, or -the metadata service. These mechanisms are widely used via helpers such as -cloud-init to specify things like the root password the instance should use. -There are three separate groups of people who need to be able to specify -metadata for an instance. - -User provided data ------------------- - -The user who booted the instance can pass metadata to the instance in several -ways. For authentication keypairs, the keypairs functionality of the Nova APIs -can be used to upload a key and then specify that key during the Nova boot API -request. For less structured data, a small opaque blob of data may be passed -via the user-data feature of the Nova API. Examples of such unstructured data -would be the puppet role that the instance should use, or the HTTP address of a -server to fetch post-boot configuration information from. - -Nova provided data ------------------- - -Nova itself needs to pass information to the instance via its internal -implementation of the metadata system. Such information includes the network -configuration for the instance, as well as the requested hostname for the -instance. This happens by default and requires no configuration by the user or -deployer. - -Deployer provided data ----------------------- - -There is however a third type of data. It is possible that the deployer of -OpenStack needs to pass data to an instance. It is also possible that this data -is not known to the user starting the instance. An example might be a -cryptographic token to be used to register the instance with Active Directory -post boot -- the user starting the instance should not have access to Active -Directory to create this token, but the Nova deployment might have permissions -to generate the token on the user's behalf. - -Nova supports a mechanism to add "vendordata" to the metadata handed to -instances. This is done by loading named modules, which must appear in the nova -source code. 
We provide two such modules: - -- StaticJSON: a module which can include the contents of a static JSON file - loaded from disk. This can be used for things which don't change between - instances, such as the location of the corporate puppet server. - -- DynamicJSON: a module which will make a request to an external REST service - to determine what metadata to add to an instance. This is how we recommend - you generate things like Active Directory tokens which change per instance. - -Tell me more about DynamicJSON -============================== - -To use DynamicJSON, you configure it like this: - -- Add "DynamicJSON" to the vendordata_providers configuration option. This can - also include "StaticJSON" if you'd like. -- Specify the REST services to be contacted to generate metadata in the - vendordata_dynamic_targets configuration option. There can be more than one - of these, but note that they will be queried once per metadata request from - the instance, which can mean a fair bit of traffic depending on your - configuration and the configuration of the instance. - -The format for an entry in vendordata_dynamic_targets is like this: - - @ - -Where name is a short string not including the '@' character, and where the -URL can include a port number if so required. An example would be:: - - testing@http://127.0.0.1:125 - -Metadata fetched from this target will appear in the metadata service at a -new file called vendordata2.json, with a path (either in the metadata service -URL or in the configdrive) like this: - - openstack/2016-10-06/vendor_data2.json - -For each dynamic target, there will be an entry in the JSON file named after -that target. For example:: - - { - "testing": { - "value1": 1, - "value2": 2, - "value3": "three" - } - } - -Do not specify the same name more than once. If you do, we will ignore -subsequent uses of a previously used name. - -The following data is passed to your REST service as a JSON encoded POST: - -+-------------+-------------------------------------------------+ -| Key | Description | -+=============+=================================================+ -| project-id | The ID of the project that owns this instance. | -+-------------+-------------------------------------------------+ -| instance-id | The UUID of this instance. | -+-------------+-------------------------------------------------+ -| image-id | The ID of the image used to boot this instance. | -+-------------+-------------------------------------------------+ -| user-data | As specified by the user at boot time. | -+-------------+-------------------------------------------------+ -| hostname | The hostname of the instance. | -+-------------+-------------------------------------------------+ -| metadata | As specified by the user at boot time. | -+-------------+-------------------------------------------------+ - -Deployment considerations -========================= - -Nova provides authentication to external metadata services in order to provide -some level of certainty that the request came from nova. This is done by -providing a service token with the request -- you can then just deploy your -metadata service with the keystone authentication WSGI middleware. This is -configured using the keystone authentication parameters in the -``vendordata_dynamic_auth`` configuration group. 
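Pulling the DynamicJSON instructions above together into a concrete nova.conf fragment (the target is the example from the text; on recent releases these options live in the ``[api]`` group)::

    [api]
    vendordata_providers = StaticJSON,DynamicJSON
    vendordata_dynamic_targets = testing@http://127.0.0.1:125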
- -References -========== - -* Michael Still's talk from the Queens summit in Sydney: - `Metadata, User Data, Vendor Data, oh my!`_ -* Michael's blog post on `deploying a simple vendordata service`_ which - provides more details and sample code to supplement the documentation above. - -.. _Metadata, User Data, Vendor Data, oh my!: https://www.openstack.org/videos/sydney-2017/metadata-user-data-vendor-data-oh-my -.. _deploying a simple vendordata service: http://www.stillhq.com/openstack/000022.html diff --git a/doc/test/redirect-tests.txt b/doc/test/redirect-tests.txt index c73790cbf45..4ee7d865c99 100644 --- a/doc/test/redirect-tests.txt +++ b/doc/test/redirect-tests.txt @@ -1,5 +1,6 @@ /nova/latest/addmethod.openstackapi.html 301 /nova/latest/contributor/api-2.html /nova/latest/admin/flavors2.html 301 /nova/latest/admin/flavors.html +/nova/latest/admin/quotas2.html 301 /nova/latest/admin/quotas.html /nova/latest/admin/numa.html 301 /nova/latest/admin/cpu-topologies.html /nova/latest/aggregates.html 301 /nova/latest/user/aggregates.html /nova/latest/api_microversion_dev.html 301 /nova/latest/contributor/microversions.html @@ -14,12 +15,12 @@ /nova/latest/development.environment.html 301 /nova/latest/contributor/development-environment.html /nova/latest/devref/api.html 301 /nova/latest/contributor/api.html /nova/latest/devref/cells.html 301 /nova/latest/user/cells.html -/nova/latest/devref/filter_scheduler.html 301 /nova/latest/user/filter-scheduler.html +/nova/latest/devref/filter_scheduler.html 301 /nova/latest/admin/scheduling.html # catch all, if we hit something in devref assume it moved to # reference unless we have already triggered a hit above. /nova/latest/devref/any-page.html 301 /nova/latest/reference/any-page.html /nova/latest/feature_classification.html 301 /nova/latest/user/feature-classification.html -/nova/latest/filter_scheduler.html 301 /nova/latest/user/filter-scheduler.html +/nova/latest/filter_scheduler.html 301 /nova/latest/admin/scheduling.html /nova/latest/gmr.html 301 /nova/latest/reference/gmr.html /nova/latest/how_to_get_involved.html 301 /nova/latest/contributor/how-to-get-involved.html /nova/latest/i18n.html 301 /nova/latest/reference/i18n.html @@ -27,14 +28,11 @@ /nova/latest/man/nova-api-metadata.html 301 /nova/latest/cli/nova-api-metadata.html /nova/latest/man/nova-api-os-compute.html 301 /nova/latest/cli/nova-api-os-compute.html /nova/latest/man/nova-api.html 301 /nova/latest/cli/nova-api.html -/nova/latest/man/nova-cells.html 301 /nova/latest/cli/nova-cells.html # this is gone and never coming back, indicate that to the end users +/nova/latest/man/nova-cells.html 301 /nova/latest/cli/nova-cells.html /nova/latest/man/nova-compute.html 301 /nova/latest/cli/nova-compute.html /nova/latest/man/nova-conductor.html 301 /nova/latest/cli/nova-conductor.html -/nova/latest/man/nova-console.html 301 /nova/latest/cli/nova-console.html -/nova/latest/man/nova-consoleauth.html 301 /nova/latest/cli/nova-consoleauth.html /nova/latest/man/nova-dhcpbridge.html 301 /nova/latest/cli/nova-dhcpbridge.html -/nova/latest/man/nova-idmapshift.html 301 /nova/latest/cli/nova-idmapshift.html /nova/latest/man/nova-manage.html 301 /nova/latest/cli/nova-manage.html /nova/latest/man/nova-network.html 301 /nova/latest/cli/nova-network.html /nova/latest/man/nova-novncproxy.html 301 /nova/latest/cli/nova-novncproxy.html @@ -43,7 +41,6 @@ /nova/latest/man/nova-serialproxy.html 301 /nova/latest/cli/nova-serialproxy.html /nova/latest/man/nova-spicehtml5proxy.html 301 
/nova/latest/cli/nova-spicehtml5proxy.html /nova/latest/man/nova-status.html 301 /nova/latest/cli/nova-status.html -/nova/latest/man/nova-xvpvncproxy.html 301 /nova/latest/cli/nova-xvpvncproxy.html /nova/latest/notifications.html 301 /nova/latest/reference/notifications.html /nova/latest/placement.html 301 /nova/latest/user/placement.html /nova/latest/placement_dev.html 301 /nova/latest/contributor/placement.html @@ -65,8 +62,22 @@ /nova/latest/testing/serial-console.html 301 /nova/latest/contributor/testing/serial-console.html /nova/latest/testing/zero-downtime-upgrade.html 301 /nova/latest/contributor/testing/zero-downtime-upgrade.html /nova/latest/threading.html 301 /nova/latest/reference/threading.html -/nova/latest/upgrade.html 301 /nova/latest/user/upgrade.html -/nova/latest/vendordata.html 301 /nova/latest/user/vendordata.html +/nova/latest/upgrade.html 301 /nova/latest/admin/upgrades.html +/nova/latest/user/aggregates.html 301 /nova/latest/admin/aggregates.html +/nova/latest/user/cellsv2_layout.html 301 /nova/latest/user/cellsv2-layout.html +/nova/latest/user/config-drive.html 301 /nova/latest/user/metadata.html +/nova/latest/user/filter-scheduler.html 301 /nova/latest/admin/scheduling.html +/nova/latest/user/metadata-service.html 301 /nova/latest/user/metadata.html +/nova/latest/user/placement.html 301 /placement/latest/ +/nova/latest/user/user-data.html 301 /nova/latest/user/metadata.html +/nova/latest/user/upgrade.html 301 /nova/latest/admin/upgrades.html +/nova/latest/user/vendordata.html 301 /nova/latest/user/metadata.html +/nova/latest/vendordata.html 301 /nova/latest/user/metadata.html /nova/latest/vmstates.html 301 /nova/latest/reference/vm-states.html /nova/latest/wsgi.html 301 /nova/latest/user/wsgi.html -/nova/latest/user/cellsv2_layout.html 301 /nova/latest/user/cellsv2-layout.html +/nova/latest/admin/adv-config.html 301 /nova/latest/admin/index.html +/nova/latest/admin/configuration/schedulers.html 301 /nova/latest/admin/scheduling.html +/nova/latest/admin/system-admin.html 301 /nova/latest/admin/index.html +/nova/latest/admin/port_with_resource_request.html 301 /nova/latest/admin/ports-with-resource-requests.html +/nova/latest/admin/manage-users.html 301 /nova/latest/admin/arch.html +/nova/latest/admin/mitigation-for-Intel-MDS-security-flaws.html 301 /nova/latest/admin/cpu-models.html diff --git a/etc/nova/README-policy.yaml.txt b/etc/nova/README-policy.yaml.txt index 7599f807127..aee7a49f81b 100644 --- a/etc/nova/README-policy.yaml.txt +++ b/etc/nova/README-policy.yaml.txt @@ -9,16 +9,3 @@ the top level of the nova directory: For a pre-generated example of the latest nova policy.yaml, see: https://docs.openstack.org/nova/latest/configuration/sample-policy.html - - -Placement -========= - -To generate the sample placement policy.yaml file, run the following command -from the top level of the nova directory: - - tox -e genplacementpolicy - -For a pre-generated example of the latest placement policy.yaml, see: - - https://docs.openstack.org/nova/latest/configuration/sample-placement-policy.html diff --git a/etc/nova/api-paste.ini b/etc/nova/api-paste.ini index 8bde418be4a..7e20eaa7e20 100644 --- a/etc/nova/api-paste.ini +++ b/etc/nova/api-paste.ini @@ -18,23 +18,29 @@ paste.app_factory = nova.api.metadata.handler:MetadataRequestHandler.factory [composite:osapi_compute] use = call:nova.api.openstack.urlmap:urlmap_factory /: oscomputeversions +/v2: oscomputeversion_legacy_v2 +/v2.1: oscomputeversion_v2 # v21 is an exactly feature match for v2, except it has more stringent 
# input validation on the wsgi surface (prevents fuzzing early on the # API). It also provides new features via API microversions which are # opt into for clients. Unaware clients will receive the same frozen # v2 API feature set, but with some relaxed validation -/v2: openstack_compute_api_v21_legacy_v2_compatible -/v2.1: openstack_compute_api_v21 +/v2/+: openstack_compute_api_v21_legacy_v2_compatible +/v2.1/+: openstack_compute_api_v21 [composite:openstack_compute_api_v21] use = call:nova.api.auth:pipeline_factory_v21 -noauth2 = cors http_proxy_to_wsgi compute_req_id faultwrap request_log sizelimit osprofiler noauth2 osapi_compute_app_v21 keystone = cors http_proxy_to_wsgi compute_req_id faultwrap request_log sizelimit osprofiler authtoken keystonecontext osapi_compute_app_v21 +# DEPRECATED: The [api]auth_strategy conf option is deprecated and will be +# removed in a subsequent release, whereupon this pipeline will be unreachable. +noauth2 = cors http_proxy_to_wsgi compute_req_id faultwrap request_log sizelimit osprofiler noauth2 osapi_compute_app_v21 [composite:openstack_compute_api_v21_legacy_v2_compatible] use = call:nova.api.auth:pipeline_factory_v21 -noauth2 = cors http_proxy_to_wsgi compute_req_id faultwrap request_log sizelimit osprofiler noauth2 legacy_v2_compatible osapi_compute_app_v21 keystone = cors http_proxy_to_wsgi compute_req_id faultwrap request_log sizelimit osprofiler authtoken keystonecontext legacy_v2_compatible osapi_compute_app_v21 +# DEPRECATED: The [api]auth_strategy conf option is deprecated and will be +# removed in a subsequent release, whereupon this pipeline will be unreachable. +noauth2 = cors http_proxy_to_wsgi compute_req_id faultwrap request_log sizelimit osprofiler noauth2 legacy_v2_compatible osapi_compute_app_v21 [filter:request_log] paste.filter_factory = nova.api.openstack.requestlog:RequestLog.factory @@ -45,6 +51,8 @@ paste.filter_factory = nova.api.compute_req_id:ComputeReqIdMiddleware.factory [filter:faultwrap] paste.filter_factory = nova.api.openstack:FaultWrapper.factory +# DEPRECATED: NoAuthMiddleware will be removed in a subsequent release, +# whereupon this filter will cease to function. 
[filter:noauth2] paste.filter_factory = nova.api.openstack.auth:NoAuthMiddleware.factory @@ -66,9 +74,18 @@ paste.app_factory = nova.api.openstack.compute:APIRouterV21.factory [pipeline:oscomputeversions] pipeline = cors faultwrap request_log http_proxy_to_wsgi oscomputeversionapp +[pipeline:oscomputeversion_v2] +pipeline = cors compute_req_id faultwrap request_log http_proxy_to_wsgi oscomputeversionapp_v2 + +[pipeline:oscomputeversion_legacy_v2] +pipeline = cors compute_req_id faultwrap request_log http_proxy_to_wsgi legacy_v2_compatible oscomputeversionapp_v2 + [app:oscomputeversionapp] paste.app_factory = nova.api.openstack.compute.versions:Versions.factory +[app:oscomputeversionapp_v2] +paste.app_factory = nova.api.openstack.compute.versions:VersionsV2.factory + ########## # Shared # ########## diff --git a/etc/nova/cells.json b/etc/nova/cells.json deleted file mode 100644 index cc74930d4d7..00000000000 --- a/etc/nova/cells.json +++ /dev/null @@ -1,26 +0,0 @@ -{ - "parent": { - "name": "parent", - "api_url": "http://api.example.com:8774", - "transport_url": "rabbit://rabbit.example.com", - "weight_offset": 0.0, - "weight_scale": 1.0, - "is_parent": true - }, - "cell1": { - "name": "cell1", - "api_url": "http://api.example.com:8774", - "transport_url": "rabbit://rabbit1.example.com", - "weight_offset": 0.0, - "weight_scale": 1.0, - "is_parent": false - }, - "cell2": { - "name": "cell2", - "api_url": "http://api.example.com:8774", - "transport_url": "rabbit://rabbit2.example.com", - "weight_offset": 0.0, - "weight_scale": 1.0, - "is_parent": false - } -} diff --git a/etc/nova/logging_sample.conf b/etc/nova/logging_sample.conf index 68418192429..985e0c0146b 100644 --- a/etc/nova/logging_sample.conf +++ b/etc/nova/logging_sample.conf @@ -41,7 +41,7 @@ qualname = boto # NOTE(mikal): suds is used by the vmware driver, removing this will # cause many extraneous log lines for their tempest runs. Refer to -# https://review.openstack.org/#/c/219225/ for details. +# https://review.opendev.org/#/c/219225/ for details. [logger_suds] level = INFO handlers = stderr diff --git a/etc/nova/nova-config-generator.conf b/etc/nova/nova-config-generator.conf index 4bdcbd4833a..3cf4da9f31e 100644 --- a/etc/nova/nova-config-generator.conf +++ b/etc/nova/nova-config-generator.conf @@ -1,15 +1,16 @@ [DEFAULT] output_file = etc/nova/nova.conf.sample wrap_width = 80 +summarize = true namespace = nova.conf namespace = oslo.log namespace = oslo.messaging namespace = oslo.policy +namespace = oslo.privsep namespace = oslo.service.periodic_task namespace = oslo.service.service -namespace = oslo.db -namespace = oslo.db.concurrency namespace = oslo.middleware namespace = oslo.concurrency +namespace = oslo.reports namespace = keystonemiddleware.auth_token namespace = osprofiler diff --git a/etc/nova/placement-policy-generator.conf b/etc/nova/placement-policy-generator.conf deleted file mode 100644 index a2e0697d000..00000000000 --- a/etc/nova/placement-policy-generator.conf +++ /dev/null @@ -1,5 +0,0 @@ -[DEFAULT] -# TODO: When placement is split out of the nova repo, this can change to -# etc/placement/policy.yaml.sample. 
-output_file = etc/nova/placement-policy.yaml.sample -namespace = placement diff --git a/etc/nova/rootwrap.d/api-metadata.filters b/etc/nova/rootwrap.d/api-metadata.filters deleted file mode 100644 index 1aa6f83e68d..00000000000 --- a/etc/nova/rootwrap.d/api-metadata.filters +++ /dev/null @@ -1,13 +0,0 @@ -# nova-rootwrap command filters for api-metadata nodes -# This is needed on nova-api hosts running with "metadata" in enabled_apis -# or when running nova-api-metadata -# This file should be owned by (and only-writeable by) the root user - -[Filters] -# nova/network/linux_net.py: 'ip[6]tables-save' % (cmd, '-t', ... -iptables-save: CommandFilter, iptables-save, root -ip6tables-save: CommandFilter, ip6tables-save, root - -# nova/network/linux_net.py: 'ip[6]tables-restore' % (cmd,) -iptables-restore: CommandFilter, iptables-restore, root -ip6tables-restore: CommandFilter, ip6tables-restore, root diff --git a/etc/nova/rootwrap.d/compute.filters b/etc/nova/rootwrap.d/compute.filters index 0ef8c90194c..4ccb5b4c9f9 100644 --- a/etc/nova/rootwrap.d/compute.filters +++ b/etc/nova/rootwrap.d/compute.filters @@ -2,157 +2,8 @@ # This file should be owned by (and only-writeable by) the root user [Filters] - -# nova/virt/libvirt/utils.py: 'blockdev', '--getsize64', path -# nova/virt/disk/mount/nbd.py: 'blockdev', '--flushbufs', device -blockdev: RegExpFilter, blockdev, root, blockdev, (--getsize64|--flushbufs), /dev/.* - -# nova/virt/libvirt/vif.py: 'ip', 'tuntap', 'add', dev, 'mode', 'tap' -# nova/virt/libvirt/vif.py: 'ip', 'link', 'set', dev, 'up' -# nova/virt/libvirt/vif.py: 'ip', 'link', 'delete', dev -# nova/network/linux_net.py: 'ip', 'addr', 'add', str(floating_ip)+'/32'i.. -# nova/network/linux_net.py: 'ip', 'addr', 'del', str(floating_ip)+'/32'.. -# nova/network/linux_net.py: 'ip', 'addr', 'add', '169.254.169.254/32',.. -# nova/network/linux_net.py: 'ip', 'addr', 'show', 'dev', dev, 'scope',.. -# nova/network/linux_net.py: 'ip', 'addr', 'del/add', ip_params, dev) -# nova/network/linux_net.py: 'ip', 'addr', 'del', params, fields[-1] -# nova/network/linux_net.py: 'ip', 'addr', 'add', params, bridge -# nova/network/linux_net.py: 'ip', '-f', 'inet6', 'addr', 'change', .. -# nova/network/linux_net.py: 'ip', 'link', 'set', 'dev', dev, 'promisc',.. -# nova/network/linux_net.py: 'ip', 'link', 'add', 'link', bridge_if ... -# nova/network/linux_net.py: 'ip', 'link', 'set', interface, address,.. -# nova/network/linux_net.py: 'ip', 'link', 'set', interface, 'up' -# nova/network/linux_net.py: 'ip', 'link', 'set', bridge, 'up' -# nova/network/linux_net.py: 'ip', 'addr', 'show', 'dev', interface, .. -# nova/network/linux_net.py: 'ip', 'link', 'set', dev, address, .. -# nova/network/linux_net.py: 'ip', 'link', 'set', dev, 'up' -# nova/network/linux_net.py: 'ip', 'route', 'add', .. -# nova/network/linux_net.py: 'ip', 'route', 'del', . -# nova/network/linux_net.py: 'ip', 'route', 'show', 'dev', dev -ip: CommandFilter, ip, root - -# nova/virt/libvirt/vif.py: 'tunctl', '-b', '-t', dev -# nova/network/linux_net.py: 'tunctl', '-b', '-t', dev -tunctl: CommandFilter, tunctl, root - -# nova/virt/libvirt/vif.py: 'ovs-vsctl', ... -# nova/virt/libvirt/vif.py: 'ovs-vsctl', 'del-port', ... -# nova/network/linux_net.py: 'ovs-vsctl', .... -ovs-vsctl: CommandFilter, ovs-vsctl, root - -# nova/network/linux_net.py: 'ivs-ctl', .... -ivs-ctl: CommandFilter, ivs-ctl, root - -# nova/network/linux_net.py: 'ovs-ofctl', .... -ovs-ofctl: CommandFilter, ovs-ofctl, root - -# nova/virt/xenapi/volume_utils.py: 'iscsiadm', '-m', ... 
-iscsiadm: CommandFilter, iscsiadm, root - -# nova/virt/libvirt/volume/aoe.py: 'aoe-revalidate', aoedev -# nova/virt/libvirt/volume/aoe.py: 'aoe-discover' -aoe-revalidate: CommandFilter, aoe-revalidate, root -aoe-discover: CommandFilter, aoe-discover, root - -# nova/virt/xenapi/vm_utils.py: 'pygrub', '-qn', dev_path -pygrub: CommandFilter, pygrub, root - -# nova/virt/xenapi/vm_utils.py: fdisk %(dev_path)s -fdisk: CommandFilter, fdisk, root - -# nova/virt/xenapi/vm_utils.py: e2fsck, -f, -p, partition_path -# nova/virt/disk/api.py: e2fsck, -f, -p, image -e2fsck: CommandFilter, e2fsck, root - -# nova/virt/xenapi/vm_utils.py: resize2fs, partition_path -# nova/virt/disk/api.py: resize2fs, image -resize2fs: CommandFilter, resize2fs, root - -# nova/network/linux_net.py: 'ip[6]tables-save' % (cmd, '-t', ... -iptables-save: CommandFilter, iptables-save, root -ip6tables-save: CommandFilter, ip6tables-save, root - -# nova/network/linux_net.py: 'ip[6]tables-restore' % (cmd,) -iptables-restore: CommandFilter, iptables-restore, root -ip6tables-restore: CommandFilter, ip6tables-restore, root - -# nova/network/linux_net.py: 'arping', '-U', floating_ip, '-A', '-I', ... -# nova/network/linux_net.py: 'arping', '-U', network_ref['dhcp_server'],.. -arping: CommandFilter, arping, root - -# nova/network/linux_net.py: 'dhcp_release', dev, address, mac_address -dhcp_release: CommandFilter, dhcp_release, root - -# nova/network/linux_net.py: 'kill', '-9', pid -# nova/network/linux_net.py: 'kill', '-HUP', pid -kill_dnsmasq: KillFilter, root, /usr/sbin/dnsmasq, -9, -HUP - -# nova/network/linux_net.py: 'kill', pid -kill_radvd: KillFilter, root, /usr/sbin/radvd - -# nova/network/linux_net.py: dnsmasq call -dnsmasq: EnvFilter, env, root, CONFIG_FILE=, NETWORK_ID=, dnsmasq - -# nova/network/linux_net.py: 'radvd', '-C', '%s' % _ra_file(dev, 'conf'.. -radvd: CommandFilter, radvd, root - -# nova/network/linux_net.py: 'brctl', 'addbr', bridge -# nova/network/linux_net.py: 'brctl', 'setfd', bridge, 0 -# nova/network/linux_net.py: 'brctl', 'stp', bridge, 'off' -# nova/network/linux_net.py: 'brctl', 'addif', bridge, interface -brctl: CommandFilter, brctl, root - -# nova/virt/libvirt/utils.py: 'mkswap' -# nova/virt/xenapi/vm_utils.py: 'mkswap' -mkswap: CommandFilter, mkswap, root - -# nova/virt/xenapi/vm_utils.py: 'mkfs' -# nova/utils.py: 'mkfs', fs, path, label -mkfs: CommandFilter, mkfs, root - -# nova/virt/libvirt/utils.py: 'qemu-img' -qemu-img: CommandFilter, qemu-img, root - -# nova/virt/disk/api.py: -mkfs.ext3: CommandFilter, mkfs.ext3, root -mkfs.ext4: CommandFilter, mkfs.ext4, root -mkfs.ntfs: CommandFilter, mkfs.ntfs, root - -# os-brick needed commands -read_initiator: ReadFileFilter, /etc/iscsi/initiatorname.iscsi -multipath: CommandFilter, multipath, root -# multipathd show status -multipathd: CommandFilter, multipathd, root -systool: CommandFilter, systool, root -vgc-cluster: CommandFilter, vgc-cluster, root -# os_brick/initiator/connector.py -drv_cfg: CommandFilter, /opt/emc/scaleio/sdc/bin/drv_cfg, root, /opt/emc/scaleio/sdc/bin/drv_cfg, --query_guid - -# TODO(smcginnis) Temporary fix. -# Need to pull in os-brick os-brick.filters file instead and clean -# out stale brick values from this file. -scsi_id: CommandFilter, /lib/udev/scsi_id, root # os_brick.privileged.default oslo.privsep context -# This line ties the superuser privs with the config files, context name, -# and (implicitly) the actual python code invoked. 
privsep-rootwrap-os_brick: RegExpFilter, privsep-helper, root, privsep-helper, --config-file, /etc/(?!\.\.).*, --privsep_context, os_brick.privileged.default, --privsep_sock_path, /tmp/.* +# nova.privsep.sys_admin_pctxt oslo.privsep context privsep-rootwrap-sys_admin: RegExpFilter, privsep-helper, root, privsep-helper, --config-file, /etc/(?!\.\.).*, --privsep_context, nova.privsep.sys_admin_pctxt, --privsep_sock_path, /tmp/.* - -# nova/virt/libvirt/storage/dmcrypt.py: -cryptsetup: CommandFilter, cryptsetup, root - -# nova/virt/xenapi/vm_utils.py: -xenstore-read: CommandFilter, xenstore-read, root - -# nova/virt/libvirt/utils.py: -rbd: CommandFilter, rbd, root - -# nova/virt/libvirt/volume/volume.py: 'cp', '/dev/stdin', delete_control.. -cp: CommandFilter, cp, root - -# nova/virt/xenapi/vm_utils.py: -sync: CommandFilter, sync, root - -# nova/virt/libvirt/volume/vzstorage.py -pstorage-mount: CommandFilter, pstorage-mount, root diff --git a/etc/nova/rootwrap.d/network.filters b/etc/nova/rootwrap.d/network.filters deleted file mode 100644 index 52b7130ea89..00000000000 --- a/etc/nova/rootwrap.d/network.filters +++ /dev/null @@ -1,91 +0,0 @@ -# nova-rootwrap command filters for network nodes -# This file should be owned by (and only-writeable by) the root user - -[Filters] -# nova/virt/libvirt/vif.py: 'ip', 'tuntap', 'add', dev, 'mode', 'tap' -# nova/virt/libvirt/vif.py: 'ip', 'link', 'set', dev, 'up' -# nova/virt/libvirt/vif.py: 'ip', 'link', 'delete', dev -# nova/network/linux_net.py: 'ip', 'addr', 'add', str(floating_ip)+'/32'i.. -# nova/network/linux_net.py: 'ip', 'addr', 'del', str(floating_ip)+'/32'.. -# nova/network/linux_net.py: 'ip', 'addr', 'add', '169.254.169.254/32',.. -# nova/network/linux_net.py: 'ip', 'addr', 'show', 'dev', dev, 'scope',.. -# nova/network/linux_net.py: 'ip', 'addr', 'del/add', ip_params, dev) -# nova/network/linux_net.py: 'ip', 'addr', 'del', params, fields[-1] -# nova/network/linux_net.py: 'ip', 'addr', 'add', params, bridge -# nova/network/linux_net.py: 'ip', '-f', 'inet6', 'addr', 'change', .. -# nova/network/linux_net.py: 'ip', 'link', 'set', 'dev', dev, 'promisc',.. -# nova/network/linux_net.py: 'ip', 'link', 'add', 'link', bridge_if ... -# nova/network/linux_net.py: 'ip', 'link', 'set', interface, address,.. -# nova/network/linux_net.py: 'ip', 'link', 'set', interface, 'up' -# nova/network/linux_net.py: 'ip', 'link', 'set', bridge, 'up' -# nova/network/linux_net.py: 'ip', 'addr', 'show', 'dev', interface, .. -# nova/network/linux_net.py: 'ip', 'link', 'set', dev, address, .. -# nova/network/linux_net.py: 'ip', 'link', 'set', dev, 'up' -# nova/network/linux_net.py: 'ip', 'route', 'add', .. -# nova/network/linux_net.py: 'ip', 'route', 'del', . -# nova/network/linux_net.py: 'ip', 'route', 'show', 'dev', dev -ip: CommandFilter, ip, root - -# nova/virt/libvirt/vif.py: 'ovs-vsctl', ... -# nova/virt/libvirt/vif.py: 'ovs-vsctl', 'del-port', ... -# nova/network/linux_net.py: 'ovs-vsctl', .... -ovs-vsctl: CommandFilter, ovs-vsctl, root - -# nova/network/linux_net.py: 'ovs-ofctl', .... -ovs-ofctl: CommandFilter, ovs-ofctl, root - -# nova/virt/libvirt/vif.py: 'ivs-ctl', ... -# nova/virt/libvirt/vif.py: 'ivs-ctl', 'del-port', ... -# nova/network/linux_net.py: 'ivs-ctl', .... -ivs-ctl: CommandFilter, ivs-ctl, root - -# nova/virt/libvirt/vif.py: 'ifc_ctl', ... -ifc_ctl: CommandFilter, /opt/pg/bin/ifc_ctl, root - -# nova/network/linux_net.py: 'ebtables', '-D' ... -# nova/network/linux_net.py: 'ebtables', '-I' ... 
-ebtables: CommandFilter, ebtables, root -ebtables_usr: CommandFilter, ebtables, root - -# nova/network/linux_net.py: 'ip[6]tables-save' % (cmd, '-t', ... -iptables-save: CommandFilter, iptables-save, root -ip6tables-save: CommandFilter, ip6tables-save, root - -# nova/network/linux_net.py: 'ip[6]tables-restore' % (cmd,) -iptables-restore: CommandFilter, iptables-restore, root -ip6tables-restore: CommandFilter, ip6tables-restore, root - -# nova/network/linux_net.py: 'arping', '-U', floating_ip, '-A', '-I', ... -# nova/network/linux_net.py: 'arping', '-U', network_ref['dhcp_server'],.. -arping: CommandFilter, arping, root - -# nova/network/linux_net.py: 'dhcp_release', dev, address, mac_address -dhcp_release: CommandFilter, dhcp_release, root - -# nova/network/linux_net.py: 'kill', '-9', pid -# nova/network/linux_net.py: 'kill', '-HUP', pid -kill_dnsmasq: KillFilter, root, /usr/sbin/dnsmasq, -9, -HUP - -# nova/network/linux_net.py: 'kill', pid -kill_radvd: KillFilter, root, /usr/sbin/radvd - -# nova/network/linux_net.py: dnsmasq call -dnsmasq: EnvFilter, env, root, CONFIG_FILE=, NETWORK_ID=, dnsmasq - -# nova/network/linux_net.py: 'radvd', '-C', '%s' % _ra_file(dev, 'conf'.. -radvd: CommandFilter, radvd, root - -# nova/network/linux_net.py: 'brctl', 'addbr', bridge -# nova/network/linux_net.py: 'brctl', 'setfd', bridge, 0 -# nova/network/linux_net.py: 'brctl', 'stp', bridge, 'off' -# nova/network/linux_net.py: 'brctl', 'addif', bridge, interface -brctl: CommandFilter, brctl, root - -# nova/network/linux_net.py: 'sysctl', .... -sysctl: CommandFilter, sysctl, root - -# nova/network/linux_net.py: 'conntrack' -conntrack: CommandFilter, conntrack, root - -# nova/network/linux_net.py: 'fp-vdev' -fp-vdev: CommandFilter, fp-vdev, root diff --git a/gate/post_test_hook.sh b/gate/post_test_hook.sh index b27274b049a..28ad9b939ea 100755 --- a/gate/post_test_hook.sh +++ b/gate/post_test_hook.sh @@ -5,13 +5,27 @@ MANAGE="/usr/local/bin/nova-manage" function archive_deleted_rows { # NOTE(danms): Run this a few times to make sure that we end # up with nothing more to archive + if ! $MANAGE db archive_deleted_rows --verbose --before "$(date -d yesterday)" 2>&1 | grep 'Nothing was archived'; then + echo "Archiving yesterday data should have done nothing" + return 1 + fi for i in `seq 30`; do - $MANAGE $* db archive_deleted_rows --verbose --max_rows 1000 + if [[ $i -eq 1 ]]; then + # This is just a test wrinkle to make sure we're covering the + # non-all-cells (cell0) case, as we're not passing in the cell1 + # config. + $MANAGE db archive_deleted_rows --verbose --max_rows 50 --before "$(date -d tomorrow)" + else + $MANAGE db archive_deleted_rows --verbose --max_rows 1000 --before "$(date -d tomorrow)" --all-cells + fi RET=$? if [[ $RET -gt 1 ]]; then echo Archiving failed with result $RET return $RET - elif [[ $RET -eq 0 ]]; then + # When i = 1, we only archive cell0 (without --all-cells), so run at + # least twice to ensure --all-cells are archived before considering + # archiving complete. + elif [[ $RET -eq 0 && $i -gt 1 ]]; then echo Archiving Complete break; fi @@ -30,24 +44,36 @@ function purge_db { } BASE=${BASE:-/opt/stack} -source ${BASE}/new/devstack/functions-common -source ${BASE}/new/devstack/lib/nova -cell_conf=$(conductor_conf 1) -# NOTE(danms): We need to pass the main config to get the api db -# bits, and then also the cell config for the cell1 db (instead of -# the cell0 config that is in the main config file). Later files -# take precedence. 
-conf="--config-file $NOVA_CONF --config-file $cell_conf" - -archive_deleted_rows $conf -purge_db +source ${BASE}/devstack/functions-common +source ${BASE}/devstack/lib/nova + +# This needs to go before 'set -e' because otherwise the intermediate runs of +# 'nova-manage db archive_deleted_rows' returning 1 (normal and expected) would +# cause this script to exit and fail. +archive_deleted_rows set -e + +# This needs to go after 'set -e' because otherwise a failure to purge the +# database would not cause this script to exit and fail. +purge_db + # We need to get the admin credentials to run the OSC CLIs for Placement. set +x -source $BASE/new/devstack/openrc admin +source $BASE/devstack/openrc admin set -x +# Verify whether instances were archived from all cells. Admin credentials are +# needed to list deleted instances across all projects. +echo "Verifying that instances were archived from all cells" +deleted_servers=$(openstack server list --deleted --all-projects -c ID -f value) + +# Fail if any deleted servers were found. +if [[ -n "$deleted_servers" ]]; then + echo "There were unarchived instances found after archiving; failing." + exit 1 +fi + # TODO(mriedem): Consider checking for instances in ERROR state because # if there are any, we would expect them to retain allocations in Placement # and therefore we don't really need to check for leaked allocations. @@ -73,3 +99,175 @@ if [[ $LEAKED_ALLOCATIONS -eq 1 ]]; then exit 1 fi echo "Resource provider allocations were cleaned up properly." + + +# Test "nova-manage placement heal_allocations" by creating a server, deleting +# its allocations in placement, and then running heal_allocations and assert +# the allocations were healed as expected. + +function get_binding_profile_value +{ + # Returns the value of the key in the binding profile if exsits or return + # empty. + local port=${1} + local key=${2} + local print_value='import sys, json; print(json.load(sys.stdin).get("binding_profile", {}).get("'${key}'", ""))' + openstack port show ${port} -f json -c binding_profile \ + | /usr/bin/env python3 -c "${print_value}" +} + +echo "Creating port with bandwidth request for heal_allocations testing" +openstack network create net0 \ + --provider-network-type vlan \ + --provider-physical-network public \ + --provider-segment 100 + +openstack subnet create subnet0 \ + --network net0 \ + --subnet-range 10.0.4.0/24 \ + +openstack network qos policy create qp0 +openstack network qos rule create qp0 \ + --type minimum-bandwidth \ + --min-kbps 1000 \ + --egress + +openstack network qos rule create qp0 \ + --type minimum-bandwidth \ + --min-kbps 1000 \ + --ingress + +openstack port create port-normal-qos \ + --network net0 \ + --vnic-type normal \ + --qos-policy qp0 + +# Let's make the binding:profile for this port contain some +# (non-allocation-y) stuff and then later assert that this stuff is still +# there after the heal. +# Cf. 
+openstack port set port-normal-qos --binding-profile my_key=my_value
+
+image_id=$(openstack image list -f value -c ID | awk 'NR==1{print $1}')
+flavor_id=$(openstack flavor list -f value -c ID | awk 'NR==1{print $1}')
+network_id=$(openstack network list --no-share -f value -c ID | awk 'NR==1{print $1}')
+
+echo "Creating server for heal_allocations testing"
+# microversion 2.72 introduced support for bandwidth aware ports
+openstack --os-compute-api-version 2.72 \
+server create --image ${image_id} --flavor ${flavor_id} \
+--nic net-id=${network_id} --nic port-id=port-normal-qos \
+--wait heal-allocations-test
+server_id=$(openstack server show heal-allocations-test -f value -c id)

+# Make sure there are allocations for the consumer.
+allocations=$(openstack resource provider allocation show ${server_id} \
+              -c resources -f value)
+if [[ "$allocations" == "" ]]; then
+    echo "No allocations found for the server."
+    exit 2
+fi
+
+# Make sure that the binding:profile.allocation key is updated
+rp_uuid=$(get_binding_profile_value port-normal-qos "allocation")
+if [[ "$rp_uuid" == "" ]]; then
+    echo "No allocation found for the bandwidth aware port."
+    exit 2
+fi
+
+# Make sure our extra key in the binding:profile is still there
+my_key=$(get_binding_profile_value port-normal-qos "my_key")
+if [[ "$my_key" == "" ]]; then
+    echo "During port binding the binding:profile was overwritten."
+    exit 2
+fi
+
+echo "Deleting allocations in placement for the server"
+openstack resource provider allocation delete ${server_id}
+
+echo "Deleting allocation key from the binding:profile of the bandwidth aware port"
+openstack port unset --binding-profile allocation port-normal-qos
+
+# Make sure the allocations are gone.
+allocations=$(openstack resource provider allocation show ${server_id} \
+              -c resources -f value)
+if [[ "$allocations" != "" ]]; then
+    echo "Server allocations were not deleted."
+    exit 2
+fi
+
+# Make sure that the binding:profile.allocation key is gone
+null_rp_uuid=$(get_binding_profile_value port-normal-qos "allocation")
+if [[ "$null_rp_uuid" != "" ]]; then
+    echo "Binding profile not updated for the bandwidth aware port."
+    exit 2
+fi
+
+# Make sure our extra key in the binding:profile is still there
+my_key=$(get_binding_profile_value port-normal-qos "my_key")
+if [[ "$my_key" == "" ]]; then
+    echo "During deletion of allocation key our extra key was also deleted from the binding:profile."
+    exit 2
+fi
+
+echo "Healing allocations"
+# First, test with --dry-run over all instances in all cells.
+set +e
+nova-manage placement heal_allocations --verbose --dry-run
+rc=$?
+set -e
+# Since we did not create allocations because of --dry-run the rc should be 4.
+if [[ ${rc} -ne 4 ]]; then
+    echo "Expected return code 4 from heal_allocations with --dry-run"
+    exit 2
+fi
+# Now test with just the single instance and actually perform the heal.
+nova-manage placement heal_allocations --verbose --instance ${server_id}
+
+# Make sure there are allocations for the consumer.
+allocations=$(openstack resource provider allocation show ${server_id} \
+              -c resources -f value)
+if [[ "$allocations" == "" ]]; then
+    echo "Failed to heal allocations."
+    exit 2
+fi
+
+# Make sure that the allocations contain bandwidth as well
+bandwidth_allocations=$(echo "$allocations" | grep NET_BW_EGR_KILOBIT_PER_SEC)
+if [[ "$bandwidth_allocations" == "" ]]; then
+    echo "Failed to heal port allocations."
+    exit 2
+fi
+
+# Make sure that the binding:profile.allocation key was healed back
+healed_rp_uuid=$(get_binding_profile_value port-normal-qos "allocation")
+if [[ "$rp_uuid" != "$healed_rp_uuid" ]]; then
+    echo "The value of the allocation key of the bandwidth aware port does not match."
+    echo "expected: $rp_uuid; actual: $healed_rp_uuid."
+    exit 2
+fi
+
+# Make sure our extra key in the binding:profile is still there
+my_key=$(get_binding_profile_value port-normal-qos "my_key")
+if [[ "$my_key" == "" ]]; then
+    echo "During heal port allocation our extra key in the binding:profile was deleted."
+    exit 2
+fi
+
+echo "Verifying online_data_migrations idempotence"
+# We will re-use the server created earlier for this test. (A server needs to
+# be present during the run of online_data_migrations and archiving.)
+
+# Run the online data migrations before archiving.
+$MANAGE db online_data_migrations
+
+# We need to archive the deleted marker instance used by the
+# fill_virtual_interface_list online data migration in order to trigger
+# creation of a new deleted marker instance.
+set +e
+archive_deleted_rows
+set -e
+
+# Verify that online data migrations run after archiving will succeed.
+# For more details, see: https://bugs.launchpad.net/nova/+bug/1824435
+$MANAGE db online_data_migrations
diff --git a/lower-constraints.txt b/lower-constraints.txt
deleted file mode 100644
index c2a27a41dd3..00000000000
--- a/lower-constraints.txt
+++ /dev/null
@@ -1,174 +0,0 @@
-alembic==0.9.8
-amqp==2.2.2
-appdirs==1.4.3
-asn1crypto==0.24.0
-attrs==17.4.0
-automaton==1.14.0
-Babel==2.3.4
-enum34==1.0.4
-bandit==1.1.0
-bcrypt==3.1.4
-cachetools==2.0.1
-castellan==0.16.0
-certifi==2018.1.18
-cffi==1.11.5
-chardet==3.0.4
-cliff==2.11.0
-cmd2==0.8.1
-colorama==0.3.9
-contextlib2==0.5.5
-coverage==4.0
-cryptography==2.1
-cursive==0.2.1
-ddt==1.0.1
-debtcollector==1.19.0
-decorator==3.4.0
-deprecation==2.0
-dogpile.cache==0.6.5
-enum-compat==0.0.2
-eventlet==0.18.2
-extras==1.0.0
-fasteners==0.14.1
-fixtures==3.0.0
-flake8==2.5.5
-future==0.16.0
-futurist==1.6.0
-futures==3.0.0
-gabbi==1.35.0
-gitdb2==2.0.3
-GitPython==2.1.8
-greenlet==0.4.10
-hacking==0.12.0
-idna==2.6
-iso8601==0.1.11
-Jinja2==2.10
-jmespath==0.9.3
-jsonpatch==1.21
-jsonpath-rw-ext==1.1.3
-jsonpath-rw==1.4.0
-jsonpointer==2.0
-jsonschema==2.6.0
-keystoneauth1==3.9.0
-keystonemiddleware==4.17.0
-kombu==4.1.0
-linecache2==1.0.0
-lxml==3.4.1
-Mako==1.0.7
-MarkupSafe==1.0
-mccabe==0.2.1
-microversion-parse==0.2.1
-mock==2.0.0
-monotonic==1.4
-mox3==0.20.0
-msgpack==0.5.6
-munch==2.2.0
-netaddr==0.7.18
-netifaces==0.10.4
-networkx==1.11
-numpy==1.14.2
-openstacksdk==0.12.0
-os-brick==2.5.0
-os-client-config==1.29.0
-os-service-types==1.2.0
-os-traits==0.4.0
-os-vif==1.7.0
-os-win==3.0.0
-os-xenapi==0.3.3
-osc-lib==1.10.0
-oslo.cache==1.26.0
-oslo.concurrency==3.26.0
-oslo.config==6.1.0
-oslo.context==2.19.2
-oslo.db==4.27.0
-oslo.i18n==3.15.3
-oslo.log==3.36.0
-oslo.messaging==6.3.0
-oslo.middleware==3.31.0
-oslo.policy==1.35.0
-oslo.privsep==1.23.0
-oslo.reports==1.18.0
-oslo.rootwrap==5.8.0
-oslo.serialization==2.18.0
-oslo.service==1.24.0
-oslo.utils==3.33.0
-oslo.versionedobjects==1.31.2
-oslo.vmware==2.17.0
-oslotest==3.2.0
-osprofiler==1.4.0
-packaging==17.1
-paramiko==2.0.0
-Paste==2.0.2
-PasteDeploy==1.5.0
-pbr==2.0.0
-pep8==1.5.7
-pika-pool==0.1.3
-pika==0.10.0
-pluggy==0.6.0
-ply==3.11
-prettytable==0.7.1
-psutil==3.2.2
-psycopg2==2.6.2
-py==1.5.2
-pyasn1-modules==0.2.1
-pyasn1==0.4.2
-pycadf==2.7.0
-pycparser==2.18
-pyflakes==0.8.1
-pyinotify==0.9.6 -PyMySQL==0.7.6 -PyNaCl==1.2.1 -pyOpenSSL==17.5.0 -pyparsing==2.2.0 -pyperclip==1.6.0 -pypowervm==1.1.15 -pyroute2==0.4.21 -pytest==3.4.2 -python-barbicanclient==4.5.2 -python-cinderclient==3.3.0 -python-dateutil==2.5.3 -python-editor==1.0.3 -python-glanceclient==2.8.0 -python-ironicclient==2.3.0 -python-keystoneclient==3.15.0 -python-mimeparse==1.6.0 -python-neutronclient==6.7.0 -python-subunit==1.2.0 -pytz==2018.3 -PyYAML==3.12 -repoze.lru==0.7 -requests-mock==1.2.0 -requests==2.14.2 -requestsexceptions==1.4.0 -retrying==1.3.3 -rfc3986==0.3.1 -Routes==2.3.1 -simplejson==3.13.2 -six==1.10.0 -smmap2==2.0.3 -sqlalchemy-migrate==0.11.0 -SQLAlchemy==1.0.10 -sqlparse==0.2.4 -statsd==3.2.2 -stestr==1.0.0 -stevedore==1.20.0 -setuptools==21.0.0 -suds-jurko==0.6 -taskflow==2.16.0 -Tempita==0.5.2 -tenacity==4.9.0 -testrepository==0.0.20 -testresources==2.0.0 -testscenarios==0.4 -testtools==2.2.0 -tooz==1.58.0 -traceback2==1.4.0 -unittest2==1.1.0 -urllib3==1.22 -vine==1.1.4 -voluptuous==0.11.1 -warlock==1.3.0 -WebOb==1.8.2 -websockify==0.8.0 -wrapt==1.10.11 -wsgi-intercept==1.7.0 -zVMCloudConnector==1.1.1 diff --git a/mypy-files.txt b/mypy-files.txt new file mode 100644 index 00000000000..898eee25c7c --- /dev/null +++ b/mypy-files.txt @@ -0,0 +1,17 @@ +nova/compute/manager.py +nova/crypto.py +nova/network/neutron.py +nova/pci +nova/privsep/path.py +nova/scheduler/client/report.py +nova/scheduler/request_filter.py +nova/scheduler/utils.py +nova/virt/driver.py +nova/virt/hardware.py +nova/virt/libvirt/machine_type_utils.py +nova/virt/libvirt/__init__.py +nova/virt/libvirt/driver.py +nova/virt/libvirt/event.py +nova/virt/libvirt/guest.py +nova/virt/libvirt/host.py +nova/virt/libvirt/utils.py diff --git a/nova/__init__.py b/nova/__init__.py index 228c89d33dc..b8044faecee 100644 --- a/nova/__init__.py +++ b/nova/__init__.py @@ -22,14 +22,3 @@ :platform: Unix :synopsis: Infrastructure-as-a-Service Cloud platform. """ - -import os - -os.environ['EVENTLET_NO_GREENDNS'] = 'yes' - -# NOTE(rpodolyaka): import oslo_service first, so that it makes eventlet hub -# use a monotonic clock to avoid issues with drifts of system time (see -# LP 1510234 for details) -import oslo_service # noqa - -import eventlet # noqa diff --git a/nova/api/ec2/__init__.py b/nova/accelerator/__init__.py similarity index 100% rename from nova/api/ec2/__init__.py rename to nova/accelerator/__init__.py diff --git a/nova/accelerator/cyborg.py b/nova/accelerator/cyborg.py new file mode 100644 index 00000000000..020adc9dc91 --- /dev/null +++ b/nova/accelerator/cyborg.py @@ -0,0 +1,407 @@ +# Copyright 2019 Intel +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +from oslo_log import log as logging + +from keystoneauth1 import exceptions as ks_exc + +from nova import exception +from nova.i18n import _ +from nova import objects +from nova.scheduler import utils as schedutils +from nova import service_auth +from nova import utils + +""" + Note on object relationships: + 1 device profile (DP) has D >= 1 request groups (just as a flavor + has many request groups). + Each DP request group corresponds to exactly 1 numbered request + group (RG) in the request spec. + Each numbered RG corresponds to exactly one resource provider (RP). + A DP request group may request A >= 1 accelerators, and so result + in the creation of A ARQs. + Each ARQ corresponds to exactly 1 DP request group. + + A device profile is a dictionary: + { "name": "mydpname", + "uuid": , + "groups": [ ] + } + + A device profile group is a dictionary too: + { "resources:CUSTOM_ACCELERATOR_FPGA": "2", + "resources:CUSTOM_LOCAL_MEMORY": "1", + "trait:CUSTOM_INTEL_PAC_ARRIA10": "required", + "trait:CUSTOM_FUNCTION_NAME_FALCON_GZIP_1_1": "required", + # 0 or more Cyborg properties + "accel:bitstream_id": "FB021995_BF21_4463_936A_02D49D4DB5E5" + } + + See cyborg/cyborg/objects/device_profile.py for more details. +""" + +LOG = logging.getLogger(__name__) + + +def get_client(context): + return _CyborgClient(context) + + +def get_device_profile_group_requester_id(dp_group_id, owner): + """Return the value to use in objects.RequestGroup.requester_id. + + The requester_id is used to match device profile groups from + Cyborg to the request groups in request spec. The request group id should + be unique for each dp in the flavor and in the port. + + :param dp_group_id: The index of the request group in the device profile. + :param owner: The port UUID if the dp requested by port. + """ + req_id = ("device_profile_" + str(dp_group_id) + + (str(owner) if owner else '')) + return req_id + + +def get_arq_pci_device_profile(arq): + """Extracting pci device info from ARQ + """ + pci_info = arq['attach_handle_info'] + return { + 'physical_network': pci_info["physical_network"], + 'pci_slot': "%s:%s:%s.%s" % ( + pci_info["domain"], pci_info["bus"], + pci_info["device"], pci_info["function"]), + 'arq_uuid': arq['uuid'] + } + + +def get_device_profile_request_groups(context, dp_name, owner=None): + cyclient = get_client(context) + dp_groups = cyclient.get_device_profile_groups(dp_name) + return cyclient.get_device_request_groups(dp_groups, owner) + + +def get_device_amount_of_dp_groups(dp_groups): + """Get requested devices amount for the groups of + a device_profile. + + :param dp_groups: list of request groups in a device profile. 
+ """ + devices_amount = 0 + for _ignore, dp_group in enumerate(dp_groups): + for key, val in dp_group.items(): + match = schedutils.ResourceRequest.XS_KEYPAT.match(key) + if not match: + continue # could be 'accel:foo=bar', skip it + prefix, _, _ = match.groups() + if prefix == schedutils.ResourceRequest.XS_RES_PREFIX: + devices_amount += int(val) + return devices_amount + + +class _CyborgClient(object): + DEVICE_PROFILE_URL = "/device_profiles" + ARQ_URL = "/accelerator_requests" + + def __init__(self, context): + auth = service_auth.get_auth_plugin(context) + self._client = utils.get_ksa_adapter('accelerator', ksa_auth=auth) + + def _call_cyborg(self, func, *args, **kwargs): + resp = err_msg = None + try: + resp = func(*args, **kwargs) + if not resp: + msg = _('Invalid response from Cyborg: ') + err_msg = msg + str(resp) + except ks_exc.ClientException as exc: + err_msg = _('Could not communicate with Cyborg.') + LOG.exception('%s: %s', err_msg, str(exc)) + + return resp, err_msg + + def _get_device_profile_list(self, dp_name): + query = {"name": dp_name} + err_msg = None + + resp, err_msg = self._call_cyborg(self._client.get, + self.DEVICE_PROFILE_URL, params=query) + + if err_msg: + raise exception.DeviceProfileError(name=dp_name, msg=err_msg) + + return resp.json().get('device_profiles') + + def get_device_profile_groups(self, dp_name): + """Get device groups from a device profile. + + :param dp_name: string: device profile name + Expected to be valid, not None or ''. + :returns: [device profile group dict] + :raises: DeviceProfileError + Expected to be valid, not None or ''. + """ + dp_list = self._get_device_profile_list(dp_name) + if not dp_list: + msg = _('Expected 1 device profile but got nothing.') + raise exception.DeviceProfileError(name=dp_name, msg=msg) + if len(dp_list) != 1: + err = _('Expected 1 device profile but got %s.') % len(dp_list) + raise exception.DeviceProfileError(name=dp_name, msg=err) + return dp_list[0]['groups'] + + def get_device_request_groups(self, dp_groups, owner): + """Get list of profile group objects from the device profile. + + :param dp_groups: device groups of a device profile. + :param owner: The port UUID if the dp requested by port. 
+ :returns: [objects.RequestGroup] + :raises: DeviceProfileError + """ + request_groups = [] + for dp_group_id, dp_group in enumerate(dp_groups): + req_id = get_device_profile_group_requester_id(dp_group_id, owner) + rg = objects.RequestGroup(requester_id=req_id) + for key, val in dp_group.items(): + match = schedutils.ResourceRequest.XS_KEYPAT.match(key) + if not match: + continue # could be 'accel:foo=bar', skip it + prefix, _ignore, name = match.groups() + if prefix == schedutils.ResourceRequest.XS_RES_PREFIX: + rg.add_resource(rclass=name, amount=val) + elif prefix == schedutils.ResourceRequest.XS_TRAIT_PREFIX: + rg.add_trait(trait_name=name, trait_type=val) + request_groups.append(rg) + return request_groups + + def _create_arqs(self, dp_name): + data = {"device_profile_name": dp_name} + resp, err_msg = self._call_cyborg(self._client.post, + self.ARQ_URL, json=data) + + if err_msg: + raise exception.AcceleratorRequestOpFailed( + op=_('create'), msg=err_msg) + + return resp.json().get('arqs') + + def create_arqs(self, dp_name): + """Create ARQs by dp_name.""" + LOG.info('Creating ARQs for device profile %s', dp_name) + arqs = self._create_arqs(dp_name) + if not arqs: + msg = _('device profile name %s') % dp_name + raise exception.AcceleratorRequestOpFailed(op=_('create'), msg=msg) + return arqs + + def create_arqs_and_match_resource_providers(self, dp_name, rg_rp_map): + """Create ARQs and match them with request groups and thereby + determine their corresponding RPs. + + :param dp_name: Device profile name + :param rg_rp_map: Request group - Resource Provider map + {requester_id: [resource_provider_uuid]} + :returns: + [arq], with each ARQ associated with an RP + :raises: DeviceProfileError, AcceleratorRequestOpFailed + """ + arqs = self.create_arqs(dp_name) + + for arq in arqs: + dp_group_id = arq['device_profile_group_id'] + arq['device_rp_uuid'] = None + requester_id = ( + get_device_profile_group_requester_id(dp_group_id, owner=None)) + arq['device_rp_uuid'] = rg_rp_map[requester_id][0] + return arqs + + def get_arq_device_rp_uuid(self, arq, rg_rp_map, owner): + """Query the ARQ by uuid saved in request_net. + """ + dp_group_id = arq['device_profile_group_id'] + requester_id = ( + get_device_profile_group_requester_id(dp_group_id, owner)) + + # ARQ and rp is 1:1 mapping + # One arq always associated with one placement request group and + # in placement one prefixed request group is always mapped to one RP. + return rg_rp_map[requester_id][0] + + def bind_arqs(self, bindings): + """Initiate Cyborg bindings. + + Handles RFC 6902-compliant JSON patching, sparing + calling Nova code from those details. + + :param bindings: + { "$arq_uuid": { + "hostname": STRING + "device_rp_uuid": UUID + "instance_uuid": UUID + }, + ... + } + :returns: nothing + :raises: AcceleratorRequestOpFailed + """ + LOG.info('Binding ARQs.') + # Create a JSON patch in RFC 6902 format + patch_list = {} + for arq_uuid, binding in bindings.items(): + patch = [{"path": "/" + field, + "op": "add", + "value": value + } for field, value in binding.items()] + patch_list[arq_uuid] = patch + + resp, err_msg = self._call_cyborg(self._client.patch, + self.ARQ_URL, json=patch_list) + if err_msg: + arq_uuids = bindings.keys() + msg = _(' Binding failed for ARQ UUIDs: ') + err_msg = err_msg + msg + ','.join(arq_uuids) + raise exception.AcceleratorRequestBindingFailed( + arqs=arq_uuids, msg=err_msg) + + def get_arqs_for_instance(self, instance_uuid, only_resolved=False): + """Get ARQs for the instance. 
+
+        :param instance_uuid: Instance UUID
+        :param only_resolved: flag to return only resolved ARQs
+        :returns: List of ARQs for the instance:
+            if only_resolved: only those ARQs which have completed binding
+            else: all ARQs
+            The format of the returned data structure is as below:
+            [
+                {'uuid': $arq_uuid,
+                 'device_profile_name': $dp_name,
+                 'device_profile_group_id': $dp_request_group_index,
+                 'state': 'Bound',
+                 'device_rp_uuid': $resource_provider_uuid,
+                 'hostname': $host_nodename,
+                 'instance_uuid': $instance_uuid,
+                 'attach_handle_info': {  # PCI bdf
+                     'bus': '0c', 'device': '0',
+                     'domain': '0000', 'function': '0'},
+                 'attach_handle_type': 'PCI'
+                 # or 'TEST_PCI' for Cyborg fake driver
+                }
+            ]
+        :raises: AcceleratorRequestOpFailed
+        """
+        query = {"instance": instance_uuid}
+        resp, err_msg = self._call_cyborg(self._client.get,
+            self.ARQ_URL, params=query)
+
+        if err_msg:
+            err_msg = err_msg + _(' Instance %s') % instance_uuid
+            raise exception.AcceleratorRequestOpFailed(
+                op=_('get'), msg=err_msg)
+
+        arqs = resp.json().get('arqs')
+        if not arqs:
+            err_msg = _('Cyborg returned no accelerator requests for '
+                        'instance %s') % instance_uuid
+            raise exception.AcceleratorRequestOpFailed(
+                op=_('get'), msg=err_msg)
+
+        if only_resolved:
+            arqs = [arq for arq in arqs if
+                    arq['state'] in ['Bound', 'BindFailed', 'Deleting']]
+        return arqs
+
+    def get_arq_by_uuid(self, arq_uuid):
+        """Get an ARQ by uuid.
+
+        The format of the returned data structure is as below:
+
+        {'uuid': $arq_uuid,
+         'device_profile_name': $dp_name,
+         'device_profile_group_id': $dp_request_group_index,
+         'state': 'Bound',
+         'device_rp_uuid': $resource_provider_uuid,
+         'hostname': $host_nodename,
+         'instance_uuid': $instance_uuid,
+         'attach_handle_info': {  # PCI bdf
+             'bus': '0c', 'device': '0',
+             'domain': '0000', 'function': '0'},
+         'attach_handle_type': 'PCI'
+         # or 'TEST_PCI' for Cyborg fake driver
+        }
+
+        :raises: AcceleratorRequestOpFailed
+        """
+        resp, err_msg = self._call_cyborg(self._client.get,
+            "/".join([self.ARQ_URL, arq_uuid]))
+
+        if err_msg:
+            err_msg = err_msg + _(' ARQ: %s') % arq_uuid
+            raise exception.AcceleratorRequestOpFailed(
+                op=_('get'), msg=err_msg)
+
+        arq = resp.json()
+
+        return arq
+
+    def delete_arqs_for_instance(self, instance_uuid):
+        """Delete ARQs for instance, after unbinding if needed.
+
+        :param instance_uuid: Instance UUID
+        :raises: AcceleratorRequestOpFailed
+        """
+        # Unbind and delete the ARQs
+        params = {"instance": instance_uuid}
+        resp, err_msg = self._call_cyborg(self._client.delete,
+            self.ARQ_URL, params=params)
+        if err_msg:
+            msg = err_msg + _(' Instance %s') % instance_uuid
+            raise exception.AcceleratorRequestOpFailed(
+                op=_('delete'), msg=msg)
+
+    def delete_arqs_by_uuid(self, arq_uuids):
+        """Delete the specified ARQs, unbinding them if needed.
+
+        This is meant to be used to clean up ARQs that have failed to bind
+        to an instance. So delete_arqs_for_instance() is not applicable.
+
+        This Cyborg API call is NOT idempotent, i.e., if called more than
+        once, the 2nd and later calls will throw errors.
+
+        Cyborg deletes the ARQs without error, or returns 404 if an ARQ has
+        already been deleted. Either way, all existing ARQs in arq_uuids
+        will be deleted, and such 404 errors can be ignored safely.
+
+        If this fails, an error is logged but no exception is raised
+        because this cleans up Cyborg resources, but should otherwise
+        not affect instance spawn.
+
+        :param arq_uuids: dict_keys() of ARQ UUIDs
+        """
+        arq_uuid_str = ','.join(arq_uuids)
+        params = {'arqs': arq_uuid_str}
+        resp, err_msg = self._call_cyborg(self._client.delete,
+            self.ARQ_URL, params=params)
+        if err_msg:
+            # No point raising an exception.
+            LOG.error('Failed to delete ARQs %s', arq_uuid_str)
+
+    def get_arq_uuids_for_instance(self, instance):
+        """Get ARQ UUIDs for the instance.
+
+        :param instance: Instance Object
+        :return: ARQ UUIDs.
+        """
+        return [arq['uuid']
+            for arq in self.get_arqs_for_instance(instance.uuid)]
diff --git a/nova/api/auth.py b/nova/api/auth.py
index 4663d6444a9..420e2dc3339 100644
--- a/nova/api/auth.py
+++ b/nova/api/auth.py
@@ -53,7 +53,16 @@ def pipeline_factory(loader, global_conf, **local_conf):

 def pipeline_factory_v21(loader, global_conf, **local_conf):
     """A paste pipeline replica that keys off of auth_strategy."""
-    return _load_pipeline(loader, local_conf[CONF.api.auth_strategy].split())
+    auth_strategy = CONF.api.auth_strategy
+    if auth_strategy == 'noauth2':
+        versionutils.report_deprecated_feature(
+            LOG,
+            "'[api]auth_strategy=noauth2' is deprecated as of the 21.0.0 "
+            "Ussuri release and will be removed in a future release. Please "
+            "remove any 'noauth2' entries from api-paste.ini; only the "
+            "'keystone' pipeline is supported."
+        )
+    return _load_pipeline(loader, local_conf[auth_strategy].split())


 class InjectContext(wsgi.Middleware):
@@ -72,6 +81,14 @@ def __call__(self, req):
 class NovaKeystoneContext(wsgi.Middleware):
     """Make a request context from keystone headers."""

+    @staticmethod
+    def _create_context(env, **kwargs):
+        """Create a context from a request environ.
+
+        This exists to make test stubbing easier.
+        """
+        return context.RequestContext.from_environ(env, **kwargs)
+
     @webob.dec.wsgify(RequestClass=wsgi.Request)
     def __call__(self, req):
         # Build a context, including the auth_token...
@@ -92,7 +109,7 @@ def __call__(self, req):
             # middleware in newer versions.
             user_auth_plugin = req.environ.get('keystone.token_auth')

-        ctx = context.RequestContext.from_environ(
+        ctx = self._create_context(
             req.environ,
             user_auth_plugin=user_auth_plugin,
             remote_address=remote_address,
diff --git a/nova/api/ec2/cloud.py b/nova/api/ec2/cloud.py
deleted file mode 100644
index ea88ec0376e..00000000000
--- a/nova/api/ec2/cloud.py
+++ /dev/null
@@ -1,31 +0,0 @@
-# Copyright 2010 United States Government as represented by the
-# Administrator of the National Aeronautics and Space Administration.
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from oslo_log import log as logging
-from oslo_log import versionutils
-
-LOG = logging.getLogger(__name__)
-
-
-class CloudController(object):
-    def __init__(self):
-        versionutils.report_deprecated_feature(
-            LOG,
-            'The in tree EC2 API has been removed in Mitaka.
' - 'Please remove entries from api-paste.ini and use ' - 'the OpenStack ec2-api project ' - 'http://git.openstack.org/cgit/openstack/ec2-api/' - ) diff --git a/nova/api/ec2/ec2utils.py b/nova/api/ec2/ec2utils.py deleted file mode 100644 index 3134e309e51..00000000000 --- a/nova/api/ec2/ec2utils.py +++ /dev/null @@ -1,465 +0,0 @@ -# Copyright 2010 United States Government as represented by the -# Administrator of the National Aeronautics and Space Administration. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import functools -import re - -from oslo_log import log as logging -from oslo_utils import timeutils -from oslo_utils import uuidutils -import six - -from nova import cache_utils -from nova import context -from nova import exception -from nova.i18n import _ -from nova.network import model as network_model -from nova import objects -from nova.objects import base as obj_base - -LOG = logging.getLogger(__name__) -# NOTE(vish): cache mapping for one week -_CACHE_TIME = 7 * 24 * 60 * 60 -_CACHE = None - - -def memoize(func): - @functools.wraps(func) - def memoizer(context, reqid): - global _CACHE - if not _CACHE: - _CACHE = cache_utils.get_client(expiration_time=_CACHE_TIME) - key = "%s:%s" % (func.__name__, reqid) - key = str(key) - value = _CACHE.get(key) - if value is None: - value = func(context, reqid) - _CACHE.set(key, value) - return value - return memoizer - - -def reset_cache(): - global _CACHE - _CACHE = None - - -def image_type(image_type): - """Converts to a three letter image type. - - aki, kernel => aki - ari, ramdisk => ari - anything else => ami - - """ - if image_type == 'kernel': - return 'aki' - if image_type == 'ramdisk': - return 'ari' - if image_type not in ['aki', 'ari']: - return 'ami' - return image_type - - -def resource_type_from_id(context, resource_id): - """Get resource type by ID - - Returns a string representation of the Amazon resource type, if known. - Returns None on failure. 
- - :param context: context under which the method is called - :param resource_id: resource_id to evaluate - """ - - known_types = { - 'i': 'instance', - 'r': 'reservation', - 'vol': 'volume', - 'snap': 'snapshot', - 'ami': 'image', - 'aki': 'image', - 'ari': 'image' - } - - type_marker = resource_id.split('-')[0] - - return known_types.get(type_marker) - - -@memoize -def id_to_glance_id(context, image_id): - """Convert an internal (db) id to a glance id.""" - return objects.S3ImageMapping.get_by_id(context, image_id).uuid - - -@memoize -def glance_id_to_id(context, glance_id): - """Convert a glance id to an internal (db) id.""" - if not glance_id: - return - try: - return objects.S3ImageMapping.get_by_uuid(context, glance_id).id - except exception.NotFound: - s3imap = objects.S3ImageMapping(context, uuid=glance_id) - s3imap.create() - return s3imap.id - - -def ec2_id_to_glance_id(context, ec2_id): - image_id = ec2_id_to_id(ec2_id) - return id_to_glance_id(context, image_id) - - -def glance_id_to_ec2_id(context, glance_id, image_type='ami'): - image_id = glance_id_to_id(context, glance_id) - if image_id is None: - return - return image_ec2_id(image_id, image_type=image_type) - - -def ec2_id_to_id(ec2_id): - """Convert an ec2 ID (i-[base 16 number]) to an instance id (int).""" - try: - return int(ec2_id.split('-')[-1], 16) - except ValueError: - raise exception.InvalidEc2Id(ec2_id=ec2_id) - - -def image_ec2_id(image_id, image_type='ami'): - """Returns image ec2_id using id and three letter type.""" - template = image_type + '-%08x' - return id_to_ec2_id(image_id, template=template) - - -def get_ip_info_for_instance_from_nw_info(nw_info): - if not isinstance(nw_info, network_model.NetworkInfo): - nw_info = network_model.NetworkInfo.hydrate(nw_info) - - ip_info = {} - fixed_ips = nw_info.fixed_ips() - ip_info['fixed_ips'] = [ip['address'] for ip in fixed_ips - if ip['version'] == 4] - ip_info['fixed_ip6s'] = [ip['address'] for ip in fixed_ips - if ip['version'] == 6] - ip_info['floating_ips'] = [ip['address'] for ip in nw_info.floating_ips()] - - return ip_info - - -def get_ip_info_for_instance(context, instance): - """Return a dictionary of IP information for an instance.""" - - if isinstance(instance, obj_base.NovaObject): - nw_info = instance.info_cache.network_info - else: - # FIXME(comstud): Temporary as we transition to objects. 
- info_cache = instance.info_cache or {} - nw_info = info_cache.get('network_info') - # Make sure empty response is turned into the model - if not nw_info: - nw_info = [] - return get_ip_info_for_instance_from_nw_info(nw_info) - - -def id_to_ec2_id(instance_id, template='i-%08x'): - """Convert an instance ID (int) to an ec2 ID (i-[base 16 number]).""" - return template % int(instance_id) - - -def id_to_ec2_inst_id(instance_id): - """Get or create an ec2 instance ID (i-[base 16 number]) from uuid.""" - if instance_id is None: - return None - elif uuidutils.is_uuid_like(instance_id): - ctxt = context.get_admin_context() - int_id = get_int_id_from_instance_uuid(ctxt, instance_id) - return id_to_ec2_id(int_id) - else: - return id_to_ec2_id(instance_id) - - -def ec2_inst_id_to_uuid(context, ec2_id): - """"Convert an instance id to uuid.""" - int_id = ec2_id_to_id(ec2_id) - return get_instance_uuid_from_int_id(context, int_id) - - -@memoize -def get_instance_uuid_from_int_id(context, int_id): - imap = objects.EC2InstanceMapping.get_by_id(context, int_id) - return imap.uuid - - -def id_to_ec2_snap_id(snapshot_id): - """Get or create an ec2 volume ID (vol-[base 16 number]) from uuid.""" - if uuidutils.is_uuid_like(snapshot_id): - ctxt = context.get_admin_context() - int_id = get_int_id_from_snapshot_uuid(ctxt, snapshot_id) - return id_to_ec2_id(int_id, 'snap-%08x') - else: - return id_to_ec2_id(snapshot_id, 'snap-%08x') - - -def id_to_ec2_vol_id(volume_id): - """Get or create an ec2 volume ID (vol-[base 16 number]) from uuid.""" - if uuidutils.is_uuid_like(volume_id): - ctxt = context.get_admin_context() - int_id = get_int_id_from_volume_uuid(ctxt, volume_id) - return id_to_ec2_id(int_id, 'vol-%08x') - else: - return id_to_ec2_id(volume_id, 'vol-%08x') - - -def ec2_vol_id_to_uuid(ec2_id): - """Get the corresponding UUID for the given ec2-id.""" - ctxt = context.get_admin_context() - - # NOTE(jgriffith) first strip prefix to get just the numeric - int_id = ec2_id_to_id(ec2_id) - return get_volume_uuid_from_int_id(ctxt, int_id) - - -_ms_time_regex = re.compile('^\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}\.\d{3,6}Z$') - - -def status_to_ec2_attach_status(volume): - """Get the corresponding EC2 attachment state. 
- - According to EC2 API, the valid attachment status in response is: - attaching | attached | detaching | detached - """ - volume_status = volume.get('status') - attach_status = volume.get('attach_status') - if volume_status in ('attaching', 'detaching'): - ec2_attach_status = volume_status - elif attach_status in ('attached', 'detached'): - ec2_attach_status = attach_status - else: - msg = _("Unacceptable attach status:%s for ec2 API.") % attach_status - raise exception.Invalid(msg) - return ec2_attach_status - - -def is_ec2_timestamp_expired(request, expires=None): - """Checks the timestamp or expiry time included in an EC2 request - and returns true if the request is expired - """ - timestamp = request.get('Timestamp') - expiry_time = request.get('Expires') - - def parse_strtime(strtime): - if _ms_time_regex.match(strtime): - # NOTE(MotoKen): time format for aws-sdk-java contains millisecond - time_format = "%Y-%m-%dT%H:%M:%S.%fZ" - else: - time_format = "%Y-%m-%dT%H:%M:%SZ" - return timeutils.parse_strtime(strtime, time_format) - - try: - if timestamp and expiry_time: - msg = _("Request must include either Timestamp or Expires," - " but cannot contain both") - LOG.error(msg) - raise exception.InvalidRequest(msg) - elif expiry_time: - query_time = parse_strtime(expiry_time) - return timeutils.is_older_than(query_time, -1) - elif timestamp: - query_time = parse_strtime(timestamp) - - # Check if the difference between the timestamp in the request - # and the time on our servers is larger than 5 minutes, the - # request is too old (or too new). - if query_time and expires: - return timeutils.is_older_than(query_time, expires) or \ - timeutils.is_newer_than(query_time, expires) - return False - except ValueError: - LOG.info("Timestamp is invalid.") - return True - - -@memoize -def get_int_id_from_instance_uuid(context, instance_uuid): - if instance_uuid is None: - return - try: - imap = objects.EC2InstanceMapping.get_by_uuid(context, instance_uuid) - return imap.id - except exception.NotFound: - imap = objects.EC2InstanceMapping(context) - imap.uuid = instance_uuid - imap.create() - return imap.id - - -@memoize -def get_int_id_from_volume_uuid(context, volume_uuid): - if volume_uuid is None: - return - try: - vmap = objects.EC2VolumeMapping.get_by_uuid(context, volume_uuid) - return vmap.id - except exception.NotFound: - vmap = objects.EC2VolumeMapping(context) - vmap.uuid = volume_uuid - vmap.create() - return vmap.id - - -@memoize -def get_volume_uuid_from_int_id(context, int_id): - vmap = objects.EC2VolumeMapping.get_by_id(context, int_id) - return vmap.uuid - - -def ec2_snap_id_to_uuid(ec2_id): - """Get the corresponding UUID for the given ec2-id.""" - ctxt = context.get_admin_context() - - # NOTE(jgriffith) first strip prefix to get just the numeric - int_id = ec2_id_to_id(ec2_id) - return get_snapshot_uuid_from_int_id(ctxt, int_id) - - -@memoize -def get_int_id_from_snapshot_uuid(context, snapshot_uuid): - if snapshot_uuid is None: - return - try: - smap = objects.EC2SnapshotMapping.get_by_uuid(context, snapshot_uuid) - return smap.id - except exception.NotFound: - smap = objects.EC2SnapshotMapping(context, uuid=snapshot_uuid) - smap.create() - return smap.id - - -@memoize -def get_snapshot_uuid_from_int_id(context, int_id): - smap = objects.EC2SnapshotMapping.get_by_id(context, int_id) - return smap.uuid - - -_c2u = re.compile('(((?<=[a-z])[A-Z])|([A-Z](?![A-Z]|$)))') - - -def camelcase_to_underscore(str): - return _c2u.sub(r'_\1', str).lower().strip('_') - - -def 
_try_convert(value): - """Return a non-string from a string or unicode, if possible. - - ============= ===================================================== - When value is returns - ============= ===================================================== - zero-length '' - 'None' None - 'True' True case insensitive - 'False' False case insensitive - '0', '-0' 0 - 0xN, -0xN int from hex (positive) (N is any number) - 0bN, -0bN int from binary (positive) (N is any number) - * try conversion to int, float, complex, fallback value - - """ - def _negative_zero(value): - epsilon = 1e-7 - return 0 if abs(value) < epsilon else value - - if len(value) == 0: - return '' - if value == 'None': - return None - lowered_value = value.lower() - if lowered_value == 'true': - return True - if lowered_value == 'false': - return False - for prefix, base in [('0x', 16), ('0b', 2), ('0', 8), ('', 10)]: - try: - if lowered_value.startswith((prefix, "-" + prefix)): - return int(lowered_value, base) - except ValueError: - pass - try: - return _negative_zero(float(value)) - except ValueError: - return value - - -def dict_from_dotted_str(items): - """parse multi dot-separated argument into dict. - EBS boot uses multi dot-separated arguments like - BlockDeviceMapping.1.DeviceName=snap-id - Convert the above into - {'block_device_mapping': {'1': {'device_name': snap-id}}} - """ - args = {} - for key, value in items: - parts = key.split(".") - key = str(camelcase_to_underscore(parts[0])) - if isinstance(value, six.string_types): - # NOTE(vish): Automatically convert strings back - # into their respective values - value = _try_convert(value) - - if len(parts) > 1: - d = args.get(key, {}) - args[key] = d - for k in parts[1:-1]: - k = camelcase_to_underscore(k) - v = d.get(k, {}) - d[k] = v - d = v - d[camelcase_to_underscore(parts[-1])] = value - else: - args[key] = value - - return args - - -def search_opts_from_filters(filters): - return {f['name'].replace('-', '_'): f['value']['1'] - for f in filters if f['value']['1']} if filters else {} - - -def regex_from_ec2_regex(ec2_re): - """Converts an EC2-style regex to a python regex. - Approach is based on python fnmatch. - """ - - iter_ec2_re = iter(ec2_re) - - py_re = '' - for char in iter_ec2_re: - if char == '*': - py_re += '.*' - elif char == '?': - py_re += '.' - elif char == '\\': - try: - next_char = next(iter_ec2_re) - except StopIteration: - next_char = '' - if next_char == '*' or next_char == '?': - py_re += '[%s]' % next_char - else: - py_re += '\\\\' + next_char - else: - py_re += re.escape(char) - return '\A%s\Z(?s)' % py_re diff --git a/nova/api/manager.py b/nova/api/manager.py deleted file mode 100644 index 32f800e1b5a..00000000000 --- a/nova/api/manager.py +++ /dev/null @@ -1,36 +0,0 @@ -# Copyright 2010 United States Government as represented by the -# Administrator of the National Aeronautics and Space Administration. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
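The removed ``dict_from_dotted_str`` helper above is the parsing step behind EC2's dotted query arguments. A condensed, runnable sketch of it (the ``_try_convert`` value coercion is omitted for brevity, and ``setdefault`` replaces the original's manual dict threading)::

    import re

    _c2u = re.compile('(((?<=[a-z])[A-Z])|([A-Z](?![A-Z]|$)))')

    def camelcase_to_underscore(name):
        # 'BlockDeviceMapping' -> 'block_device_mapping'
        return _c2u.sub(r'_\1', name).lower().strip('_')

    def dict_from_dotted_str(items):
        args = {}
        for key, value in items:
            parts = key.split('.')
            key = camelcase_to_underscore(parts[0])
            if len(parts) > 1:
                d = args.setdefault(key, {})
                for k in parts[1:-1]:
                    d = d.setdefault(camelcase_to_underscore(k), {})
                d[camelcase_to_underscore(parts[-1])] = value
            else:
                args[key] = value
        return args

    assert dict_from_dotted_str(
        [('BlockDeviceMapping.1.DeviceName', 'snap-id')]
    ) == {'block_device_mapping': {'1': {'device_name': 'snap-id'}}}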
- -from nova import manager -from nova.network import driver -from nova import utils - - -class MetadataManager(manager.Manager): - """Metadata Manager. - - This class manages the Metadata API service initialization. Currently, it - just adds an iptables filter rule for the metadata service. - """ - def __init__(self, *args, **kwargs): - super(MetadataManager, self).__init__(*args, **kwargs) - - if not utils.is_neutron(): - # NOTE(mikal): we only add iptables rules if we're running - # under nova-network. This code should go away when the - # deprecation of nova-network is complete. - self.network_driver = driver.load_network_driver() - self.network_driver.metadata_accept() diff --git a/nova/api/metadata/base.py b/nova/api/metadata/base.py index b4cd397c4e2..aef1354ab3b 100644 --- a/nova/api/metadata/base.py +++ b/nova/api/metadata/base.py @@ -16,6 +16,7 @@ """Instance Metadata information.""" +import itertools import os import posixpath @@ -23,20 +24,16 @@ from oslo_serialization import base64 from oslo_serialization import jsonutils from oslo_utils import timeutils -import six -from nova.api.ec2 import ec2utils from nova.api.metadata import password from nova.api.metadata import vendordata_dynamic from nova.api.metadata import vendordata_json from nova import block_device -from nova.cells import opts as cells_opts -from nova.cells import rpcapi as cells_rpcapi import nova.conf from nova import context from nova import exception -from nova import network -from nova.network.security_group import openstack_driver +from nova.network import neutron +from nova.network import security_group_api from nova import objects from nova.objects import virt_device_metadata as metadata_obj from nova import utils @@ -63,8 +60,8 @@ # hidden from the listing, but can still be requested explicitly, which is # required for testing purposes. We know this isn't great, but its inherited # from EC2, which this needs to be compatible with. -# NOTE(jichen): please update doc/source/user/metadata-service.rst on the -# metadata output when new version is created in order to make doc up-to-date. +# NOTE(jichen): please update doc/source/user/metadata.rst on the metadata +# output when new version is created in order to make doc up-to-date. FOLSOM = '2012-08-10' GRIZZLY = '2013-04-04' HAVANA = '2013-10-17' @@ -73,6 +70,7 @@ NEWTON_TWO = '2016-10-06' OCATA = '2017-02-22' ROCKY = '2018-08-27' +VICTORIA = '2020-10-14' OPENSTACK_VERSIONS = [ FOLSOM, @@ -83,6 +81,7 @@ NEWTON_TWO, OCATA, ROCKY, + VICTORIA, ] VERSION = "version" @@ -112,8 +111,7 @@ class InstanceMetadata(object): """Instance metadata.""" def __init__(self, instance, address=None, content=None, extra_md=None, - network_info=None, network_metadata=None, - request_context=None): + network_info=None, network_metadata=None): """Creation of this object should basically cover all time consuming collection. Methods after that should not cause time delays due to network operations or lengthy cpu operations. @@ -124,8 +122,13 @@ def __init__(self, instance, address=None, content=None, extra_md=None, if not content: content = [] + # NOTE(gibi): this is not a cell targeted context even if we are called + # in a situation when the instance is in a different cell than the + # metadata service itself. ctxt = context.get_admin_context() + self.mappings = _format_instance_mapping(instance) + # NOTE(danms): Sanitize the instance to limit the amount of stuff # inside that may not pickle well (i.e. context). 
We also touch # some of the things we'll lazy load later to make sure we keep their @@ -133,6 +136,7 @@ def __init__(self, instance, address=None, content=None, extra_md=None, instance.ec2_ids instance.keypairs instance.device_metadata + instance.numa_topology instance = objects.Instance.obj_from_primitive( instance.obj_to_primitive()) @@ -143,12 +147,9 @@ def __init__(self, instance, address=None, content=None, extra_md=None, self.availability_zone = instance.get('availability_zone') - secgroup_api = openstack_driver.get_openstack_security_group_driver() - self.security_groups = secgroup_api.get_instance_security_groups( + self.security_groups = security_group_api.get_instance_security_groups( ctxt, instance) - self.mappings = _format_instance_mapping(ctxt, instance) - if instance.user_data is not None: self.userdata_raw = base64.decode_as_bytes(instance.user_data) else: @@ -176,8 +177,7 @@ def __init__(self, instance, address=None, content=None, extra_md=None, else: self.network_metadata = network_metadata - self.ip_info = \ - ec2utils.get_ip_info_for_instance_from_nw_info(network_info) + self.ip_info = netutils.get_ec2_ip_info(network_info) self.network_config = None cfg = netutils.get_injected_network_template(network_info) @@ -205,12 +205,9 @@ def __init__(self, instance, address=None, content=None, extra_md=None, # contain the admin password for the instance, and we shouldn't # pass that to external services. self.vendordata_providers = { - 'StaticJSON': vendordata_json.JsonFileVendorData( - instance=instance, address=address, - extra_md=extra_md, network_info=network_info), + 'StaticJSON': vendordata_json.JsonFileVendorData(), 'DynamicJSON': vendordata_dynamic.DynamicVendorData( - instance=instance, address=address, - network_info=network_info, context=request_context) + instance=instance) } def _route_configuration(self): @@ -281,17 +278,9 @@ def get_ec2_metadata(self, version): meta_data['public-hostname'] = hostname meta_data['public-ipv4'] = floating_ip - if False and self._check_version('2007-03-01', version): - # TODO(vish): store product codes - meta_data['product-codes'] = [] - if self._check_version('2007-08-29', version): - instance_type = self.instance.get_flavor() - meta_data['instance-type'] = instance_type['name'] - - if False and self._check_version('2007-10-10', version): - # TODO(vish): store ancestor ids - meta_data['ancestor-ami-ids'] = [] + flavor = self.instance.get_flavor() + meta_data['instance-type'] = flavor['name'] if self._check_version('2007-12-15', version): meta_data['block-device-mapping'] = self.mappings @@ -335,23 +324,12 @@ def _metadata_as_json(self, version, path): metadata['network_config'] = self.network_config if self.instance.key_name: - if cells_opts.get_cell_type() == 'compute': - cells_api = cells_rpcapi.CellsAPI() - try: - keypair = cells_api.get_keypair_at_top( - context.get_admin_context(), self.instance.user_id, - self.instance.key_name) - except exception.KeypairNotFound: - # NOTE(lpigueir): If keypair was deleted, treat - # it like it never had any - keypair = None - else: - keypairs = self.instance.keypairs - # NOTE(mriedem): It's possible for the keypair to be deleted - # before it was migrated to the instance_extra table, in which - # case lazy-loading instance.keypairs will handle the 404 and - # just set an empty KeyPairList object on the instance. 
- keypair = keypairs[0] if keypairs else None + keypairs = self.instance.keypairs + # NOTE(mriedem): It's possible for the keypair to be deleted + # before it was migrated to the instance_extra table, in which + # case lazy-loading instance.keypairs will handle the 404 and + # just set an empty KeyPairList object on the instance. + keypair = keypairs[0] if keypairs else None if keypair: metadata['public_keys'] = { @@ -382,6 +360,9 @@ def _metadata_as_json(self, version, path): if self._check_os_version(NEWTON_ONE, version): metadata['devices'] = self._get_device_metadata(version) + if self._check_os_version(VICTORIA, version): + metadata['dedicated_cpus'] = self._get_instance_dedicated_cpus() + self.set_mimetype(MIME_TYPE_APPLICATION_JSON) return jsonutils.dump_as_bytes(metadata) @@ -435,8 +416,8 @@ def _get_device_metadata(self, version): device_metadata['mac'] = device.mac # NOTE(artom) If a device has neither tags, vlan or # vf_trusted, don't expose it - if not ('tags' in device or 'vlan' in device_metadata - or 'vf_trusted' in device_metadata): + if not ('tags' in device or 'vlan' in device_metadata or + 'vf_trusted' in device_metadata): continue elif isinstance(device, metadata_obj.DiskMetadata): device_metadata['type'] = 'disk' @@ -459,6 +440,15 @@ def _get_device_metadata(self, version): device_metadata_list.append(device_metadata) return device_metadata_list + def _get_instance_dedicated_cpus(self): + dedicated_cpus = [] + if self.instance.numa_topology: + dedicated_cpus = sorted(list(itertools.chain.from_iterable([ + cell.pcpuset for cell in self.instance.numa_topology.cells + ]))) + + return dedicated_cpus + def _handle_content(self, path_tokens): if len(path_tokens) == 1: raise KeyError("no listing for %s" % "/".join(path_tokens)) @@ -536,9 +526,12 @@ def _check_os_version(self, required, requested): return self._check_version(required, requested, OPENSTACK_VERSIONS) def _get_hostname(self): - return "%s%s%s" % (self.instance.hostname, - '.' if CONF.dhcp_domain else '', - CONF.dhcp_domain) + # TODO(stephenfin): At some point in the future, we may wish to + # retrieve this information from neutron. 
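The new ``dedicated_cpus`` metadata entry added above flattens the per-NUMA-cell ``pcpuset`` values into a single sorted list. A tiny standalone illustration of that flattening, with plain sets standing in for the NUMA cell objects::

    import itertools

    # Hypothetical pcpusets from a two-cell NUMA topology.
    cells = [{3, 2}, {7, 6}]

    dedicated_cpus = sorted(itertools.chain.from_iterable(cells))
    assert dedicated_cpus == [2, 3, 6, 7]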
+ if CONF.api.dhcp_domain: + return '.'.join([self.instance.hostname, CONF.api.dhcp_domain]) + + return self.instance.hostname def lookup(self, path): if path == "" or path[0] != "/": @@ -665,7 +658,7 @@ def handle_path(self, path_tokens): def get_metadata_by_address(address): ctxt = context.get_admin_context() - fixed_ip = network.API().get_fixed_ip_by_address(ctxt, address) + fixed_ip = neutron.API().get_fixed_ip_by_address(ctxt, address) LOG.info('Fixed IP %(ip)s translates to instance UUID %(uuid)s', {'ip': address, 'uuid': fixed_ip['instance_uuid']}) @@ -679,7 +672,13 @@ def get_metadata_by_instance_id(instance_id, address, ctxt=None): attrs = ['ec2_ids', 'flavor', 'info_cache', 'metadata', 'system_metadata', 'security_groups', 'keypairs', - 'device_metadata'] + 'device_metadata', 'numa_topology'] + + if CONF.api.local_metadata_per_cell: + instance = objects.Instance.get_by_uuid(ctxt, instance_id, + expected_attrs=attrs) + return InstanceMetadata(instance, address) + try: im = objects.InstanceMapping.get_by_instance_uuid(ctxt, instance_id) except exception.InstanceMappingNotFound: @@ -695,9 +694,8 @@ def get_metadata_by_instance_id(instance_id, address, ctxt=None): return InstanceMetadata(instance, address) -def _format_instance_mapping(ctxt, instance): - bdms = objects.BlockDeviceMappingList.get_by_instance_uuid( - ctxt, instance.uuid) +def _format_instance_mapping(instance): + bdms = instance.get_bdms() return block_device.instance_block_mapping(instance, bdms) @@ -719,7 +717,7 @@ def ec2_md_print(data): return output[:-1] elif isinstance(data, list): return '\n'.join(data) - elif isinstance(data, (bytes, six.text_type)): + elif isinstance(data, (bytes, str)): return data else: return str(data) diff --git a/nova/api/metadata/handler.py b/nova/api/metadata/handler.py index eb3e84e2e29..049935bc415 100644 --- a/nova/api/metadata/handler.py +++ b/nova/api/metadata/handler.py @@ -22,7 +22,7 @@ from oslo_log import log as logging from oslo_utils import encodeutils from oslo_utils import secretutils as secutils -import six +from oslo_utils import strutils import webob.dec import webob.exc @@ -33,11 +33,17 @@ from nova import context as nova_context from nova import exception from nova.i18n import _ -from nova.network.neutronv2 import api as neutronapi +from nova.network import neutron as neutronapi CONF = nova.conf.CONF LOG = logging.getLogger(__name__) +# 160 networks is large enough to satisfy most cases. +# At around 182 networks, however, the Neutron server rejects the request +# because the URL exceeds its maximum length. Keeping the limit at 160 +# leaves room for additional query parameters when they're needed. +MAX_QUERY_NETWORKS = 160 + class MetadataRequestHandler(wsgi.Application): """Serve metadata.""" @@ -96,7 +102,12 @@ def __call__(self, req): req.response.content_type = base.MIME_TYPE_TEXT_PLAIN return req.response - LOG.debug('Metadata request headers: %s', req.headers) + # Convert webob.headers.EnvironHeaders to a dict and mask any sensitive + # details from the logs. + if CONF.debug: + headers = {k: req.headers[k] for k in req.headers} + LOG.debug('Metadata request headers: %s', + strutils.mask_dict_password(headers)) if CONF.neutron.service_metadata_proxy: if req.headers.get('X-Metadata-Provider'): meta_data = self._handle_instance_id_request_from_lb(req) @@ -139,8 +150,7 @@ def _handle_remote_ip_request(self, req): remote_address) msg = _('An unknown error has occurred.
' 'Please try your request again.') - raise webob.exc.HTTPInternalServerError( - explanation=six.text_type(msg)) + raise webob.exc.HTTPInternalServerError(explanation=str(msg)) if meta_data is None: LOG.error('Failed to get metadata for IP %s: no metadata', @@ -162,9 +172,9 @@ def _handle_instance_id_request(self, req): msg = _('X-Instance-ID-Signature header is missing from request.') elif tenant_id is None: msg = _('X-Tenant-ID header is missing from request.') - elif not isinstance(instance_id, six.string_types): + elif not isinstance(instance_id, str): msg = _('Multiple X-Instance-ID headers found within request.') - elif not isinstance(tenant_id, six.string_types): + elif not isinstance(tenant_id, str): msg = _('Multiple X-Tenant-ID headers found within request.') else: msg = None @@ -203,16 +213,24 @@ def _get_instance_id_from_lb(self, provider_id, instance_address): advanced_service_providers=[provider_id], fields=['network_id']) + if not md_subnets or not md_subnets.get('subnets'): + msg = _('Could not find any subnets for provider %s') % provider_id + LOG.error(msg) + raise webob.exc.HTTPBadRequest(explanation=msg) + md_networks = [subnet['network_id'] for subnet in md_subnets['subnets']] try: # Retrieve the instance data from the instance's port - instance_data = neutron.list_ports( - context, - fixed_ips='ip_address=' + instance_address, - network_id=md_networks, - fields=['device_id', 'tenant_id'])['ports'][0] + ports = [] + while md_networks: + ports.extend(neutron.list_ports( + context, + fixed_ips='ip_address=' + instance_address, + network_id=md_networks[:MAX_QUERY_NETWORKS], + fields=['device_id', 'tenant_id'])['ports']) + md_networks = md_networks[MAX_QUERY_NETWORKS:] except Exception as e: LOG.error('Failed to get instance id for metadata ' 'request, provider %(provider)s ' @@ -226,12 +244,23 @@ def _get_instance_id_from_lb(self, provider_id, instance_address): 'Please try your request again.') raise webob.exc.HTTPBadRequest(explanation=msg) + if len(ports) != 1: + msg = _('Expected a single port matching provider %(pr)s ' + 'and IP %(ip)s. Found %(count)d.') % { + 'pr': provider_id, + 'ip': instance_address, + 'count': len(ports)} + + LOG.error(msg) + raise webob.exc.HTTPBadRequest(explanation=msg) + + instance_data = ports[0] instance_id = instance_data['device_id'] tenant_id = instance_data['tenant_id'] # instance_data is unicode-encoded, while cache_utils doesn't like # that. 
Therefore we convert to str - if isinstance(instance_id, six.text_type): + if isinstance(instance_id, str): instance_id = instance_id.encode('utf-8') return instance_id, tenant_id @@ -252,10 +281,19 @@ def _handle_instance_id_request_from_lb(self, req): self._validate_shared_secret(provider_id, signature, instance_address) - instance_id, tenant_id = self._get_instance_id_from_lb( - provider_id, instance_address) - LOG.debug('Instance %s with address %s matches provider %s', - instance_id, remote_address, provider_id) + cache_key = 'provider-%s-%s' % (provider_id, instance_address) + data = self._cache.get(cache_key) + if data: + LOG.debug("Using cached metadata for %s for %s", + provider_id, instance_address) + instance_id, tenant_id = data + else: + instance_id, tenant_id = self._get_instance_id_from_lb( + provider_id, instance_address) + if CONF.api.metadata_cache_expiration > 0: + self._cache.set(cache_key, (instance_id, tenant_id)) + LOG.debug('Instance %s with address %s matches provider %s', + instance_id, remote_address, provider_id) return self._get_meta_by_instance_id(instance_id, tenant_id, instance_address) @@ -265,8 +303,8 @@ def _validate_shared_secret(self, requestor_id, signature, encodeutils.to_utf8(CONF.neutron.metadata_proxy_shared_secret), encodeutils.to_utf8(requestor_id), hashlib.sha256).hexdigest() - - if not secutils.constant_time_compare(expected_signature, signature): + if (not signature or + not secutils.constant_time_compare(expected_signature, signature)): if requestor_id: LOG.warning('X-Instance-ID-Signature: %(signature)s does ' 'not match the expected value: ' @@ -277,7 +315,6 @@ def _validate_shared_secret(self, requestor_id, signature, 'expected_signature': expected_signature, 'requestor_id': requestor_id, 'requestor_address': requestor_address}) - msg = _('Invalid proxy request signature.') raise webob.exc.HTTPForbidden(explanation=msg) @@ -290,8 +327,7 @@ def _get_meta_by_instance_id(self, instance_id, tenant_id, remote_address): instance_id) msg = _('An unknown error has occurred. ' 'Please try your request again.') - raise webob.exc.HTTPInternalServerError( - explanation=six.text_type(msg)) + raise webob.exc.HTTPInternalServerError(explanation=str(msg)) if meta_data is None: LOG.error('Failed to get metadata for instance id: %s', diff --git a/nova/api/metadata/password.py b/nova/api/metadata/password.py index c906de78908..a91e70a3a13 100644 --- a/nova/api/metadata/password.py +++ b/nova/api/metadata/password.py @@ -13,10 +13,9 @@ # License for the specific language governing permissions and limitations # under the License. -import six -from six.moves import range from webob import exc +import nova.conf from nova import context from nova import exception from nova.i18n import _ @@ -24,6 +23,8 @@ from nova import utils +CONF = nova.conf.CONF + CHUNKS = 4 CHUNK_LENGTH = 255 MAX_SIZE = CHUNKS * CHUNK_LENGTH @@ -44,7 +45,7 @@ def convert_password(context, password): Password is stored with the keys 'password_0' -> 'password_3'. 
""" password = password or '' - if six.PY3 and isinstance(password, bytes): + if isinstance(password, bytes): password = password.decode('utf-8') meta = {} @@ -68,12 +69,18 @@ def handle_password(req, meta_data): msg = _("Request is too large.") raise exc.HTTPBadRequest(explanation=msg) - im = objects.InstanceMapping.get_by_instance_uuid(ctxt, meta_data.uuid) - with context.target_cell(ctxt, im.cell_mapping) as cctxt: - try: - instance = objects.Instance.get_by_uuid(cctxt, meta_data.uuid) - except exception.InstanceNotFound as e: - raise exc.HTTPBadRequest(explanation=e.format_message()) + if CONF.api.local_metadata_per_cell: + instance = objects.Instance.get_by_uuid(ctxt, meta_data.uuid) + else: + im = objects.InstanceMapping.get_by_instance_uuid( + ctxt, meta_data.uuid) + with context.target_cell(ctxt, im.cell_mapping) as cctxt: + try: + instance = objects.Instance.get_by_uuid( + cctxt, meta_data.uuid) + except exception.InstanceNotFound as e: + raise exc.HTTPBadRequest(explanation=e.format_message()) + instance.system_metadata.update(convert_password(ctxt, req.body)) instance.save() else: diff --git a/nova/api/metadata/vendordata_dynamic.py b/nova/api/metadata/vendordata_dynamic.py index 2db9a478e6a..3de9bb384d1 100644 --- a/nova/api/metadata/vendordata_dynamic.py +++ b/nova/api/metadata/vendordata_dynamic.py @@ -21,7 +21,6 @@ from keystoneauth1 import loading as ks_loading from oslo_log import log as logging from oslo_serialization import jsonutils -import six from nova.api.metadata import vendordata import nova.conf @@ -60,12 +59,7 @@ def _load_ks_session(conf): class DynamicVendorData(vendordata.VendorDataDriver): - def __init__(self, context=None, instance=None, address=None, - network_info=None): - # NOTE(mikal): address and network_info are unused, but can't be - # removed / renamed as this interface is shared with the static - # JSON plugin. - self.context = context + def __init__(self, instance): self.instance = instance # We only create the session if we make a request. self.session = None @@ -115,7 +109,7 @@ def _do_request(self, service_name, url): 'error': e}, instance=self.instance) if CONF.api.vendordata_dynamic_failure_fatal: - six.reraise(type(e), e, sys.exc_info()[2]) + raise e.with_traceback(sys.exc_info()[2]) return {} diff --git a/nova/api/openstack/__init__.py b/nova/api/openstack/__init__.py index 13467c18023..abca626af10 100644 --- a/nova/api/openstack/__init__.py +++ b/nova/api/openstack/__init__.py @@ -17,6 +17,7 @@ """ WSGI middleware for OpenStack API controllers. """ +import nova.monkey_patch # noqa from oslo_log import log as logging import routes @@ -165,11 +166,7 @@ def _get_project_id_token(self): # NOTE(sdague): project_id parameter is only valid if its hex # or hex + dashes (note, integers are a subset of this). This # is required to hand our overlaping routes issues. - project_id_regex = '[0-9a-f\-]+' - if CONF.osapi_v21.project_id_regex: - project_id_regex = CONF.osapi_v21.project_id_regex - - return '{project_id:%s}' % project_id_regex + return '{project_id:[0-9a-f-]+}' def resource(self, member_name, collection_name, **kwargs): project_id_token = self._get_project_id_token() diff --git a/nova/api/openstack/api_version_request.py b/nova/api/openstack/api_version_request.py index 058945ae5b1..6c205fbae99 100644 --- a/nova/api/openstack/api_version_request.py +++ b/nova/api/openstack/api_version_request.py @@ -158,6 +158,95 @@ /os-server-groups/{group_id} API. * 2.65 - Add support for abort live migrations in ``queued`` and ``preparing`` status. 
+ * 2.66 - Add ``changes-before`` to let users specify the ``updated_at`` time to filter nova resources; the filter applies to the servers API, os-instance-action API and os-migrations API. + * 2.67 - Adds the optional ``volume_type`` field to the ``block_device_mapping_v2`` parameter when creating a server. + * 2.68 - Remove support for forced live migration and evacuate server actions. + * 2.69 - Add support for returning minimal constructs for ``GET /servers``, ``GET /servers/detail``, ``GET /servers/{server_id}`` and ``GET /os-services`` when there is a transient unavailability condition in the deployment like an infrastructure failure. + * 2.70 - Exposes virtual device tags in the response of the ``os-volume_attachments`` and ``os-interface`` APIs. + * 2.71 - Adds the ``server_groups`` field to ``GET /servers/{id}``, ``PUT /servers/{server_id}`` and ``POST /servers/{server_id}/action`` (rebuild) responses. + * 2.72 - Add support for neutron ports with resource request during server create. Server move operations are not yet supported for servers with such ports. + * 2.73 - Adds support for specifying a reason when locking the server and exposes this via the response from ``GET /servers/detail``, ``GET /servers/{server_id}``, ``PUT /servers/{server_id}`` and ``POST /servers/{server_id}/action`` where the action is rebuild. It also supports ``locked`` as a filter/sort parameter for ``GET /servers/detail`` and ``GET /servers``. + * 2.74 - Add support for specifying ``host`` and/or ``hypervisor_hostname`` in the request body to ``POST /servers``. Allow users to specify which host/node they want their servers to land on and still be validated by the scheduler. + * 2.75 - Multiple API cleanups, listed below: + - Return 400 for unknown parameters, whether in the query string or in the request body. + - Make the server representation consistent among the GET, PUT and rebuild server API responses. + - Change the default return value of the swap field from the empty string to 0 (integer) in flavor APIs. + - Always return the ``servers`` field in the response of the GET hypervisors API, even if there are no servers on the hypervisor. + * 2.76 - Adds ``power-update`` event to ``os-server-external-events`` API. The changes to the power state of an instance caused by this event can be viewed through ``GET /servers/{server_id}/os-instance-actions`` and ``GET /servers/{server_id}/os-instance-actions/{request_id}``. + * 2.77 - Add support for specifying ``availability_zone`` when unshelving a shelved offloaded server. + * 2.78 - Adds new API ``GET /servers/{server_id}/topology`` which shows the NUMA topology of a given server. + * 2.79 - Adds support for specifying the ``delete_on_termination`` field in the request body to ``POST /servers/{server_id}/os-volume_attachments`` and exposes this via the response from ``POST /servers/{server_id}/os-volume_attachments``, ``GET /servers/{server_id}/os-volume_attachments`` and ``GET /servers/{server_id}/os-volume_attachments/{volume_id}``. + * 2.80 - Adds support for optional query parameters ``user_id`` and ``project_id`` to the ``GET /os-migrations`` API and exposes ``user_id`` and ``project_id`` via the response from ``GET /os-migrations``, ``GET /servers/{server_id}/migrations``, and ``GET /servers/{server_id}/migrations/{migration_id}``. + * 2.81 - Adds support for image cache management by aggregate by adding ``POST /os-aggregates/{aggregate_id}/images``.
+ * 2.82 - Adds ``accelerator-request-bound`` event to + ``os-server-external-events`` API. This event is sent by Cyborg + to indicate completion of ARQ binding. The ARQs can be obtained + from Cyborg with ``GET /v2/accelerator_requests?instance={uuid}`` + * 2.83 - Allow more filter parameters for ``GET /servers/detail`` and + ``GET /servers`` for non-admin. + * 2.84 - Adds ``details`` field to instance action events. + * 2.85 - Add support for + ``PUT /servers/{server_id}/os-volume_attachments/{volume_id}`` + which supports specifying the ``delete_on_termination`` field in + the request body to change the attached volume's flag. + * 2.86 - Add support for validation of known extra specs to the + ``POST /flavors/{flavor_id}/os-extra_specs`` and + ``PUT /flavors/{flavor_id}/os-extra_specs/{id}`` APIs. + * 2.87 - Adds support for rescuing boot from volume instances when the + compute host reports the COMPUTE_BFV_RESCUE capability trait. + * 2.88 - Drop statistics-style fields from the ``/os-hypervisors/detail`` + and ``/os-hypervisors/{hypervisor_id}`` APIs, and remove the + ``/os-hypervisors/statistics`` and + ``/os-hypervisors/{hypervisor_id}/uptime`` APIs entirely. + * 2.89 - Add ``attachment_id``, ``bdm_uuid`` and remove ``id`` from the + responses of ``GET /servers/{server_id}/os-volume_attachments`` + and ``GET /servers/{server_id}/os-volume_attachments/{volume_id}`` + * 2.90 - Add support for requesting a specific hostname when creating, + updating or rebuilding an instance. The + ``OS-EXT-SRV-ATTR:hostname`` attribute is now returned in various + server responses regardless of policy configuration. """ # The minimum and maximum versions of the API supported @@ -165,8 +254,8 @@ # minimum version of the API supported. # Note(cyeoh): This only applies for the v2.1 API once microversions # support is fully merged. It does not affect the V2 API. -_MIN_API_VERSION = "2.1" -_MAX_API_VERSION = "2.65" +_MIN_API_VERSION = '2.1' +_MAX_API_VERSION = '2.90' DEFAULT_API_VERSION = _MIN_API_VERSION # Almost all proxy APIs which are related to network, images and baremetal diff --git a/nova/api/openstack/common.py b/nova/api/openstack/common.py index 7ac2e000db6..f6c08825a23 100644 --- a/nova/api/openstack/common.py +++ b/nova/api/openstack/common.py @@ -14,14 +14,12 @@ # under the License. 
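One caveat behind the microversion bump above ('2.65' to '2.90'): microversion strings must be compared numerically, not lexicographically. A short illustrative sketch (``parse_microversion`` is a hypothetical helper for this note, not nova's ``APIVersionRequest``)::

    def parse_microversion(version):
        # '2.90' -> (2, 90)
        major, minor = version.split('.')
        return (int(major), int(minor))

    # String comparison orders these incorrectly...
    assert '2.10' < '2.9'
    # ...while tuple comparison preserves the intended order.
    assert parse_microversion('2.10') > parse_microversion('2.9')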
import collections -import functools import itertools import re +from urllib import parse as urlparse from oslo_log import log as logging from oslo_utils import strutils -import six -import six.moves.urllib.parse as urlparse import webob from webob import exc @@ -41,6 +39,9 @@ QUOTAS = quota.QUOTAS +POWER_ON = 'POWER_ON' +POWER_OFF = 'POWER_OFF' + _STATE_MAP = { vm_states.ACTIVE: { 'default': 'ACTIVE', @@ -283,6 +284,7 @@ def check_img_metadata_properties_quota(context, metadata): if not metadata: return try: + QUOTAS.initialize() QUOTAS.limit_check(context, metadata_items=len(metadata)) except exception.OverQuota: expl = _("Image metadata limit exceeded") @@ -460,11 +462,13 @@ def _update_compute_link_prefix(self, orig_url): return self._update_link_prefix(orig_url, CONF.api.compute_link_prefix) -def get_instance(compute_api, context, instance_id, expected_attrs=None): +def get_instance(compute_api, context, instance_id, expected_attrs=None, + cell_down_support=False): """Fetch an instance from the compute API, handling error checking.""" try: return compute_api.get(context, instance_id, - expected_attrs=expected_attrs) + expected_attrs=expected_attrs, + cell_down_support=cell_down_support) except exception.InstanceNotFound as e: raise exc.HTTPNotFound(explanation=e.format_message()) @@ -493,15 +497,6 @@ def get_flavor(context, flavor_id): raise exc.HTTPNotFound(explanation=error.format_message()) -def check_cells_enabled(function): - @functools.wraps(function) - def inner(*args, **kwargs): - if not CONF.cells.enable: - raise_feature_not_supported() - return function(*args, **kwargs) - return inner - - def is_all_tenants(search_opts): """Checks to see if the all_tenants flag is in search_opts @@ -513,13 +508,28 @@ def is_all_tenants(search_opts): try: all_tenants = strutils.bool_from_string(all_tenants, True) except ValueError as err: - raise exception.InvalidInput(six.text_type(err)) + raise exception.InvalidInput(str(err)) else: # The empty string is considered enabling all_tenants all_tenants = 'all_tenants' in search_opts return all_tenants +def is_locked(search_opts): + """Converts the value of the locked parameter to a boolean. Note that + this function will be called only if locked exists in search_opts. + + :param dict search_opts: The search options for a request + :returns: boolean indicating if locked is being requested or not + """ + locked = search_opts.get('locked') + try: + locked = strutils.bool_from_string(locked, strict=True) + except ValueError as err: + raise exception.InvalidInput(str(err)) + return locked + + def supports_multiattach_volume(req): """Check to see if the requested API version is high enough for multiattach @@ -533,3 +543,14 @@ def supports_multiattach_volume(req): volume multiattach support, False otherwise. """ return api_version_request.is_supported(req, '2.60') + + +def supports_port_resource_request(req): + """Check to see if the requested API version is high enough for resource + request + + :param req: The incoming API request + :returns: True if the requested API microversion is high enough for + port resource request support, False otherwise. 
+ """ + return api_version_request.is_supported(req, '2.72') diff --git a/nova/api/openstack/compute/admin_actions.py b/nova/api/openstack/compute/admin_actions.py index 49eddda7e0e..ba882e6a320 100644 --- a/nova/api/openstack/compute/admin_actions.py +++ b/nova/api/openstack/compute/admin_actions.py @@ -18,9 +18,11 @@ from nova.api.openstack.compute.schemas import reset_server_state from nova.api.openstack import wsgi from nova.api import validation -from nova import compute +from nova.compute import api as compute +from nova.compute import instance_actions from nova.compute import vm_states from nova import exception +from nova import objects from nova.policies import admin_actions as aa_policies # States usable in resetState action @@ -30,24 +32,15 @@ class AdminActionsController(wsgi.Controller): - def __init__(self, *args, **kwargs): - super(AdminActionsController, self).__init__(*args, **kwargs) + def __init__(self): + super(AdminActionsController, self).__init__() self.compute_api = compute.API() - @wsgi.response(202) - @wsgi.expected_errors((404, 409)) + @wsgi.expected_errors(410) @wsgi.action('resetNetwork') def _reset_network(self, req, id, body): - """Permit admins to reset networking on a server.""" - context = req.environ['nova.context'] - context.can(aa_policies.POLICY_ROOT % 'reset_network') - instance = common.get_instance(self.compute_api, context, id) - try: - self.compute_api.reset_network(context, instance) - except exception.InstanceUnknownCell as e: - raise exc.HTTPNotFound(explanation=e.format_message()) - except exception.InstanceIsLocked as e: - raise exc.HTTPConflict(explanation=e.format_message()) + """(Removed) Permit admins to reset networking on a server.""" + raise exc.HTTPGone() @wsgi.response(202) @wsgi.expected_errors((404, 409)) @@ -55,12 +48,11 @@ def _reset_network(self, req, id, body): def _inject_network_info(self, req, id, body): """Permit admins to inject network info into a server.""" context = req.environ['nova.context'] - context.can(aa_policies.POLICY_ROOT % 'inject_network_info') instance = common.get_instance(self.compute_api, context, id) + context.can(aa_policies.POLICY_ROOT % 'inject_network_info', + target={'project_id': instance.project_id}) try: self.compute_api.inject_network_info(context, instance) - except exception.InstanceUnknownCell as e: - raise exc.HTTPNotFound(explanation=e.format_message()) except exception.InstanceIsLocked as e: raise exc.HTTPConflict(explanation=e.format_message()) @@ -71,12 +63,18 @@ def _inject_network_info(self, req, id, body): def _reset_state(self, req, id, body): """Permit admins to reset the state of a server.""" context = req.environ["nova.context"] - context.can(aa_policies.POLICY_ROOT % 'reset_state') + instance = common.get_instance(self.compute_api, context, id) + context.can(aa_policies.POLICY_ROOT % 'reset_state', + target={'project_id': instance.project_id}) + + # Log os-resetState as an instance action + instance_action = objects.InstanceAction.action_start( + context, instance.uuid, instance_actions.RESET_STATE) # Identify the desired state from the body state = state_map[body["os-resetState"]["state"]] - instance = common.get_instance(self.compute_api, context, id) instance.vm_state = state instance.task_state = None instance.save(admin_state_reset=True) + instance_action.finish() diff --git a/nova/api/openstack/compute/admin_password.py b/nova/api/openstack/compute/admin_password.py index 407e9436056..36c6cbcf1e5 100644 --- a/nova/api/openstack/compute/admin_password.py +++ 
b/nova/api/openstack/compute/admin_password.py @@ -18,7 +18,7 @@ from nova.api.openstack.compute.schemas import admin_password from nova.api.openstack import wsgi from nova.api import validation -from nova import compute +from nova.compute import api as compute from nova import exception from nova.i18n import _ from nova.policies import admin_password as ap_policies @@ -26,8 +26,8 @@ class AdminPasswordController(wsgi.Controller): - def __init__(self, *args, **kwargs): - super(AdminPasswordController, self).__init__(*args, **kwargs) + def __init__(self): + super(AdminPasswordController, self).__init__() self.compute_api = compute.API() # TODO(eliqiao): Here should be 204(No content) instead of 202 by v2.1+ @@ -47,8 +47,6 @@ def change_password(self, req, id, body): password = body['changePassword']['adminPass'] try: self.compute_api.set_admin_password(context, instance, password) - except exception.InstanceUnknownCell as e: - raise exc.HTTPNotFound(explanation=e.format_message()) except (exception.InstancePasswordSetFailed, exception.SetAdminPasswdNotSupported, exception.InstanceAgentNotEnabled) as e: diff --git a/nova/api/openstack/compute/agents.py b/nova/api/openstack/compute/agents.py index 0abcef8e162..38e690b59e4 100644 --- a/nova/api/openstack/compute/agents.py +++ b/nova/api/openstack/compute/agents.py @@ -13,153 +13,28 @@ # under the License. -import webob.exc +from webob import exc -from nova.api.openstack.compute.schemas import agents as schema from nova.api.openstack import wsgi -from nova.api import validation -from nova import exception -from nova import objects -from nova.policies import agents as agents_policies -from nova import utils class AgentController(wsgi.Controller): - """The agent is talking about guest agent.The host can use this for - things like accessing files on the disk, configuring networking, - or running other applications/scripts in the guest while it is - running. Typically this uses some hypervisor-specific transport - to avoid being dependent on a working network configuration. - Xen, VMware, and VirtualBox have guest agents,although the Xen - driver is the only one with an implementation for managing them - in openstack. KVM doesn't really have a concept of a guest agent - (although one could be written). + """(Removed) Controller for agent resources. - You can find the design of agent update in this link: - http://wiki.openstack.org/AgentUpdate - and find the code in nova.virt.xenapi.vmops.VMOps._boot_new_instance. - In this design We need update agent in guest from host, so we need - some interfaces to update the agent info in host. - - You can find more information about the design of the GuestAgent in - the following link: - http://wiki.openstack.org/GuestAgent - http://wiki.openstack.org/GuestAgentXenStoreCommunication + This was removed during the Victoria release along with the XenAPI driver. """ - @validation.query_schema(schema.index_query) - @wsgi.expected_errors(()) + @wsgi.expected_errors(410) def index(self, req): - """Return a list of all agent builds. 
Filter by hypervisor.""" - context = req.environ['nova.context'] - context.can(agents_policies.BASE_POLICY_NAME) - hypervisor = None - agents = [] - if 'hypervisor' in req.GET: - hypervisor = req.GET['hypervisor'] - - builds = objects.AgentList.get_all(context, hypervisor=hypervisor) - for agent_build in builds: - agents.append({'hypervisor': agent_build.hypervisor, - 'os': agent_build.os, - 'architecture': agent_build.architecture, - 'version': agent_build.version, - 'md5hash': agent_build.md5hash, - 'agent_id': agent_build.id, - 'url': agent_build.url}) + raise exc.HTTPGone() - return {'agents': agents} - - @wsgi.expected_errors((400, 404)) - @validation.schema(schema.update) + @wsgi.expected_errors(410) def update(self, req, id, body): - """Update an existing agent build.""" - context = req.environ['nova.context'] - context.can(agents_policies.BASE_POLICY_NAME) - - # TODO(oomichi): This parameter name "para" is different from the ones - # of the other APIs. Most other names are resource names like "server" - # etc. This name should be changed to "agent" for consistent naming - # with v2.1+microversions. - para = body['para'] - - url = para['url'] - md5hash = para['md5hash'] - version = para['version'] - - try: - utils.validate_integer(id, 'id') - except exception.InvalidInput as exc: - raise webob.exc.HTTPBadRequest(explanation=exc.format_message()) - - agent = objects.Agent(context=context, id=id) - agent.obj_reset_changes() - agent.version = version - agent.url = url - agent.md5hash = md5hash - try: - agent.save() - except exception.AgentBuildNotFound as ex: - raise webob.exc.HTTPNotFound(explanation=ex.format_message()) + raise exc.HTTPGone() - # TODO(alex_xu): The agent_id should be integer that consistent with - # create/index actions. But parameter 'id' is string type that parsed - # from url. This is a bug, but because back-compatibility, it can't be - # fixed for v2 API. This will be fixed in v2.1 API by Microversions in - # the future. lp bug #1333494 - return {"agent": {'agent_id': id, 'version': version, - 'url': url, 'md5hash': md5hash}} - - # TODO(oomichi): Here should be 204(No Content) instead of 200 by v2.1 - # +microversions because the resource agent has been deleted completely - # when returning a response. - @wsgi.expected_errors((400, 404)) - @wsgi.response(200) + @wsgi.expected_errors(410) def delete(self, req, id): - """Deletes an existing agent build.""" - context = req.environ['nova.context'] - context.can(agents_policies.BASE_POLICY_NAME) - - try: - utils.validate_integer(id, 'id') - except exception.InvalidInput as exc: - raise webob.exc.HTTPBadRequest(explanation=exc.format_message()) - - try: - agent = objects.Agent(context=context, id=id) - agent.destroy() - except exception.AgentBuildNotFound as ex: - raise webob.exc.HTTPNotFound(explanation=ex.format_message()) + raise exc.HTTPGone() - # TODO(oomichi): Here should be 201(Created) instead of 200 by v2.1 - # +microversions because the creation of a resource agent finishes - # when returning a response. 
- @wsgi.expected_errors(409) - @wsgi.response(200) - @validation.schema(schema.create) + @wsgi.expected_errors(410) def create(self, req, body): - """Creates a new agent build.""" - context = req.environ['nova.context'] - context.can(agents_policies.BASE_POLICY_NAME) - - agent = body['agent'] - hypervisor = agent['hypervisor'] - os = agent['os'] - architecture = agent['architecture'] - version = agent['version'] - url = agent['url'] - md5hash = agent['md5hash'] - - agent_obj = objects.Agent(context=context) - agent_obj.hypervisor = hypervisor - agent_obj.os = os - agent_obj.architecture = architecture - agent_obj.version = version - agent_obj.url = url - agent_obj.md5hash = md5hash - - try: - agent_obj.create() - agent['agent_id'] = agent_obj.id - except exception.AgentBuildExists as ex: - raise webob.exc.HTTPConflict(explanation=ex.format_message()) - return {'agent': agent} + raise exc.HTTPGone() diff --git a/nova/api/openstack/compute/aggregates.py b/nova/api/openstack/compute/aggregates.py index e44b449e766..43133c42861 100644 --- a/nova/api/openstack/compute/aggregates.py +++ b/nova/api/openstack/compute/aggregates.py @@ -17,17 +17,23 @@ import datetime +from oslo_log import log as logging from webob import exc from nova.api.openstack import api_version_request from nova.api.openstack import common +from nova.api.openstack.compute.schemas import aggregate_images from nova.api.openstack.compute.schemas import aggregates from nova.api.openstack import wsgi from nova.api import validation -from nova.compute import api as compute_api +from nova.compute import api as compute +from nova.conductor import api as conductor from nova import exception from nova.i18n import _ from nova.policies import aggregates as aggr_policies +from nova import utils + +LOG = logging.getLogger(__name__) def _get_context(req): @@ -37,13 +43,15 @@ def _get_context(req): class AggregateController(wsgi.Controller): """The Host Aggregates API controller for the OpenStack API.""" def __init__(self): - self.api = compute_api.AggregateAPI() + super(AggregateController, self).__init__() + self.api = compute.AggregateAPI() + self.conductor_tasks = conductor.ComputeTaskAPI() @wsgi.expected_errors(()) def index(self, req): """Returns a list a host aggregate's id, name, availability_zone.""" context = _get_context(req) - context.can(aggr_policies.POLICY_ROOT % 'index') + context.can(aggr_policies.POLICY_ROOT % 'index', target={}) aggregates = self.api.get_aggregate_list(context) return {'aggregates': [self._marshall_aggregate(req, a)['aggregate'] for a in aggregates]} @@ -58,7 +66,7 @@ def create(self, req, body): optional availability zone. 
""" context = _get_context(req) - context.can(aggr_policies.POLICY_ROOT % 'create') + context.can(aggr_policies.POLICY_ROOT % 'create', target={}) host_aggregate = body["aggregate"] name = common.normalize_name(host_aggregate["name"]) avail_zone = host_aggregate.get("availability_zone") @@ -84,11 +92,17 @@ def create(self, req, body): return agg - @wsgi.expected_errors(404) + @wsgi.expected_errors((400, 404)) def show(self, req, id): """Shows the details of an aggregate, hosts and metadata included.""" context = _get_context(req) - context.can(aggr_policies.POLICY_ROOT % 'show') + context.can(aggr_policies.POLICY_ROOT % 'show', target={}) + + try: + utils.validate_integer(id, 'id') + except exception.InvalidInput as e: + raise exc.HTTPBadRequest(explanation=e.format_message()) + try: aggregate = self.api.get_aggregate(context, id) except exception.AggregateNotFound as e: @@ -101,11 +115,16 @@ def show(self, req, id): def update(self, req, id, body): """Updates the name and/or availability_zone of given aggregate.""" context = _get_context(req) - context.can(aggr_policies.POLICY_ROOT % 'update') + context.can(aggr_policies.POLICY_ROOT % 'update', target={}) updates = body["aggregate"] if 'name' in updates: updates['name'] = common.normalize_name(updates['name']) + try: + utils.validate_integer(id, 'id') + except exception.InvalidInput as e: + raise exc.HTTPBadRequest(explanation=e.format_message()) + try: aggregate = self.api.update_aggregate(context, id, updates) except exception.AggregateNameExists as e: @@ -124,7 +143,13 @@ def update(self, req, id, body): def delete(self, req, id): """Removes an aggregate by id.""" context = _get_context(req) - context.can(aggr_policies.POLICY_ROOT % 'delete') + context.can(aggr_policies.POLICY_ROOT % 'delete', target={}) + + try: + utils.validate_integer(id, 'id') + except exception.InvalidInput as e: + raise exc.HTTPBadRequest(explanation=e.format_message()) + try: self.api.delete_aggregate(context, id) except exception.AggregateNotFound as e: @@ -135,7 +160,7 @@ def delete(self, req, id): # NOTE(gmann): Returns 200 for backwards compatibility but should be 202 # for representing async API as this API just accepts the request and # request hypervisor driver to complete the same in async mode. - @wsgi.expected_errors((404, 409)) + @wsgi.expected_errors((400, 404, 409)) @wsgi.action('add_host') @validation.schema(aggregates.add_host) def _add_host(self, req, id, body): @@ -143,7 +168,13 @@ def _add_host(self, req, id, body): host = body['add_host']['host'] context = _get_context(req) - context.can(aggr_policies.POLICY_ROOT % 'add_host') + context.can(aggr_policies.POLICY_ROOT % 'add_host', target={}) + + try: + utils.validate_integer(id, 'id') + except exception.InvalidInput as e: + raise exc.HTTPBadRequest(explanation=e.format_message()) + try: aggregate = self.api.add_host_to_aggregate(context, id, host) except (exception.AggregateNotFound, @@ -158,7 +189,7 @@ def _add_host(self, req, id, body): # NOTE(gmann): Returns 200 for backwards compatibility but should be 202 # for representing async API as this API just accepts the request and # request hypervisor driver to complete the same in async mode. 
- @wsgi.expected_errors((404, 409)) + @wsgi.expected_errors((400, 404, 409)) @wsgi.action('remove_host') @validation.schema(aggregates.remove_host) def _remove_host(self, req, id, body): @@ -166,15 +197,27 @@ def _remove_host(self, req, id, body): host = body['remove_host']['host'] context = _get_context(req) - context.can(aggr_policies.POLICY_ROOT % 'remove_host') + context.can(aggr_policies.POLICY_ROOT % 'remove_host', target={}) + + try: + utils.validate_integer(id, 'id') + except exception.InvalidInput as e: + raise exc.HTTPBadRequest(explanation=e.format_message()) + try: aggregate = self.api.remove_host_from_aggregate(context, id, host) - except (exception.AggregateNotFound, exception.AggregateHostNotFound, - exception.HostMappingNotFound, exception.ComputeHostNotFound): + except (exception.AggregateNotFound, + exception.AggregateHostNotFound, + exception.ComputeHostNotFound) as e: + LOG.error('Failed to remove host %s from aggregate %s. Error: %s', + host, id, str(e)) msg = _('Cannot remove host %(host)s in aggregate %(id)s') % { 'host': host, 'id': id} raise exc.HTTPNotFound(explanation=msg) - except exception.InvalidAggregateAction: + except (exception.InvalidAggregateAction, + exception.ResourceProviderUpdateConflict) as e: + LOG.error('Failed to remove host %s from aggregate %s. Error: %s', + host, id, str(e)) msg = _('Cannot remove host %(host)s in aggregate %(id)s') % { 'host': host, 'id': id} raise exc.HTTPConflict(explanation=msg) @@ -186,7 +229,12 @@ def _remove_host(self, req, id, body): def _set_metadata(self, req, id, body): """Replaces the aggregate's existing metadata with new metadata.""" context = _get_context(req) - context.can(aggr_policies.POLICY_ROOT % 'set_metadata') + context.can(aggr_policies.POLICY_ROOT % 'set_metadata', target={}) + + try: + utils.validate_integer(id, 'id') + except exception.InvalidInput as e: + raise exc.HTTPBadRequest(explanation=e.format_message()) metadata = body["set_metadata"]["metadata"] try: @@ -217,7 +265,39 @@ def _build_aggregate_items(self, req, aggregate): # case it is only ['availability_zone']) without worrying about # lazy-loading an unset variable for key in keys: - if ((aggregate.obj_attr_is_set(key) - or key in aggregate.obj_extra_fields) and + if ((aggregate.obj_attr_is_set(key) or + key in aggregate.obj_extra_fields) and (show_uuid or key != 'uuid')): yield key, getattr(aggregate, key) + + @wsgi.Controller.api_version('2.81') + @wsgi.response(202) + @wsgi.expected_errors((400, 404)) + @validation.schema(aggregate_images.aggregate_images_v2_81) + def images(self, req, id, body): + """Allows image cache management requests.""" + context = _get_context(req) + context.can(aggr_policies.NEW_POLICY_ROOT % 'images', target={}) + + try: + utils.validate_integer(id, 'id') + except exception.InvalidInput as e: + raise exc.HTTPBadRequest(explanation=e.format_message()) + + image_ids = [] + for image_req in body.get('cache'): + image_ids.append(image_req['id']) + + if image_ids != list(set(image_ids)): + raise exc.HTTPBadRequest( + explanation=_('Duplicate images in request')) + + try: + aggregate = self.api.get_aggregate(context, id) + except exception.AggregateNotFound as e: + raise exc.HTTPNotFound(explanation=e.format_message()) + + try: + self.conductor_tasks.cache_images(context, aggregate, image_ids) + except exception.NovaException as e: + raise exc.HTTPBadRequest(explanation=e.format_message()) diff --git a/nova/api/openstack/compute/assisted_volume_snapshots.py b/nova/api/openstack/compute/assisted_volume_snapshots.py 
index 5e98c3fbf8a..ea6ebc83597 100644 --- a/nova/api/openstack/compute/assisted_volume_snapshots.py +++ b/nova/api/openstack/compute/assisted_volume_snapshots.py @@ -17,13 +17,12 @@ """The Assisted volume snapshots extension.""" from oslo_serialization import jsonutils -import six from webob import exc from nova.api.openstack.compute.schemas import assisted_volume_snapshots from nova.api.openstack import wsgi from nova.api import validation -from nova import compute +from nova.compute import api as compute from nova import exception from nova.policies import assisted_volume_snapshots as avs_policies @@ -32,15 +31,15 @@ class AssistedVolumeSnapshotsController(wsgi.Controller): """The Assisted volume snapshots API controller for the OpenStack API.""" def __init__(self): - self.compute_api = compute.API() super(AssistedVolumeSnapshotsController, self).__init__() + self.compute_api = compute.API() @wsgi.expected_errors(400) @validation.schema(assisted_volume_snapshots.snapshots_create) def create(self, req, body): """Creates a new snapshot.""" context = req.environ['nova.context'] - context.can(avs_policies.POLICY_ROOT % 'create') + context.can(avs_policies.POLICY_ROOT % 'create', target={}) snapshot = body['snapshot'] create_info = snapshot['create_info'] @@ -62,12 +61,15 @@ def create(self, req, body): raise exc.HTTPBadRequest(explanation=e.format_message()) @wsgi.response(204) - @validation.query_schema(assisted_volume_snapshots.delete_query) + @validation.query_schema(assisted_volume_snapshots.delete_query_275, + '2.75') + @validation.query_schema(assisted_volume_snapshots.delete_query, '2.0', + '2.74') @wsgi.expected_errors((400, 404)) def delete(self, req, id): """Delete a snapshot.""" context = req.environ['nova.context'] - context.can(avs_policies.POLICY_ROOT % 'delete') + context.can(avs_policies.POLICY_ROOT % 'delete', target={}) delete_metadata = {} delete_metadata.update(req.GET) @@ -76,7 +78,7 @@ def delete(self, req, id): delete_info = jsonutils.loads(delete_metadata['delete_info']) volume_id = delete_info['volume_id'] except (KeyError, ValueError) as e: - raise exc.HTTPBadRequest(explanation=six.text_type(e)) + raise exc.HTTPBadRequest(explanation=str(e)) try: self.compute_api.volume_snapshot_delete(context, volume_id, diff --git a/nova/api/openstack/compute/attach_interfaces.py b/nova/api/openstack/compute/attach_interfaces.py index 717b85f132f..6a24a609599 100644 --- a/nova/api/openstack/compute/attach_interfaces.py +++ b/nova/api/openstack/compute/attach_interfaces.py @@ -18,43 +18,59 @@ import webob from webob import exc +from nova.api.openstack import api_version_request from nova.api.openstack import common from nova.api.openstack.compute.schemas import attach_interfaces from nova.api.openstack import wsgi from nova.api import validation -from nova import compute +from nova.compute import api as compute from nova import exception from nova.i18n import _ -from nova import network +from nova.network import neutron +from nova import objects from nova.policies import attach_interfaces as ai_policies -def _translate_interface_attachment_view(port_info): - """Maps keys for interface attachment details view.""" - return { +def _translate_interface_attachment_view(context, port_info, show_tag=False): + """Maps keys for interface attachment details view. 
+
+    :param port_info: dict of port details from the networking service
+    :param show_tag: If True, includes the "tag" key in the returned dict,
+        else the "tag" entry is omitted (default: False)
+    :returns: dict of a subset of details about the port and optionally the
+        tag associated with the VirtualInterface record in the nova database
+    """
+    info = {
         'net_id': port_info['network_id'],
         'port_id': port_info['id'],
         'mac_addr': port_info['mac_address'],
         'port_state': port_info['status'],
         'fixed_ips': port_info.get('fixed_ips', None),
         }
+    if show_tag:
+        # Get the VIF for this port (if one exists - VirtualInterface records
+        # did not exist for neutron ports until the Newton release).
+        vif = objects.VirtualInterface.get_by_uuid(context, port_info['id'])
+        info['tag'] = vif.tag if vif else None
+    return info


 class InterfaceAttachmentController(wsgi.Controller):
     """The interface attachment API controller for the OpenStack API."""

     def __init__(self):
-        self.compute_api = compute.API()
-        self.network_api = network.API()
         super(InterfaceAttachmentController, self).__init__()
+        self.compute_api = compute.API()
+        self.network_api = neutron.API()

     @wsgi.expected_errors((404, 501))
     def index(self, req, server_id):
         """Returns the list of interface attachments for a given instance."""
         context = req.environ['nova.context']
-        context.can(ai_policies.BASE_POLICY_NAME)
-        instance = common.get_instance(self.compute_api, context, server_id)
+        instance = common.get_instance(self.compute_api, context, server_id)
+        context.can(ai_policies.POLICY_ROOT % 'list',
+                    target={'project_id': instance.project_id})
+
         search_opts = {'device_id': instance.uuid}

         try:
@@ -64,9 +80,28 @@ def index(self, req, server_id):
         except NotImplementedError:
             common.raise_feature_not_supported()

+        # If showing tags, get the VirtualInterfaceList for the server and
+        # map VIFs by port ID. Note that VirtualInterface records did not
+        # exist for neutron ports until the Newton release so it's OK if we
+        # are missing records for old servers.
+        show_tag = api_version_request.is_supported(req, '2.70')
+        tag_per_port_id = {}
+        if show_tag:
+            vifs = objects.VirtualInterfaceList.get_by_instance_uuid(
+                context, server_id)
+            tag_per_port_id = {vif.uuid: vif.tag for vif in vifs}
+
+        results = []
         ports = data.get('ports', [])
-        entity_maker = _translate_interface_attachment_view
-        results = [entity_maker(port) for port in ports]
+        for port in ports:
+            # Note that we do not pass show_tag=show_tag to
+            # _translate_interface_attachment_view because we are handling it
+            # ourselves here since we have the list of VIFs which is better
+            # for performance than doing a DB query per port.
+            info = _translate_interface_attachment_view(context, port)
+            if show_tag:
+                info['tag'] = tag_per_port_id.get(port['id'])
+            results.append(info)

         return {'interfaceAttachments': results}

@@ -74,13 +109,11 @@ def index(self, req, server_id):
     def show(self, req, server_id, id):
         """Return data about the given interface attachment."""
         context = req.environ['nova.context']
-        context.can(ai_policies.BASE_POLICY_NAME)
+        instance = common.get_instance(self.compute_api, context, server_id)
+        context.can(ai_policies.POLICY_ROOT % 'show',
+                    target={'project_id': instance.project_id})

         port_id = id
-        # NOTE(mriedem): We need to verify the instance actually exists from
-        # the server_id even though we're not using the instance for anything,
-        # just the port id.
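Returning to the index() change above: rather than letting _translate_interface_attachment_view issue one VirtualInterface lookup per port, the new code fetches the server's VIF list once and maps tags by port id. A toy version of that bulk-lookup optimization (the dataclass stands in for the nova object; names are illustrative):

    from dataclasses import dataclass

    @dataclass
    class Vif:
        uuid: str  # equal to the neutron port id
        tag: str

    def tags_for_ports(vifs, ports):
        # One pass to build the map, then O(1) lookups per port.
        tag_per_port_id = {vif.uuid: vif.tag for vif in vifs}
        # Ports without a VIF record (pre-Newton servers) get tag=None.
        return {p['id']: tag_per_port_id.get(p['id']) for p in ports}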
- common.get_instance(self.compute_api, context, server_id) try: port_info = self.network_api.show_port(context, port_id) @@ -94,17 +127,21 @@ def show(self, req, server_id, id): "%(port)s") % {'instance': server_id, 'port': port_id} raise exc.HTTPNotFound(explanation=msg) - return {'interfaceAttachment': _translate_interface_attachment_view( - port_info['port'])} + return {'interfaceAttachment': + _translate_interface_attachment_view( + context, port_info['port'], + show_tag=api_version_request.is_supported(req, '2.70'))} - @wsgi.expected_errors((400, 404, 409, 500, 501)) + @wsgi.expected_errors((400, 403, 404, 409, 500, 501)) @validation.schema(attach_interfaces.create, '2.0', '2.48') @validation.schema(attach_interfaces.create_v249, '2.49') def create(self, req, server_id, body): """Attach an interface to an instance.""" context = req.environ['nova.context'] - context.can(ai_policies.BASE_POLICY_NAME) - context.can(ai_policies.POLICY_ROOT % 'create') + instance = common.get_instance(self.compute_api, context, server_id) + + context.can(ai_policies.POLICY_ROOT % 'create', + target={'project_id': instance.project_id}) network_id = None port_id = None @@ -127,7 +164,6 @@ def create(self, req, server_id, body): msg = _("Must input network_id when request IP address") raise exc.HTTPBadRequest(explanation=msg) - instance = common.get_instance(self.compute_api, context, server_id) try: vif = self.compute_api.attach_interface(context, instance, network_id, port_id, req_ip, tag=tag) @@ -137,15 +173,25 @@ def create(self, req, server_id, body): exception.PortNotUsable, exception.AttachInterfaceNotSupported, exception.SecurityGroupCannotBeApplied, - exception.NetworkInterfaceTaggedAttachNotSupported) as e: + exception.NetworkInterfaceTaggedAttachNotSupported, + exception.NetworksWithQoSPolicyNotSupported, + exception.InterfaceAttachPciClaimFailed, + exception.InterfaceAttachResourceAllocationFailed, + exception.ForbiddenPortsWithAccelerator, + exception.ExtendedResourceRequestOldCompute, + ) as e: raise exc.HTTPBadRequest(explanation=e.format_message()) - except (exception.InstanceIsLocked, - exception.FixedIpAlreadyInUse, - exception.PortInUse) as e: + except ( + exception.InstanceIsLocked, + exception.FixedIpAlreadyInUse, + exception.PortInUse, + ) as e: raise exc.HTTPConflict(explanation=e.format_message()) except (exception.PortNotFound, exception.NetworkNotFound) as e: raise exc.HTTPNotFound(explanation=e.format_message()) + except exception.PortLimitExceeded as e: + raise exc.HTTPForbidden(explanation=e.format_message()) except exception.InterfaceAttachFailed as e: raise webob.exc.HTTPInternalServerError( explanation=e.format_message()) @@ -156,16 +202,18 @@ def create(self, req, server_id, body): return self.show(req, server_id, vif['id']) @wsgi.response(202) - @wsgi.expected_errors((404, 409, 501)) + @wsgi.expected_errors((400, 404, 409, 501)) def delete(self, req, server_id, id): """Detach an interface from an instance.""" context = req.environ['nova.context'] - context.can(ai_policies.BASE_POLICY_NAME) - context.can(ai_policies.POLICY_ROOT % 'delete') - port_id = id instance = common.get_instance(self.compute_api, context, server_id, expected_attrs=['device_metadata']) + + context.can(ai_policies.POLICY_ROOT % 'delete', + target={'project_id': instance.project_id}) + port_id = id + try: self.compute_api.detach_interface(context, instance, port_id=port_id) @@ -178,3 +226,5 @@ def delete(self, req, server_id, id): except exception.InstanceInvalidState as state_error: 
common.raise_http_conflict_for_instance_invalid_state(state_error, 'detach_interface', server_id) + except exception.ForbiddenPortsWithAccelerator as e: + raise exc.HTTPBadRequest(explanation=e.format_message()) diff --git a/nova/api/openstack/compute/availability_zone.py b/nova/api/openstack/compute/availability_zone.py index b9fa5ebf111..20b4a2147a3 100644 --- a/nova/api/openstack/compute/availability_zone.py +++ b/nova/api/openstack/compute/availability_zone.py @@ -14,7 +14,7 @@ from nova.api.openstack import wsgi from nova import availability_zones -from nova import compute +from nova.compute import api as compute import nova.conf from nova.policies import availability_zone as az_policies from nova import servicegroup @@ -44,8 +44,9 @@ def _get_filtered_availability_zones(self, zones, is_available): def _describe_availability_zones(self, context, **kwargs): ctxt = context.elevated() - available_zones, not_available_zones = \ - availability_zones.get_availability_zones(ctxt) + available_zones, not_available_zones = ( + availability_zones.get_availability_zones( + ctxt, self.host_api)) filtered_available_zones = \ self._get_filtered_availability_zones(available_zones, True) @@ -56,25 +57,23 @@ def _describe_availability_zones(self, context, **kwargs): def _describe_availability_zones_verbose(self, context, **kwargs): ctxt = context.elevated() - available_zones, not_available_zones = \ - availability_zones.get_availability_zones(ctxt) - # Available services - enabled_services = self.host_api.service_get_all( - context, {'disabled': False}, set_zones=True, all_cells=True) + services = self.host_api.service_get_all( + context, set_zones=True, all_cells=True) + available_zones, not_available_zones = ( + availability_zones.get_availability_zones( + ctxt, self.host_api, services=services)) zone_hosts = {} host_services = {} api_services = ('nova-osapi_compute', 'nova-metadata') - for service in enabled_services: + for service in filter(lambda x: not x.disabled, services): if service.binary in api_services: # Skip API services in the listing since they are not # maintained in the same way as other services continue - zone_hosts.setdefault(service['availability_zone'], []) - if service['host'] not in zone_hosts[service['availability_zone']]: - zone_hosts[service['availability_zone']].append( - service['host']) + zone_hosts.setdefault(service['availability_zone'], set()) + zone_hosts[service['availability_zone']].add(service['host']) host_services.setdefault(service['availability_zone'] + service['host'], []) @@ -88,9 +87,11 @@ def _describe_availability_zones_verbose(self, context, **kwargs): hosts[host] = {} for service in host_services[zone + host]: alive = self.servicegroup_api.service_is_up(service) - hosts[host][service['binary']] = {'available': alive, - 'active': True != service['disabled'], - 'updated_at': service['updated_at']} + hosts[host][service['binary']] = { + 'available': alive, + 'active': service['disabled'] is not True, + 'updated_at': service['updated_at'] + } result.append({'zoneName': zone, 'zoneState': {'available': True}, "hosts": hosts}) @@ -105,7 +106,7 @@ def _describe_availability_zones_verbose(self, context, **kwargs): def index(self, req): """Returns a summary list of availability zone.""" context = req.environ['nova.context'] - context.can(az_policies.POLICY_ROOT % 'list') + context.can(az_policies.POLICY_ROOT % 'list', target={}) return self._describe_availability_zones(context) @@ -113,6 +114,6 @@ def index(self, req): def detail(self, req): """Returns a detailed 
list of availability zone.""" context = req.environ['nova.context'] - context.can(az_policies.POLICY_ROOT % 'detail') + context.can(az_policies.POLICY_ROOT % 'detail', target={}) return self._describe_availability_zones_verbose(context) diff --git a/nova/api/openstack/compute/baremetal_nodes.py b/nova/api/openstack/compute/baremetal_nodes.py index 139932ba717..a9d304b4597 100644 --- a/nova/api/openstack/compute/baremetal_nodes.py +++ b/nova/api/openstack/compute/baremetal_nodes.py @@ -30,13 +30,6 @@ ironic_client = importutils.try_import('ironicclient.client') ironic_exc = importutils.try_import('ironicclient.exc') -node_fields = ['id', 'cpus', 'local_gb', 'memory_mb', 'pm_address', - 'pm_user', 'service_host', 'terminal_port', 'instance_uuid'] - -node_ext_fields = ['uuid', 'task_state', 'updated_at', 'pxe_config_path'] - -interface_fields = ['id', 'address', 'datapath_id', 'port_no'] - CONF = nova.conf.CONF @@ -49,6 +42,9 @@ def _check_ironic_client_enabled(): def _get_ironic_client(): """return an Ironic client.""" # TODO(NobodyCam): Fix insecure setting + # NOTE(efried): This should all be replaced by ksa adapter options; but the + # nova-to-baremetal API is deprecated, so not changing it. + # https://docs.openstack.org/api-ref/compute/#bare-metal-nodes-os-baremetal-nodes-deprecated # noqa kwargs = {'os_username': CONF.ironic.admin_username, 'os_password': CONF.ironic.admin_password, 'os_auth_url': CONF.ironic.admin_url, @@ -56,7 +52,7 @@ def _get_ironic_client(): 'os_service_type': 'baremetal', 'os_endpoint_type': 'public', 'insecure': 'true', - 'ironic_url': CONF.ironic.api_endpoint} + 'endpoint': CONF.ironic.endpoint_override} # NOTE(mriedem): The 1 api_version arg here is the only valid value for # the client, but it's not even used so it doesn't really matter. The # ironic client wrapper in the virt driver actually uses a hard-coded @@ -75,19 +71,12 @@ def _no_ironic_proxy(cmd): class BareMetalNodeController(wsgi.Controller): """The Bare-Metal Node API controller for the OpenStack API.""" - def _node_dict(self, node_ref): - d = {} - for f in node_fields: - d[f] = node_ref.get(f) - for f in node_ext_fields: - d[f] = node_ref.get(f) - return d - @wsgi.Controller.api_version("2.1", MAX_PROXY_API_SUPPORT_VERSION) @wsgi.expected_errors((404, 501)) def index(self, req): context = req.environ['nova.context'] - context.can(bn_policies.BASE_POLICY_NAME) + context.can(bn_policies.BASE_POLICY_NAME % 'list', + target={}) nodes = [] # proxy command to Ironic _check_ironic_client_enabled() @@ -108,7 +97,8 @@ def index(self, req): @wsgi.expected_errors((404, 501)) def show(self, req, id): context = req.environ['nova.context'] - context.can(bn_policies.BASE_POLICY_NAME) + context.can(bn_policies.BASE_POLICY_NAME % 'show', + target={}) # proxy command to Ironic _check_ironic_client_enabled() icli = _get_ironic_client() diff --git a/nova/api/openstack/compute/cells.py b/nova/api/openstack/compute/cells.py index 2dcc48ba50f..87ec64b4b72 100644 --- a/nova/api/openstack/compute/cells.py +++ b/nova/api/openstack/compute/cells.py @@ -14,289 +14,49 @@ # License for the specific language governing permissions and limitations # under the License. 
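In the availability_zone.py hunk above, the verbose listing now fetches the service list once, derives the enabled subset in Python, and deduplicates hosts per zone with a set instead of list membership checks. A self-contained sketch of that aggregation, with services as plain dicts rather than nova Service objects:

    def hosts_by_zone(services):
        zone_hosts = {}
        for svc in filter(lambda s: not s['disabled'], services):
            # set.add() makes a duplicate host a no-op.
            zone_hosts.setdefault(svc['availability_zone'], set())
            zone_hosts[svc['availability_zone']].add(svc['host'])
        return zone_hosts

    # hosts_by_zone([{'disabled': False, 'availability_zone': 'nova',
    #                 'host': 'cn1'}]) -> {'nova': {'cn1'}}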
-"""The cells extension.""" - -import oslo_messaging as messaging -from oslo_utils import strutils -import six from webob import exc -from nova.api.openstack import common -from nova.api.openstack.compute.schemas import cells from nova.api.openstack import wsgi -from nova.api import validation -from nova.cells import rpcapi as cells_rpcapi -import nova.conf -from nova import exception -from nova.i18n import _ -from nova.policies import cells as cells_policies -from nova import rpc - - -CONF = nova.conf.CONF - - -def _filter_keys(item, keys): - """Filters all model attributes except for keys - item is a dict - """ - return {k: v for k, v in item.items() if k in keys} - - -def _fixup_cell_info(cell_info, keys): - """If the transport_url is present in the cell, derive username, - rpc_host, and rpc_port from it. - """ - - if 'transport_url' not in cell_info: - return - - # Disassemble the transport URL - transport_url = cell_info.pop('transport_url') - try: - transport_url = rpc.get_transport_url(transport_url) - except messaging.InvalidTransportURL: - # Just go with None's - for key in keys: - cell_info.setdefault(key, None) - return - - if not transport_url.hosts: - return - - transport_host = transport_url.hosts[0] - - transport_field_map = {'rpc_host': 'hostname', 'rpc_port': 'port'} - for key in keys: - if key in cell_info: - continue - - transport_field = transport_field_map.get(key, key) - cell_info[key] = getattr(transport_host, transport_field) - - -def _scrub_cell(cell, detail=False): - keys = ['name', 'username', 'rpc_host', 'rpc_port'] - if detail: - keys.append('capabilities') - - cell_info = _filter_keys(cell, keys + ['transport_url']) - _fixup_cell_info(cell_info, keys) - cell_info['type'] = 'parent' if cell['is_parent'] else 'child' - return cell_info class CellsController(wsgi.Controller): - """Controller for Cell resources.""" - - def __init__(self): - self.cells_rpcapi = cells_rpcapi.CellsAPI() + """(Removed) Controller for Cell resources. - def _get_cells(self, ctxt, req, detail=False): - """Return all cells.""" - # Ask the CellsManager for the most recent data - items = self.cells_rpcapi.get_cell_info_for_neighbors(ctxt) - items = common.limited(items, req) - items = [_scrub_cell(item, detail=detail) for item in items] - return dict(cells=items) + This was removed during the Train release in favour of cells v2. 
+ """ - @wsgi.expected_errors(501) - @common.check_cells_enabled + @wsgi.expected_errors(410) def index(self, req): - """Return all cells in brief.""" - ctxt = req.environ['nova.context'] - ctxt.can(cells_policies.BASE_POLICY_NAME) - return self._get_cells(ctxt, req) + raise exc.HTTPGone() - @wsgi.expected_errors(501) - @common.check_cells_enabled + @wsgi.expected_errors(410) def detail(self, req): - """Return all cells in detail.""" - ctxt = req.environ['nova.context'] - ctxt.can(cells_policies.BASE_POLICY_NAME) - return self._get_cells(ctxt, req, detail=True) + raise exc.HTTPGone() - @wsgi.expected_errors(501) - @common.check_cells_enabled + @wsgi.expected_errors(410) def info(self, req): - """Return name and capabilities for this cell.""" - context = req.environ['nova.context'] - context.can(cells_policies.BASE_POLICY_NAME) - cell_capabs = {} - my_caps = CONF.cells.capabilities - for cap in my_caps: - key, value = cap.split('=') - cell_capabs[key] = value - cell = {'name': CONF.cells.name, - 'type': 'self', - 'rpc_host': None, - 'rpc_port': 0, - 'username': None, - 'capabilities': cell_capabs} - return dict(cell=cell) + raise exc.HTTPGone() - @wsgi.expected_errors((404, 501)) - @common.check_cells_enabled + @wsgi.expected_errors(410) def capacities(self, req, id=None): - """Return capacities for a given cell or all cells.""" - # TODO(kaushikc): return capacities as a part of cell info and - # cells detail calls in v2.1, along with capabilities - context = req.environ['nova.context'] - context.can(cells_policies.BASE_POLICY_NAME) - try: - capacities = self.cells_rpcapi.get_capacities(context, - cell_name=id) - except exception.CellNotFound as e: - raise exc.HTTPNotFound(explanation=e.format_message()) - - return dict(cell={"capacities": capacities}) + raise exc.HTTPGone() - @wsgi.expected_errors((404, 501)) - @common.check_cells_enabled + @wsgi.expected_errors(410) def show(self, req, id): - """Return data about the given cell name. 'id' is a cell name.""" - context = req.environ['nova.context'] - context.can(cells_policies.BASE_POLICY_NAME) - try: - cell = self.cells_rpcapi.cell_get(context, id) - except exception.CellNotFound as e: - raise exc.HTTPNotFound(explanation=e.format_message()) - return dict(cell=_scrub_cell(cell)) + raise exc.HTTPGone() - # NOTE(gmann): Returns 200 for backwards compatibility but should be 204 - # as this operation complete the deletion of aggregate resource and return - # no response body. - @wsgi.expected_errors((403, 404, 501)) - @common.check_cells_enabled + @wsgi.expected_errors(410) def delete(self, req, id): - """Delete a child or parent cell entry. 'id' is a cell name.""" - context = req.environ['nova.context'] - - context.can(cells_policies.POLICY_ROOT % "delete") - - try: - num_deleted = self.cells_rpcapi.cell_delete(context, id) - except exception.CellsUpdateUnsupported as e: - raise exc.HTTPForbidden(explanation=e.format_message()) - if num_deleted == 0: - raise exc.HTTPNotFound( - explanation=_("Cell %s doesn't exist.") % id) - - def _normalize_cell(self, cell, existing=None): - """Normalize input cell data. Normalizations include: - - * Converting cell['type'] to is_parent boolean. - * Merging existing transport URL with transport information. 
- """ - - if 'name' in cell: - cell['name'] = common.normalize_name(cell['name']) - - # Start with the cell type conversion - if 'type' in cell: - cell['is_parent'] = cell.pop('type') == 'parent' - # Avoid cell type being overwritten to 'child' - elif existing: - cell['is_parent'] = existing['is_parent'] - else: - cell['is_parent'] = False - - # Now we disassemble the existing transport URL... - transport_url = existing.get('transport_url') if existing else None - transport_url = rpc.get_transport_url(transport_url) - - if 'rpc_virtual_host' in cell: - transport_url.virtual_host = cell.pop('rpc_virtual_host') + raise exc.HTTPGone() - if not transport_url.hosts: - transport_url.hosts.append(messaging.TransportHost()) - transport_host = transport_url.hosts[0] - if 'rpc_port' in cell: - cell['rpc_port'] = int(cell['rpc_port']) - # Copy over the input fields - transport_field_map = { - 'username': 'username', - 'password': 'password', - 'hostname': 'rpc_host', - 'port': 'rpc_port', - } - for key, input_field in transport_field_map.items(): - # Only override the value if we're given an override - if input_field in cell: - setattr(transport_host, key, cell.pop(input_field)) - - # Now set the transport URL - cell['transport_url'] = str(transport_url) - - # NOTE(gmann): Returns 200 for backwards compatibility but should be 201 - # as this operation complete the creation of aggregates resource when - # returning a response. - @wsgi.expected_errors((400, 403, 501)) - @common.check_cells_enabled - @validation.schema(cells.create_v20, '2.0', '2.0') - @validation.schema(cells.create, '2.1') + @wsgi.expected_errors(410) def create(self, req, body): - """Create a child cell entry.""" - context = req.environ['nova.context'] - - context.can(cells_policies.POLICY_ROOT % "create") + raise exc.HTTPGone() - cell = body['cell'] - self._normalize_cell(cell) - try: - cell = self.cells_rpcapi.cell_create(context, cell) - except exception.CellsUpdateUnsupported as e: - raise exc.HTTPForbidden(explanation=e.format_message()) - return dict(cell=_scrub_cell(cell)) - - @wsgi.expected_errors((400, 403, 404, 501)) - @common.check_cells_enabled - @validation.schema(cells.update_v20, '2.0', '2.0') - @validation.schema(cells.update, '2.1') + @wsgi.expected_errors(410) def update(self, req, id, body): - """Update a child cell entry. 'id' is the cell name to update.""" - context = req.environ['nova.context'] - - context.can(cells_policies.POLICY_ROOT % "update") + raise exc.HTTPGone() - cell = body['cell'] - cell.pop('id', None) - - try: - # NOTE(Vek): There is a race condition here if multiple - # callers are trying to update the cell - # information simultaneously. Since this - # operation is administrative in nature, and - # will be going away in the future, I don't see - # it as much of a problem... - existing = self.cells_rpcapi.cell_get(context, id) - except exception.CellNotFound as e: - raise exc.HTTPNotFound(explanation=e.format_message()) - self._normalize_cell(cell, existing) - try: - cell = self.cells_rpcapi.cell_update(context, id, cell) - except exception.CellNotFound as e: - raise exc.HTTPNotFound(explanation=e.format_message()) - except exception.CellsUpdateUnsupported as e: - raise exc.HTTPForbidden(explanation=e.format_message()) - return dict(cell=_scrub_cell(cell)) - - # NOTE(gmann): Returns 200 for backwards compatibility but should be 204 - # as this operation complete the sync instance info and return - # no response body. 
- @wsgi.expected_errors((400, 501)) - @common.check_cells_enabled - @validation.schema(cells.sync_instances) + @wsgi.expected_errors(410) def sync_instances(self, req, body): - """Tell all cells to sync instance info.""" - context = req.environ['nova.context'] - - context.can(cells_policies.POLICY_ROOT % "sync_instances") - - project_id = body.pop('project_id', None) - deleted = body.pop('deleted', False) - updated_since = body.pop('updated_since', None) - if isinstance(deleted, six.string_types): - deleted = strutils.bool_from_string(deleted, strict=True) - self.cells_rpcapi.sync_instances(context, project_id=project_id, - updated_since=updated_since, deleted=deleted) + raise exc.HTTPGone() diff --git a/nova/api/openstack/compute/config_drive.py b/nova/api/openstack/compute/config_drive.py deleted file mode 100644 index 6987499a0d3..00000000000 --- a/nova/api/openstack/compute/config_drive.py +++ /dev/null @@ -1,50 +0,0 @@ -# Copyright 2012 OpenStack Foundation -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -"""Config Drive extension.""" - -from nova.api.openstack import wsgi -from nova.policies import config_drive as cd_policies - -ATTRIBUTE_NAME = "config_drive" - - -class ConfigDriveController(wsgi.Controller): - - def _add_config_drive(self, req, servers): - for server in servers: - db_server = req.get_db_instance(server['id']) - # server['id'] is guaranteed to be in the cache due to - # the core API adding it in its 'show'/'detail' methods. 
- server[ATTRIBUTE_NAME] = db_server['config_drive'] - - def _show(self, req, resp_obj): - if 'server' in resp_obj.obj: - server = resp_obj.obj['server'] - self._add_config_drive(req, [server]) - - @wsgi.extends - def show(self, req, resp_obj, id): - context = req.environ['nova.context'] - if context.can(cd_policies.BASE_POLICY_NAME, fatal=False): - self._show(req, resp_obj) - - @wsgi.extends - def detail(self, req, resp_obj): - context = req.environ['nova.context'] - if 'servers' in resp_obj.obj and context.can( - cd_policies.BASE_POLICY_NAME, fatal=False): - servers = resp_obj.obj['servers'] - self._add_config_drive(req, servers) diff --git a/nova/api/openstack/compute/console_auth_tokens.py b/nova/api/openstack/compute/console_auth_tokens.py index 12c0b61c268..ae838fe8579 100644 --- a/nova/api/openstack/compute/console_auth_tokens.py +++ b/nova/api/openstack/compute/console_auth_tokens.py @@ -16,42 +16,58 @@ import webob from nova.api.openstack import wsgi -from nova.consoleauth import rpcapi as consoleauth_rpcapi +import nova.conf +from nova import context as nova_context from nova.i18n import _ +from nova import objects from nova.policies import console_auth_tokens as cat_policies +CONF = nova.conf.CONF + class ConsoleAuthTokensController(wsgi.Controller): - def __init__(self, *args, **kwargs): - self._consoleauth_rpcapi = consoleauth_rpcapi.ConsoleAuthAPI() - super(ConsoleAuthTokensController, self).__init__(*args, **kwargs) def _show(self, req, id, rdp_only): """Checks a console auth token and returns the related connect info.""" context = req.environ['nova.context'] - context.can(cat_policies.BASE_POLICY_NAME) + context.can(cat_policies.BASE_POLICY_NAME, target={}) token = id if not token: msg = _("token not provided") raise webob.exc.HTTPBadRequest(explanation=msg) - connect_info = self._consoleauth_rpcapi.check_token(context, token) + connect_info = None + + results = nova_context.scatter_gather_skip_cell0( + context, objects.ConsoleAuthToken.validate, token) + # NOTE(melwitt): Console token auths are stored in cell databases, + # but with only the token as a request param, we can't know which + # cell database contains the token's corresponding connection info. + # So, we must query all cells for the info and we can break the + # loop as soon as we find a result because the token is associated + # with one instance, which can only be in one cell. 
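The loop that follows implements exactly the strategy this NOTE describes: query every cell, then take the first result that is not a failure sentinel. As a standalone illustration of that take-first-real-result idea (sentinels simplified to bare objects; nova uses nova.context's sentinels and is_cell_failure_sentinel):

    DID_NOT_RESPOND = object()
    RAISED_EXCEPTION = object()

    def first_real_result(results_by_cell):
        for result in results_by_cell.values():
            if result not in (DID_NOT_RESPOND, RAISED_EXCEPTION):
                # The token maps to one instance in one cell, so the
                # first hit is the only hit.
                return result
        return None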
+        for result in results.values():
+            if not nova_context.is_cell_failure_sentinel(result):
+                connect_info = result
+                break
+
         if not connect_info:
             raise webob.exc.HTTPNotFound(explanation=_("Token not found"))

-        console_type = connect_info.get('console_type')
+        console_type = connect_info.console_type
         if rdp_only and console_type != "rdp-html5":
             raise webob.exc.HTTPUnauthorized(
                 explanation=_("The requested console type details are not "
                               "accessible"))

-        return {'console':
-                {i: connect_info[i]
-                 for i in ['instance_uuid', 'host', 'port',
-                           'internal_access_path']
-                 if i in connect_info}}
+        return {'console': {
+            'instance_uuid': connect_info.instance_uuid,
+            'host': connect_info.host,
+            'port': connect_info.port,
+            'internal_access_path': connect_info.internal_access_path,
+        }}

     @wsgi.Controller.api_version("2.1", "2.30")
     @wsgi.expected_errors((400, 401, 404))
@@ -60,5 +76,5 @@ def show(self, req, id):

     @wsgi.Controller.api_version("2.31")  # noqa
     @wsgi.expected_errors((400, 404))
-    def show(self, req, id):
+    def show(self, req, id):  # noqa
         return self._show(req, id, False)
diff --git a/nova/api/openstack/compute/console_output.py b/nova/api/openstack/compute/console_output.py
index 45586ac4854..81d02d3856f 100644
--- a/nova/api/openstack/compute/console_output.py
+++ b/nova/api/openstack/compute/console_output.py
@@ -14,22 +14,20 @@
 # License for the specific language governing permissions and limitations
 # under the License.

-import re
-
 import webob

 from nova.api.openstack import common
 from nova.api.openstack.compute.schemas import console_output
 from nova.api.openstack import wsgi
 from nova.api import validation
-from nova import compute
+from nova.compute import api as compute
 from nova import exception
 from nova.policies import console_output as co_policies


 class ConsoleOutputController(wsgi.Controller):
-    def __init__(self, *args, **kwargs):
-        super(ConsoleOutputController, self).__init__(*args, **kwargs)
+    def __init__(self):
+        super(ConsoleOutputController, self).__init__()
         self.compute_api = compute.API()

     @wsgi.expected_errors((404, 409, 501))
@@ -38,9 +36,10 @@ def __init__(self, *args, **kwargs):
     def get_console_output(self, req, id, body):
         """Get text console output."""
         context = req.environ['nova.context']
-        context.can(co_policies.BASE_POLICY_NAME)
-        instance = common.get_instance(self.compute_api, context, id)
+        instance = common.get_instance(self.compute_api, context, id)
+        context.can(co_policies.BASE_POLICY_NAME,
+                    target={'project_id': instance.project_id})
+
         length = body['os-getConsoleOutput'].get('length')
         # TODO(cyeoh): In a future API update accept a length of -1
         # as meaning unlimited length (convert to None)
@@ -60,11 +59,4 @@ def get_console_output(self, req, id, body):
         except NotImplementedError:
             common.raise_feature_not_supported()

-        # XML output is not correctly escaped, so remove invalid characters
-        # NOTE(cyeoh): We don't support XML output with V2.1, but for
-        # backwards compatibility reasons we continue to filter the output
-        # We should remove this in the future
-        remove_re = re.compile('[\x00-\x08\x0B-\x1F]')
-        output = remove_re.sub('', output)
-
         return {'output': output}
diff --git a/nova/api/openstack/compute/consoles.py b/nova/api/openstack/compute/consoles.py
index 3e332d78f97..16243d56ff0 100644
--- a/nova/api/openstack/compute/consoles.py
+++ b/nova/api/openstack/compute/consoles.py
@@ -16,88 +16,27 @@
 from webob import exc

 from nova.api.openstack import wsgi
-from nova.console import api as console_api
-from nova import exception
-from nova.policies import consoles as consoles_policies
-
-
-def
_translate_keys(cons): - """Coerces a console instance into proper dictionary format.""" - pool = cons['pool'] - info = {'id': cons['id'], - 'console_type': pool['console_type']} - return dict(console=info) - - -def _translate_detail_keys(cons): - """Coerces a console instance into proper dictionary format with detail.""" - pool = cons['pool'] - info = {'id': cons['id'], - 'console_type': pool['console_type'], - 'password': cons['password'], - 'instance_name': cons['instance_name'], - 'port': cons['port'], - 'host': pool['public_hostname']} - return dict(console=info) class ConsolesController(wsgi.Controller): - """The Consoles controller for the OpenStack API.""" + """(Removed) The Consoles controller for the OpenStack API. - def __init__(self): - self.console_api = console_api.API() + This was removed during the Ussuri release along with the nova-console + service. + """ - @wsgi.expected_errors(()) + @wsgi.expected_errors(410) def index(self, req, server_id): - """Returns a list of consoles for this instance.""" - context = req.environ['nova.context'] - context.can(consoles_policies.POLICY_ROOT % 'index') + raise exc.HTTPGone() - consoles = self.console_api.get_consoles( - req.environ['nova.context'], server_id) - return dict(consoles=[_translate_keys(console) - for console in consoles]) - - # NOTE(gmann): Here should be 201 instead of 200 by v2.1 - # +microversions because the console has been created - # completely when returning a response. - @wsgi.expected_errors(404) + @wsgi.expected_errors(410) def create(self, req, server_id, body): - """Creates a new console.""" - context = req.environ['nova.context'] - context.can(consoles_policies.POLICY_ROOT % 'create') - - try: - self.console_api.create_console( - req.environ['nova.context'], server_id) - except exception.InstanceNotFound as e: - raise exc.HTTPNotFound(explanation=e.format_message()) + raise exc.HTTPGone() - @wsgi.expected_errors(404) + @wsgi.expected_errors(410) def show(self, req, server_id, id): - """Shows in-depth information on a specific console.""" - context = req.environ['nova.context'] - context.can(consoles_policies.POLICY_ROOT % 'show') + raise exc.HTTPGone() - try: - console = self.console_api.get_console( - req.environ['nova.context'], - server_id, - int(id)) - except exception.ConsoleNotFound as e: - raise exc.HTTPNotFound(explanation=e.format_message()) - return _translate_detail_keys(console) - - @wsgi.response(202) - @wsgi.expected_errors(404) + @wsgi.expected_errors(410) def delete(self, req, server_id, id): - """Deletes a console.""" - context = req.environ['nova.context'] - context.can(consoles_policies.POLICY_ROOT % 'delete') - - try: - self.console_api.delete_console(req.environ['nova.context'], - server_id, - int(id)) - except exception.ConsoleNotFound as e: - raise exc.HTTPNotFound(explanation=e.format_message()) + raise exc.HTTPGone() diff --git a/nova/api/openstack/compute/create_backup.py b/nova/api/openstack/compute/create_backup.py index 9b2f59cd3f3..43b4114b986 100644 --- a/nova/api/openstack/compute/create_backup.py +++ b/nova/api/openstack/compute/create_backup.py @@ -20,14 +20,14 @@ from nova.api.openstack.compute.schemas import create_backup from nova.api.openstack import wsgi from nova.api import validation -from nova import compute +from nova.compute import api as compute from nova import exception from nova.policies import create_backup as cb_policies class CreateBackupController(wsgi.Controller): - def __init__(self, *args, **kwargs): - super(CreateBackupController, self).__init__(*args, 
**kwargs) + def __init__(self): + super(CreateBackupController, self).__init__() self.compute_api = compute.API() @wsgi.response(202) @@ -47,7 +47,9 @@ def _create_backup(self, req, id, body): """ context = req.environ["nova.context"] - context.can(cb_policies.BASE_POLICY_NAME) + instance = common.get_instance(self.compute_api, context, id) + context.can(cb_policies.BASE_POLICY_NAME, + target={'project_id': instance.project_id}) entity = body["createBackup"] image_name = common.normalize_name(entity["name"]) @@ -63,13 +65,9 @@ def _create_backup(self, req, id, body): common.check_img_metadata_properties_quota(context, metadata) props.update(metadata) - instance = common.get_instance(self.compute_api, context, id) - try: image = self.compute_api.backup(context, instance, image_name, backup_type, rotation, extra_properties=props) - except exception.InstanceUnknownCell as e: - raise webob.exc.HTTPNotFound(explanation=e.format_message()) except exception.InstanceInvalidState as state_error: common.raise_http_conflict_for_instance_invalid_state(state_error, 'createBackup', id) diff --git a/nova/api/openstack/compute/deferred_delete.py b/nova/api/openstack/compute/deferred_delete.py index 4b6f2619383..55879267ffd 100644 --- a/nova/api/openstack/compute/deferred_delete.py +++ b/nova/api/openstack/compute/deferred_delete.py @@ -19,14 +19,14 @@ from nova.api.openstack import common from nova.api.openstack import wsgi -from nova import compute +from nova.compute import api as compute from nova import exception from nova.policies import deferred_delete as dd_policies class DeferredDeleteController(wsgi.Controller): - def __init__(self, *args, **kwargs): - super(DeferredDeleteController, self).__init__(*args, **kwargs) + def __init__(self): + super(DeferredDeleteController, self).__init__() self.compute_api = compute.API() @wsgi.response(202) @@ -35,12 +35,11 @@ def __init__(self, *args, **kwargs): def _restore(self, req, id, body): """Restore a previously deleted instance.""" context = req.environ["nova.context"] - context.can(dd_policies.BASE_POLICY_NAME) instance = common.get_instance(self.compute_api, context, id) + context.can(dd_policies.BASE_POLICY_NAME % 'restore', + target={'project_id': instance.project_id}) try: self.compute_api.restore(context, instance) - except exception.InstanceUnknownCell as error: - raise webob.exc.HTTPNotFound(explanation=error.format_message()) except exception.QuotaError as error: raise webob.exc.HTTPForbidden(explanation=error.format_message()) except exception.InstanceInvalidState as state_error: @@ -54,13 +53,12 @@ def _force_delete(self, req, id, body): """Force delete of instance before deferred cleanup.""" context = req.environ["nova.context"] instance = common.get_instance(self.compute_api, context, id) - context.can(dd_policies.BASE_POLICY_NAME, + context.can(dd_policies.BASE_POLICY_NAME % 'force', target={'user_id': instance.user_id, 'project_id': instance.project_id}) try: self.compute_api.force_delete(context, instance) - except (exception.InstanceNotFound, - exception.InstanceUnknownCell) as e: + except exception.InstanceNotFound as e: raise webob.exc.HTTPNotFound(explanation=e.format_message()) except exception.InstanceIsLocked as e: raise webob.exc.HTTPConflict(explanation=e.format_message()) diff --git a/nova/api/openstack/compute/evacuate.py b/nova/api/openstack/compute/evacuate.py index fc3a1eeb398..aa358127593 100644 --- a/nova/api/openstack/compute/evacuate.py +++ b/nova/api/openstack/compute/evacuate.py @@ -13,6 +13,7 @@ # under the License. 
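create_backup.py and deferred_delete.py above follow the same reordering seen throughout this series: load the instance first, then pass its project into the policy check, so rules can be scoped to the resource owner. A toy stand-in for that fetch-then-authorize flow (the Forbidden class and can() helper are illustrative; real enforcement is context.can() backed by oslo.policy):

    class Forbidden(Exception):
        pass

    def can(caller_project_id, target):
        # Stand-in for context.can() with a project-scoped target.
        if caller_project_id != target['project_id']:
            raise Forbidden('operation not allowed by policy')

    def restore(caller_project_id, instance):
        # instance was already looked up, so a 404 surfaces before authz.
        can(caller_project_id, {'project_id': instance['project_id']})
        # ... proceed with the restore ...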
+from oslo_log import log as logging from oslo_utils import strutils from webob import exc @@ -21,7 +22,7 @@ from nova.api.openstack.compute.schemas import evacuate from nova.api.openstack import wsgi from nova.api import validation -from nova import compute +from nova.compute import api as compute import nova.conf from nova import exception from nova.i18n import _ @@ -30,10 +31,12 @@ CONF = nova.conf.CONF +LOG = logging.getLogger(__name__) + class EvacuateController(wsgi.Controller): - def __init__(self, *args, **kwargs): - super(EvacuateController, self).__init__(*args, **kwargs) + def __init__(self): + super(EvacuateController, self).__init__() self.compute_api = compute.API() self.host_api = compute.HostAPI() @@ -69,11 +72,12 @@ def _get_password_v214(self, req, evacuate_body): # TODO(eliqiao): Should be responding here with 202 Accept # because evacuate is an async call, but keep to 200 for # backwards compatibility reasons. - @wsgi.expected_errors((400, 404, 409)) + @wsgi.expected_errors((400, 403, 404, 409)) @wsgi.action('evacuate') @validation.schema(evacuate.evacuate, "2.0", "2.13") @validation.schema(evacuate.evacuate_v214, "2.14", "2.28") - @validation.schema(evacuate.evacuate_v2_29, "2.29") + @validation.schema(evacuate.evacuate_v2_29, "2.29", "2.67") + @validation.schema(evacuate.evacuate_v2_68, "2.68") def _evacuate(self, req, id, body): """Permit admins to evacuate a server from a failed host to a new one. @@ -117,12 +121,14 @@ def _evacuate(self, req, id, body): try: self.compute_api.evacuate(context, instance, host, on_shared_storage, password, force) - except exception.InstanceUnknownCell as e: - raise exc.HTTPNotFound(explanation=e.format_message()) except exception.InstanceInvalidState as state_error: common.raise_http_conflict_for_instance_invalid_state(state_error, 'evacuate', id) - except exception.ComputeServiceInUse as e: + except ( + exception.ComputeServiceInUse, + exception.ForbiddenPortsWithAccelerator, + exception.ExtendedResourceRequestOldCompute, + ) as e: raise exc.HTTPBadRequest(explanation=e.format_message()) if (not api_version_request.is_supported(req, min_version='2.14') and diff --git a/nova/api/openstack/compute/extended_availability_zone.py b/nova/api/openstack/compute/extended_availability_zone.py deleted file mode 100644 index ac562c588a0..00000000000 --- a/nova/api/openstack/compute/extended_availability_zone.py +++ /dev/null @@ -1,49 +0,0 @@ -# Copyright 2013 Netease, LLC. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -"""The Extended Availability Zone Status API extension.""" - -from nova.api.openstack import wsgi -from nova import availability_zones as avail_zone -from nova.policies import extended_availability_zone as eaz_policies - -PREFIX = "OS-EXT-AZ" - - -class ExtendedAZController(wsgi.Controller): - def _extend_server(self, context, server, instance): - # NOTE(mriedem): The OS-EXT-AZ prefix should not be used for new - # attributes after v2.1. They are only in v2.1 for backward compat - # with v2.0. 
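Stepping back to the evacuate and attach_interfaces hunks: both grow their translation tables from domain exceptions to HTTP statuses (new 400s for accelerator ports and old computes, a 403 for the port quota). A compact stand-in for that mapping layer, using webob as the handlers above do (the table contents are illustrative):

    from webob import exc

    HTTP_BY_ERROR = {
        'InstanceIsLocked': exc.HTTPConflict,      # 409
        'PortNotFound': exc.HTTPNotFound,          # 404
        'PortLimitExceeded': exc.HTTPForbidden,    # 403
    }

    def to_http(error_name, message):
        # Anything unmapped here is treated as a bad request (400).
        factory = HTTP_BY_ERROR.get(error_name, exc.HTTPBadRequest)
        return factory(explanation=message)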
- key = "%s:availability_zone" % PREFIX - az = avail_zone.get_instance_availability_zone(context, instance) - server[key] = az or '' - - @wsgi.extends - def show(self, req, resp_obj, id): - context = req.environ['nova.context'] - if context.can(eaz_policies.BASE_POLICY_NAME, fatal=False): - server = resp_obj.obj['server'] - db_instance = req.get_db_instance(server['id']) - self._extend_server(context, server, db_instance) - - @wsgi.extends - def detail(self, req, resp_obj): - context = req.environ['nova.context'] - if context.can(eaz_policies.BASE_POLICY_NAME, fatal=False): - servers = list(resp_obj.obj['servers']) - for server in servers: - db_instance = req.get_db_instance(server['id']) - self._extend_server(context, server, db_instance) diff --git a/nova/api/openstack/compute/extended_server_attributes.py b/nova/api/openstack/compute/extended_server_attributes.py deleted file mode 100644 index 6f2c7f92d7c..00000000000 --- a/nova/api/openstack/compute/extended_server_attributes.py +++ /dev/null @@ -1,105 +0,0 @@ -# Copyright 2012 OpenStack Foundation -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -"""The Extended Server Attributes API extension.""" - -from nova.api.openstack import api_version_request -from nova.api.openstack import wsgi -from nova import compute -from nova.policies import extended_server_attributes as esa_policies -from nova.policies import servers as servers_policies - - -class ExtendedServerAttributesController(wsgi.Controller): - def __init__(self, *args, **kwargs): - super(ExtendedServerAttributesController, self).__init__(*args, - **kwargs) - self.compute_api = compute.API() - - def _extend_server(self, context, server, instance, req): - key = "OS-EXT-SRV-ATTR:hypervisor_hostname" - server[key] = instance.node - - properties = ['host', 'name'] - if api_version_request.is_supported(req, min_version='2.3'): - # NOTE(mriedem): These will use the OS-EXT-SRV-ATTR prefix below - # and that's OK for microversion 2.3 which is being compatible - # with v2.0 for the ec2 API split out from Nova. After this, - # however, new microversions should not be using the - # OS-EXT-SRV-ATTR prefix. - properties += ['reservation_id', 'launch_index', - 'hostname', 'kernel_id', 'ramdisk_id', - 'root_device_name', 'user_data'] - for attr in properties: - if attr == 'name': - key = "OS-EXT-SRV-ATTR:instance_%s" % attr - else: - # NOTE(mriedem): Nothing after microversion 2.3 should use the - # OS-EXT-SRV-ATTR prefix for the attribute key name. 
- key = "OS-EXT-SRV-ATTR:%s" % attr - server[key] = instance[attr] - - def _server_host_status(self, context, server, instance, req): - host_status = self.compute_api.get_instance_host_status(instance) - server['host_status'] = host_status - - @wsgi.extends - def show(self, req, resp_obj, id): - context = req.environ['nova.context'] - authorize_extend = False - authorize_host_status = False - if context.can(esa_policies.BASE_POLICY_NAME, fatal=False): - authorize_extend = True - if (api_version_request.is_supported(req, min_version='2.16') and - context.can(servers_policies.SERVERS % 'show:host_status', - fatal=False)): - authorize_host_status = True - if authorize_extend or authorize_host_status: - server = resp_obj.obj['server'] - db_instance = req.get_db_instance(server['id']) - # server['id'] is guaranteed to be in the cache due to - # the core API adding it in its 'show' method. - if authorize_extend: - self._extend_server(context, server, db_instance, req) - if authorize_host_status: - self._server_host_status(context, server, db_instance, req) - - @wsgi.extends - def detail(self, req, resp_obj): - context = req.environ['nova.context'] - authorize_extend = False - authorize_host_status = False - if context.can(esa_policies.BASE_POLICY_NAME, fatal=False): - authorize_extend = True - if (api_version_request.is_supported(req, min_version='2.16') and - context.can(servers_policies.SERVERS % 'show:host_status', - fatal=False)): - authorize_host_status = True - if authorize_extend or authorize_host_status: - servers = list(resp_obj.obj['servers']) - # NOTE(dinesh-bhor): Skipping fetching of instances from cache as - # servers list can be empty if invalid status is provided to the - # core API 'detail' method. - if servers: - instances = req.get_db_instances() - if authorize_host_status: - host_statuses = ( - self.compute_api.get_instances_host_statuses( - instances.values())) - for server in servers: - if authorize_extend: - instance = instances[server['id']] - self._extend_server(context, server, instance, req) - if authorize_host_status: - server['host_status'] = host_statuses[server['id']] diff --git a/nova/api/openstack/compute/extended_status.py b/nova/api/openstack/compute/extended_status.py deleted file mode 100644 index e1bb52b2890..00000000000 --- a/nova/api/openstack/compute/extended_status.py +++ /dev/null @@ -1,52 +0,0 @@ -# Copyright 2011 OpenStack Foundation -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -"""The Extended Status Admin API extension.""" - -from nova.api.openstack import wsgi -from nova.policies import extended_status as es_policies - - -class ExtendedStatusController(wsgi.Controller): - def _extend_server(self, server, instance): - # Note(gmann): Removed 'locked_by' from extended status - # to make it same as V2. If needed it can be added with - # microversion. - for state in ['task_state', 'vm_state', 'power_state']: - # NOTE(mriedem): The OS-EXT-STS prefix should not be used for new - # attributes after v2.1. 
They are only in v2.1 for backward compat - # with v2.0. - key = "%s:%s" % ('OS-EXT-STS', state) - server[key] = instance[state] - - @wsgi.extends - def show(self, req, resp_obj, id): - context = req.environ['nova.context'] - if context.can(es_policies.BASE_POLICY_NAME, fatal=False): - server = resp_obj.obj['server'] - db_instance = req.get_db_instance(server['id']) - # server['id'] is guaranteed to be in the cache due to - # the core API adding it in its 'show' method. - self._extend_server(server, db_instance) - - @wsgi.extends - def detail(self, req, resp_obj): - context = req.environ['nova.context'] - if context.can(es_policies.BASE_POLICY_NAME, fatal=False): - servers = list(resp_obj.obj['servers']) - for server in servers: - db_instance = req.get_db_instance(server['id']) - # server['id'] is guaranteed to be in the cache due to - # the core API adding it in its 'detail' method. - self._extend_server(server, db_instance) diff --git a/nova/api/openstack/compute/extended_volumes.py b/nova/api/openstack/compute/extended_volumes.py deleted file mode 100644 index a88a4587535..00000000000 --- a/nova/api/openstack/compute/extended_volumes.py +++ /dev/null @@ -1,97 +0,0 @@ -# Copyright 2013 OpenStack Foundation -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -"""The Extended Volumes API extension.""" -from oslo_log import log as logging - -from nova.api.openstack import api_version_request -from nova.api.openstack import wsgi -from nova import context -from nova import objects -from nova.policies import extended_volumes as ev_policies - -LOG = logging.getLogger(__name__) - - -class ExtendedVolumesController(wsgi.Controller): - def _extend_server(self, context, server, req, bdms): - volumes_attached = [] - for bdm in bdms: - if bdm.get('volume_id'): - volume_attached = {'id': bdm['volume_id']} - if api_version_request.is_supported(req, min_version='2.3'): - volume_attached['delete_on_termination'] = ( - bdm['delete_on_termination']) - volumes_attached.append(volume_attached) - # NOTE(mriedem): The os-extended-volumes prefix should not be used for - # new attributes after v2.1. They are only in v2.1 for backward compat - # with v2.0. 
- key = "os-extended-volumes:volumes_attached" - server[key] = volumes_attached - - @wsgi.extends - def show(self, req, resp_obj, id): - context = req.environ['nova.context'] - if context.can(ev_policies.BASE_POLICY_NAME, fatal=False): - server = resp_obj.obj['server'] - bdms = objects.BlockDeviceMappingList.bdms_by_instance_uuid( - context, [server['id']]) - instance_bdms = self._get_instance_bdms(bdms, server) - self._extend_server(context, server, req, instance_bdms) - - @staticmethod - def _get_instance_bdms_in_multiple_cells(ctxt, servers): - instance_uuids = [server['id'] for server in servers] - inst_maps = objects.InstanceMappingList.get_by_instance_uuids( - ctxt, instance_uuids) - - cell_mappings = {} - for inst_map in inst_maps: - if (inst_map.cell_mapping is not None and - inst_map.cell_mapping.uuid not in cell_mappings): - cell_mappings.update( - {inst_map.cell_mapping.uuid: inst_map.cell_mapping}) - - bdms = {} - results = context.scatter_gather_cells( - ctxt, cell_mappings.values(), 60, - objects.BlockDeviceMappingList.bdms_by_instance_uuid, - instance_uuids) - for cell_uuid, result in results.items(): - if result is context.raised_exception_sentinel: - LOG.warning('Failed to get block device mappings for cell %s', - cell_uuid) - elif result is context.did_not_respond_sentinel: - LOG.warning('Timeout getting block device mappings for cell ' - '%s', cell_uuid) - else: - bdms.update(result) - return bdms - - @wsgi.extends - def detail(self, req, resp_obj): - context = req.environ['nova.context'] - if context.can(ev_policies.BASE_POLICY_NAME, fatal=False): - servers = list(resp_obj.obj['servers']) - bdms = self._get_instance_bdms_in_multiple_cells(context, servers) - for server in servers: - instance_bdms = self._get_instance_bdms(bdms, server) - self._extend_server(context, server, req, instance_bdms) - - def _get_instance_bdms(self, bdms, server): - # server['id'] is guaranteed to be in the cache due to - # the core API adding it in the 'detail' or 'show' method. - # If that instance has since been deleted, it won't be in the - # 'bdms' dictionary though, so use 'get' to avoid KeyErrors. - return bdms.get(server['id'], []) diff --git a/nova/api/openstack/compute/extension_info.py b/nova/api/openstack/compute/extension_info.py index d0e3cc4d557..6aa99f0031f 100644 --- a/nova/api/openstack/compute/extension_info.py +++ b/nova/api/openstack/compute/extension_info.py @@ -852,17 +852,10 @@ class ExtensionInfoController(wsgi.Controller): - def _add_vif_extension(self, all_extensions): - vif_extension_info = {'name': 'ExtendedVIFNet', - 'alias': 'OS-EXT-VIF-NET', - 'description': 'Adds network id parameter' - ' to the virtual interface list.'} - all_extensions.append(vif_extension_info) - @wsgi.expected_errors(()) def index(self, req): context = req.environ['nova.context'] - context.can(ext_policies.BASE_POLICY_NAME) + context.can(ext_policies.BASE_POLICY_NAME, target={}) # NOTE(gmann): This is for v2.1 compatible mode where # extension list should show all extensions as shown by v2. @@ -874,7 +867,7 @@ def index(self, req): @wsgi.expected_errors(404) def show(self, req, id): context = req.environ['nova.context'] - context.can(ext_policies.BASE_POLICY_NAME) + context.can(ext_policies.BASE_POLICY_NAME, target={}) all_exts = EXTENSION_LIST # NOTE(gmann): This is for v2.1 compatible mode where # extension list should show all extensions as shown by v2. 
diff --git a/nova/api/openstack/compute/flavor_access.py b/nova/api/openstack/compute/flavor_access.py index f4314360009..e17e6f0ddcd 100644 --- a/nova/api/openstack/compute/flavor_access.py +++ b/nova/api/openstack/compute/flavor_access.py @@ -63,7 +63,7 @@ class FlavorActionController(wsgi.Controller): @validation.schema(flavor_access.add_tenant_access) def _add_tenant_access(self, req, id, body): context = req.environ['nova.context'] - context.can(fa_policies.POLICY_ROOT % "add_tenant_access") + context.can(fa_policies.POLICY_ROOT % "add_tenant_access", target={}) vals = body['addTenantAccess'] tenant = vals['tenant'] @@ -89,7 +89,7 @@ def _add_tenant_access(self, req, id, body): def _remove_tenant_access(self, req, id, body): context = req.environ['nova.context'] context.can( - fa_policies.POLICY_ROOT % "remove_tenant_access") + fa_policies.POLICY_ROOT % "remove_tenant_access", target={}) vals = body['removeTenantAccess'] tenant = vals['tenant'] diff --git a/nova/api/openstack/compute/flavor_manage.py b/nova/api/openstack/compute/flavor_manage.py index 68aeef68e36..5b0b0517831 100644 --- a/nova/api/openstack/compute/flavor_manage.py +++ b/nova/api/openstack/compute/flavor_manage.py @@ -20,39 +20,23 @@ from nova.compute import flavors from nova import exception from nova import objects -from nova.policies import base from nova.policies import flavor_extra_specs as fes_policies from nova.policies import flavor_manage as fm_policies -from nova import policy - - -ALIAS = "os-flavor-manage" class FlavorManageController(wsgi.Controller): """The Flavor Lifecycle API controller for the OpenStack API.""" _view_builder_class = flavors_view.ViewBuilder - def __init__(self): - super(FlavorManageController, self).__init__() - # NOTE(oomichi): Return 202 for backwards compatibility but should be # 204 as this operation complete the deletion of aggregate resource and # return no response body. @wsgi.response(202) - @wsgi.expected_errors((404)) + @wsgi.expected_errors(404) @wsgi.action("delete") def _delete(self, req, id): context = req.environ['nova.context'] - # TODO(rb560u): remove this check in future release - using_old_action = \ - policy.verify_deprecated_policy(fm_policies.BASE_POLICY_NAME, - fm_policies.POLICY_ROOT % 'delete', - base.RULE_ADMIN_API, - context) - - if not using_old_action: - context.can(fm_policies.POLICY_ROOT % 'delete') + context.can(fm_policies.POLICY_ROOT % 'delete', target={}) flavor = objects.Flavor(context=context, flavorid=id) try: @@ -70,15 +54,7 @@ def _delete(self, req, id): flavors_view.FLAVOR_DESCRIPTION_MICROVERSION) def _create(self, req, body): context = req.environ['nova.context'] - # TODO(rb560u): remove this check in future release - using_old_action = \ - policy.verify_deprecated_policy(fm_policies.BASE_POLICY_NAME, - fm_policies.POLICY_ROOT % 'create', - base.RULE_ADMIN_API, - context) - - if not using_old_action: - context.can(fm_policies.POLICY_ROOT % 'create') + context.can(fm_policies.POLICY_ROOT % 'create', target={}) vals = body['flavor'] @@ -106,7 +82,6 @@ def _create(self, req, body): description=description) # NOTE(gmann): For backward compatibility, non public flavor # access is not being added for created tenant. 
Ref -bug/1209101
-            req.cache_db_flavor(flavor)
         except (exception.FlavorExists,
                 exception.FlavorIdExists) as err:
             raise webob.exc.HTTPConflict(explanation=err.format_message())
@@ -116,8 +91,10 @@ def _create(self, req, body):
                 req, flavors_view.FLAVOR_EXTRA_SPECS_MICROVERSION):
             include_extra_specs = context.can(
                 fes_policies.POLICY_ROOT % 'index', fatal=False)
-            # NOTE(yikun): This empty extra_spec only for keeping consistent
-            # with other related flavor api.
+            # NOTE(yikun): This empty extra_specs dict only keeps the
+            # response consistent with the PUT and GET flavor APIs.
+            # extra_specs is added to the flavor after creation, so to avoid
+            # an error in _view_builder it is populated with an empty dict.
             flavor.extra_specs = {}

         return self._view_builder.show(req, flavor, include_description,
@@ -131,7 +108,7 @@ def _create(self, req, body):
     def _update(self, req, id, body):
         # Validate the policy.
         context = req.environ['nova.context']
-        context.can(fm_policies.POLICY_ROOT % 'update')
+        context.can(fm_policies.POLICY_ROOT % 'update', target={})

         # Get the flavor and update the description.
         try:
@@ -141,9 +118,6 @@ def _update(self, req, id, body):
         except exception.FlavorNotFound as e:
             raise webob.exc.HTTPNotFound(explanation=e.format_message())

-        # Cache the flavor so the flavor_access and flavor_rxtx extensions
-        # can add stuff to the response.
-        req.cache_db_flavor(flavor)
         include_extra_specs = False
         if api_version_request.is_supported(
                 req, flavors_view.FLAVOR_EXTRA_SPECS_MICROVERSION):
diff --git a/nova/api/openstack/compute/flavors.py b/nova/api/openstack/compute/flavors.py
index 5b5fd907521..3986e428b34 100644
--- a/nova/api/openstack/compute/flavors.py
+++ b/nova/api/openstack/compute/flavors.py
@@ -29,28 +29,27 @@
 from nova.policies import flavor_extra_specs as fes_policies
 from nova import utils

-ALIAS = 'flavors'

 class FlavorsController(wsgi.Controller):
     """Flavor controller for the OpenStack API."""

     _view_builder_class = flavors_view.ViewBuilder

-    @validation.query_schema(schema.index_query)
+    @validation.query_schema(schema.index_query_275, '2.75')
+    @validation.query_schema(schema.index_query, '2.0', '2.74')
     @wsgi.expected_errors(400)
     def index(self, req):
         """Return all flavors in brief."""
         limited_flavors = self._get_flavors(req)
         return self._view_builder.index(req, limited_flavors)

-    @validation.query_schema(schema.index_query)
+    @validation.query_schema(schema.index_query_275, '2.75')
+    @validation.query_schema(schema.index_query, '2.0', '2.74')
     @wsgi.expected_errors(400)
     def detail(self, req):
         """Return all flavors in detail."""
         context = req.environ['nova.context']
         limited_flavors = self._get_flavors(req)
-        req.cache_db_flavors(limited_flavors)
         include_extra_specs = False
         if api_version_request.is_supported(
                 req, flavors_view.FLAVOR_EXTRA_SPECS_MICROVERSION):
@@ -65,7 +64,6 @@ def show(self, req, id):
         context = req.environ['nova.context']
         try:
             flavor = flavors.get_flavor_by_flavor_id(id, ctxt=context)
-            req.cache_db_flavor(flavor)
         except exception.FlavorNotFound as e:
             raise webob.exc.HTTPNotFound(explanation=e.format_message())

diff --git a/nova/api/openstack/compute/flavors_extraspecs.py b/nova/api/openstack/compute/flavors_extraspecs.py
index a5c5c5e150d..c64b648beef 100644
--- a/nova/api/openstack/compute/flavors_extraspecs.py
+++ b/nova/api/openstack/compute/flavors_extraspecs.py
@@ -13,13 +13,14 @@
 # License for the specific language governing permissions and limitations
 # under the License.
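
The recurring `target={}` change in the flavor hunks above matters because policy rules are evaluated against a target dict; admin-only operations such as flavor management should not be checked against an implicit project-scoped target. A toy enforcement function, not oslo.policy's real API, showing why an owner rule can never pass with an empty target:

    def can(rule, creds, target):
        """Toy check: 'admin_api' needs the admin role; 'admin_or_owner'
        also passes when the target belongs to the caller's project."""
        if rule == 'admin_api':
            return 'admin' in creds['roles']
        if rule == 'admin_or_owner':
            return ('admin' in creds['roles'] or
                    target.get('project_id') == creds['project_id'])
        raise KeyError(rule)

    creds = {'roles': ['member'], 'project_id': 'p1'}
    # With an explicit empty target the owner half can never match, so
    # only the role part of the rule decides the outcome.
    print(can('admin_or_owner', creds, {}))                    # False
    print(can('admin_or_owner', creds, {'project_id': 'p1'}))  # True
    print(can('admin_api', creds, {}))                         # False
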
-import six import webob +from nova.api.openstack import api_version_request from nova.api.openstack import common from nova.api.openstack.compute.schemas import flavors_extraspecs from nova.api.openstack import wsgi from nova.api import validation +from nova.api.validation.extra_specs import validators from nova import exception from nova.i18n import _ from nova.policies import flavor_extra_specs as fes_policies @@ -28,28 +29,38 @@ class FlavorExtraSpecsController(wsgi.Controller): """The flavor extra specs API controller for the OpenStack API.""" + def _get_extra_specs(self, context, flavor_id): flavor = common.get_flavor(context, flavor_id) return dict(extra_specs=flavor.extra_specs) - # NOTE(gmann): Max length for numeric value is being checked - # explicitly as json schema cannot have max length check for numeric value - def _check_extra_specs_value(self, specs): - for value in specs.values(): - try: - if isinstance(value, (six.integer_types, float)): - value = six.text_type(value) + def _check_extra_specs_value(self, req, specs): + validation_supported = api_version_request.is_supported( + req, min_version='2.86', + ) + + for name, value in specs.items(): + # NOTE(gmann): Max length for numeric value is being checked + # explicitly as json schema cannot have max length check for + # numeric value + if isinstance(value, (int, float)): + value = str(value) + try: utils.check_string_length(value, 'extra_specs value', max_length=255) - except exception.InvalidInput as error: - raise webob.exc.HTTPBadRequest( - explanation=error.format_message()) + except exception.InvalidInput as error: + raise webob.exc.HTTPBadRequest( + explanation=error.format_message()) + + if validation_supported: + validators.validate(name, value) @wsgi.expected_errors(404) def index(self, req, flavor_id): """Returns the list of extra specs for a given flavor.""" context = req.environ['nova.context'] - context.can(fes_policies.POLICY_ROOT % 'index') + context.can(fes_policies.POLICY_ROOT % 'index', + target={'project_id': context.project_id}) return self._get_extra_specs(context, flavor_id) # NOTE(gmann): Here should be 201 instead of 200 by v2.1 @@ -59,10 +70,10 @@ def index(self, req, flavor_id): @validation.schema(flavors_extraspecs.create) def create(self, req, flavor_id, body): context = req.environ['nova.context'] - context.can(fes_policies.POLICY_ROOT % 'create') + context.can(fes_policies.POLICY_ROOT % 'create', target={}) specs = body['extra_specs'] - self._check_extra_specs_value(specs) + self._check_extra_specs_value(req, specs) flavor = common.get_flavor(context, flavor_id) try: flavor.extra_specs = dict(flavor.extra_specs, **specs) @@ -77,9 +88,9 @@ def create(self, req, flavor_id, body): @validation.schema(flavors_extraspecs.update) def update(self, req, flavor_id, id, body): context = req.environ['nova.context'] - context.can(fes_policies.POLICY_ROOT % 'update') + context.can(fes_policies.POLICY_ROOT % 'update', target={}) - self._check_extra_specs_value(body) + self._check_extra_specs_value(req, body) if id not in body: expl = _('Request body and URI mismatch') raise webob.exc.HTTPBadRequest(explanation=expl) @@ -97,7 +108,8 @@ def update(self, req, flavor_id, id, body): def show(self, req, flavor_id, id): """Return a single extra spec item.""" context = req.environ['nova.context'] - context.can(fes_policies.POLICY_ROOT % 'show') + context.can(fes_policies.POLICY_ROOT % 'show', + target={'project_id': context.project_id}) flavor = common.get_flavor(context, flavor_id) try: return {id: 
flavor.extra_specs[id]} @@ -114,7 +126,7 @@ def show(self, req, flavor_id, id): def delete(self, req, flavor_id, id): """Deletes an existing extra spec.""" context = req.environ['nova.context'] - context.can(fes_policies.POLICY_ROOT % 'delete') + context.can(fes_policies.POLICY_ROOT % 'delete', target={}) flavor = common.get_flavor(context, flavor_id) try: del flavor.extra_specs[id] diff --git a/nova/api/openstack/compute/floating_ip_pools.py b/nova/api/openstack/compute/floating_ip_pools.py index d0b4690f0dc..85899ef3306 100644 --- a/nova/api/openstack/compute/floating_ip_pools.py +++ b/nova/api/openstack/compute/floating_ip_pools.py @@ -15,20 +15,20 @@ from nova.api.openstack.api_version_request \ import MAX_PROXY_API_SUPPORT_VERSION from nova.api.openstack import wsgi -from nova import network +from nova.network import neutron from nova.policies import floating_ip_pools as fip_policies -def _translate_floating_ip_view(pool_name): +def _translate_floating_ip_view(pool): return { - 'name': pool_name, + 'name': pool['name'] or pool['id'], } def _translate_floating_ip_pools_view(pools): return { - 'floating_ip_pools': [_translate_floating_ip_view(pool_name) - for pool_name in pools] + 'floating_ip_pools': [_translate_floating_ip_view(pool) + for pool in pools] } @@ -36,14 +36,14 @@ class FloatingIPPoolsController(wsgi.Controller): """The Floating IP Pool API controller for the OpenStack API.""" def __init__(self): - self.network_api = network.API() super(FloatingIPPoolsController, self).__init__() + self.network_api = neutron.API() @wsgi.Controller.api_version("2.1", MAX_PROXY_API_SUPPORT_VERSION) @wsgi.expected_errors(()) def index(self, req): """Return a list of pools.""" context = req.environ['nova.context'] - context.can(fip_policies.BASE_POLICY_NAME) + context.can(fip_policies.BASE_POLICY_NAME, target={}) pools = self.network_api.get_floating_ip_pools(context) return _translate_floating_ip_pools_view(pools) diff --git a/nova/api/openstack/compute/floating_ips.py b/nova/api/openstack/compute/floating_ips.py index 3f42129d65a..2cda494788e 100644 --- a/nova/api/openstack/compute/floating_ips.py +++ b/nova/api/openstack/compute/floating_ips.py @@ -17,7 +17,6 @@ from oslo_log import log as logging from oslo_utils import netutils -from oslo_utils import uuidutils import webob from nova.api.openstack.api_version_request \ @@ -26,10 +25,10 @@ from nova.api.openstack.compute.schemas import floating_ips from nova.api.openstack import wsgi from nova.api import validation -from nova import compute +from nova.compute import api as compute from nova import exception from nova.i18n import _ -from nova import network +from nova.network import neutron from nova.policies import floating_ips as fi_policies @@ -37,45 +36,21 @@ def _translate_floating_ip_view(floating_ip): - result = { - 'id': floating_ip['id'], - 'ip': floating_ip['address'], - 'pool': floating_ip['pool'], + instance_id = None + if floating_ip['port_details']: + instance_id = floating_ip['port_details']['device_id'] + + return { + 'floating_ip': { + 'id': floating_ip['id'], + 'ip': floating_ip['floating_ip_address'], + 'pool': floating_ip['network_details']['name'] or ( + floating_ip['network_details']['id']), + 'fixed_ip': floating_ip['fixed_ip_address'], + 'instance_id': instance_id, + } } - # If fixed_ip is unset on floating_ip, then we can't get any of the next - # stuff, so we'll just short-circuit - if 'fixed_ip' not in floating_ip: - result['fixed_ip'] = None - result['instance_id'] = None - return {'floating_ip': result} - - # 
TODO(rlrossit): These look like dicts, but they're actually versioned - # objects, so we need to do these contain checks because they will not be - # caught by the exceptions below (it raises NotImplementedError and - # OrphanedObjectError. This comment can probably be removed when - # the dict syntax goes away. - try: - if 'address' in floating_ip['fixed_ip']: - result['fixed_ip'] = floating_ip['fixed_ip']['address'] - else: - result['fixed_ip'] = None - except (TypeError, KeyError, AttributeError): - result['fixed_ip'] = None - try: - if 'instance_uuid' in floating_ip['fixed_ip']: - result['instance_id'] = floating_ip['fixed_ip']['instance_uuid'] - else: - result['instance_id'] = None - except (TypeError, KeyError, AttributeError): - result['instance_id'] = None - return {'floating_ip': result} - - -def _translate_floating_ips_view(floating_ips): - return {'floating_ips': [_translate_floating_ip_view(ip)['floating_ip'] - for ip in floating_ips]} - def get_instance_by_floating_ip_addr(self, context, address): try: @@ -97,25 +72,23 @@ def disassociate_floating_ip(self, context, instance, address): self.network_api.disassociate_floating_ip(context, instance, address) except exception.Forbidden: raise webob.exc.HTTPForbidden() - except exception.CannotDisassociateAutoAssignedFloatingIP: - msg = _('Cannot disassociate auto assigned floating IP') - raise webob.exc.HTTPForbidden(explanation=msg) class FloatingIPController(wsgi.Controller): """The Floating IPs API controller for the OpenStack API.""" def __init__(self): - self.compute_api = compute.API() - self.network_api = network.API() super(FloatingIPController, self).__init__() + self.compute_api = compute.API() + self.network_api = neutron.API() @wsgi.Controller.api_version("2.1", MAX_PROXY_API_SUPPORT_VERSION) @wsgi.expected_errors((400, 404)) def show(self, req, id): """Return data about the given floating IP.""" context = req.environ['nova.context'] - context.can(fi_policies.BASE_POLICY_NAME) + context.can(fi_policies.BASE_POLICY_NAME % 'show', + target={'project_id': context.project_id}) try: floating_ip = self.network_api.get_floating_ip(context, id) @@ -132,17 +105,20 @@ def show(self, req, id): def index(self, req): """Return a list of floating IPs allocated to a project.""" context = req.environ['nova.context'] - context.can(fi_policies.BASE_POLICY_NAME) + context.can(fi_policies.BASE_POLICY_NAME % 'list', + target={'project_id': context.project_id}) floating_ips = self.network_api.get_floating_ips_by_project(context) - return _translate_floating_ips_view(floating_ips) + return {'floating_ips': [_translate_floating_ip_view(ip)['floating_ip'] + for ip in floating_ips]} @wsgi.Controller.api_version("2.1", MAX_PROXY_API_SUPPORT_VERSION) @wsgi.expected_errors((400, 403, 404)) def create(self, req, body=None): context = req.environ['nova.context'] - context.can(fi_policies.BASE_POLICY_NAME) + context.can(fi_policies.BASE_POLICY_NAME % 'create', + target={'project_id': context.project_id}) pool = None if body and 'pool' in body: @@ -174,7 +150,8 @@ def create(self, req, body=None): @wsgi.expected_errors((400, 403, 404, 409)) def delete(self, req, id): context = req.environ['nova.context'] - context.can(fi_policies.BASE_POLICY_NAME) + context.can(fi_policies.BASE_POLICY_NAME % 'delete', + target={'project_id': context.project_id}) # get the floating ip object try: @@ -185,7 +162,7 @@ def delete(self, req, id): except exception.InvalidID as e: raise webob.exc.HTTPBadRequest(explanation=e.format_message()) - address = floating_ip['address'] 
+ address = floating_ip['floating_ip_address'] # get the associated instance object (if any) instance = get_instance_by_floating_ip_addr(self, context, address) @@ -194,9 +171,6 @@ def delete(self, req, id): context, instance, floating_ip) except exception.Forbidden: raise webob.exc.HTTPForbidden() - except exception.CannotDisassociateAutoAssignedFloatingIP: - msg = _('Cannot disassociate auto assigned floating IP') - raise webob.exc.HTTPForbidden(explanation=msg) except exception.FloatingIpNotFoundForAddress as exc: raise webob.exc.HTTPNotFound(explanation=exc.format_message()) @@ -204,10 +178,10 @@ def delete(self, req, id): class FloatingIPActionController(wsgi.Controller): """This API is deprecated from the Microversion '2.44'.""" - def __init__(self, *args, **kwargs): - super(FloatingIPActionController, self).__init__(*args, **kwargs) + def __init__(self): + super(FloatingIPActionController, self).__init__() self.compute_api = compute.API() - self.network_api = network.API() + self.network_api = neutron.API() @wsgi.Controller.api_version("2.1", "2.43") @wsgi.expected_errors((400, 403, 404)) @@ -216,12 +190,13 @@ def __init__(self, *args, **kwargs): def _add_floating_ip(self, req, id, body): """Associate floating_ip to an instance.""" context = req.environ['nova.context'] - context.can(fi_policies.BASE_POLICY_NAME) + instance = common.get_instance(self.compute_api, context, id, + expected_attrs=['flavor']) + context.can(fi_policies.BASE_POLICY_NAME % 'add', + target={'project_id': instance.project_id}) address = body['addFloatingIp']['address'] - instance = common.get_instance(self.compute_api, context, id, - expected_attrs=['flavor']) cached_nwinfo = instance.get_network_info() if not cached_nwinfo: LOG.warning( @@ -272,8 +247,6 @@ def _add_floating_ip(self, req, id, body): except exception.NoFloatingIpInterface: msg = _('l3driver call to add floating IP failed') raise webob.exc.HTTPBadRequest(explanation=msg) - except exception.InstanceUnknownCell as e: - raise webob.exc.HTTPNotFound(explanation=e.format_message()) except exception.FloatingIpNotFoundForAddress: msg = _('floating IP not found') raise webob.exc.HTTPNotFound(explanation=msg) @@ -297,10 +270,18 @@ def _add_floating_ip(self, req, id, body): def _remove_floating_ip(self, req, id, body): """Dissociate floating_ip from an instance.""" context = req.environ['nova.context'] - context.can(fi_policies.BASE_POLICY_NAME) address = body['removeFloatingIp']['address'] + # get the associated instance object (if any) + instance = get_instance_by_floating_ip_addr(self, context, address) + + target = {} + if instance: + target = {'project_id': instance.project_id} + context.can(fi_policies.BASE_POLICY_NAME % 'remove', + target=target) + # get the floating ip object try: floating_ip = self.network_api.get_floating_ip_by_address(context, @@ -309,20 +290,9 @@ def _remove_floating_ip(self, req, id, body): msg = _("floating IP not found") raise webob.exc.HTTPNotFound(explanation=msg) - # get the associated instance object (if any) - instance = get_instance_by_floating_ip_addr(self, context, address) - # disassociate if associated - if (instance and - floating_ip.get('fixed_ip_id') and - (uuidutils.is_uuid_like(id) and - [instance.uuid == id] or - [instance.id == id])[0]): - try: - disassociate_floating_ip(self, context, instance, address) - except exception.FloatingIpNotAssociated: - msg = _('Floating IP is not associated') - raise webob.exc.HTTPBadRequest(explanation=msg) + if instance and floating_ip['port_id'] and instance.uuid == id: + 
disassociate_floating_ip(self, context, instance, address) return webob.Response(status_int=202) else: msg = _("Floating IP %(address)s is not associated with instance " diff --git a/nova/api/openstack/compute/hide_server_addresses.py b/nova/api/openstack/compute/hide_server_addresses.py deleted file mode 100644 index b6e837710aa..00000000000 --- a/nova/api/openstack/compute/hide_server_addresses.py +++ /dev/null @@ -1,66 +0,0 @@ -# Copyright 2012 OpenStack Foundation -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -"""Extension for hiding server addresses in certain states.""" - -from nova.api.openstack import wsgi -from nova.compute import vm_states -import nova.conf -from nova.policies import hide_server_addresses as hsa_policies - - -CONF = nova.conf.CONF - - -class Controller(wsgi.Controller): - def __init__(self, *args, **kwargs): - super(Controller, self).__init__(*args, **kwargs) - hidden_states = CONF.api.hide_server_address_states - - # NOTE(jkoelker) _ is not considered uppercase ;) - valid_vm_states = [getattr(vm_states, state) - for state in dir(vm_states) - if state.isupper()] - self.hide_address_states = [state.lower() - for state in hidden_states - if state in valid_vm_states] - - def _perhaps_hide_addresses(self, instance, resp_server): - if instance.get('vm_state') in self.hide_address_states: - resp_server['addresses'] = {} - - @wsgi.extends - def show(self, req, resp_obj, id): - resp = resp_obj - context = req.environ['nova.context'] - if not context.can(hsa_policies.BASE_POLICY_NAME, fatal=False): - return - - if 'server' in resp.obj and 'addresses' in resp.obj['server']: - resp_server = resp.obj['server'] - instance = req.get_db_instance(resp_server['id']) - self._perhaps_hide_addresses(instance, resp_server) - - @wsgi.extends - def detail(self, req, resp_obj): - resp = resp_obj - context = req.environ['nova.context'] - if not context.can(hsa_policies.BASE_POLICY_NAME, fatal=False): - return - - for server in list(resp.obj['servers']): - if 'addresses' in server: - instance = req.get_db_instance(server['id']) - self._perhaps_hide_addresses(instance, server) diff --git a/nova/api/openstack/compute/hosts.py b/nova/api/openstack/compute/hosts.py index 510c8f3beeb..a1ad8f5d9bf 100644 --- a/nova/api/openstack/compute/hosts.py +++ b/nova/api/openstack/compute/hosts.py @@ -16,14 +16,13 @@ """The hosts admin extension.""" from oslo_log import log as logging -import six import webob.exc from nova.api.openstack import common from nova.api.openstack.compute.schemas import hosts from nova.api.openstack import wsgi from nova.api import validation -from nova import compute +from nova.compute import api as compute from nova import context as nova_context from nova import exception from nova import objects @@ -35,8 +34,8 @@ class HostController(wsgi.Controller): """The Hosts API controller for the OpenStack API.""" def __init__(self): - self.api = compute.HostAPI() super(HostController, self).__init__() + self.api = compute.HostAPI() 
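
In the hosts.py hunks that follow, the single compute_node argument becomes a compute_nodes list and every resource column becomes a sum, since one host can back several compute node records (for example with the ironic driver). A self-contained sketch of that aggregation, using an illustrative dataclass rather than nova's ComputeNode object:

    from dataclasses import dataclass

    @dataclass
    class ComputeNode:
        vcpus: int
        memory_mb: int
        local_gb: int

    def total_resources(host_name, compute_nodes):
        """Report host totals summed over every node of the host."""
        return {'resource': {
            'host': host_name,
            'project': '(total)',
            'cpu': sum(cn.vcpus for cn in compute_nodes),
            'memory_mb': sum(cn.memory_mb for cn in compute_nodes),
            'disk_gb': sum(cn.local_gb for cn in compute_nodes),
        }}

    nodes = [ComputeNode(8, 16384, 200), ComputeNode(4, 8192, 100)]
    print(total_resources('ironic-host', nodes))
    # 'cpu': 12, 'memory_mb': 24576, 'disk_gb': 300
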
@wsgi.Controller.api_version("2.1", "2.42") @validation.query_schema(hosts.index_query) @@ -53,15 +52,6 @@ def index(self, req): | {'host_name': 'some.celly.host.name', | 'service': 'cells', | 'zone': 'internal'}, - | {'host_name': 'console1.host.com', - | 'service': 'consoleauth', - | 'zone': 'internal'}, - | {'host_name': 'network1.host.com', - | 'service': 'network', - | 'zone': 'internal'}, - | {'host_name': 'network2.host.com', - | 'service': 'network', - | 'zone': 'internal'}, | {'host_name': 'compute1.host.com', | 'service': 'compute', | 'zone': 'nova'}, @@ -80,7 +70,8 @@ def index(self, req): """ context = req.environ['nova.context'] - context.can(hosts_policies.BASE_POLICY_NAME) + context.can(hosts_policies.POLICY_NAME % 'list', + target={}) filters = {'disabled': False} zone = req.GET.get('zone', None) if zone: @@ -117,7 +108,8 @@ def read_enabled(orig_val): return val == "enable" context = req.environ['nova.context'] - context.can(hosts_policies.BASE_POLICY_NAME) + context.can(hosts_policies.POLICY_NAME % 'update', + target={}) # See what the user wants to 'update' status = body.get('status') maint_mode = body.get('maintenance_mode') @@ -177,7 +169,6 @@ def _set_enabled_status(self, context, host_name, enabled): def _host_power_action(self, req, host_name, action): """Reboots, shuts down or powers up the host.""" context = req.environ['nova.context'] - context.can(hosts_policies.BASE_POLICY_NAME) try: result = self.api.host_power_action(context, host_name, action) except NotImplementedError: @@ -191,33 +182,48 @@ def _host_power_action(self, req, host_name, action): @wsgi.Controller.api_version("2.1", "2.42") @wsgi.expected_errors((400, 404, 501)) def startup(self, req, id): + context = req.environ['nova.context'] + context.can(hosts_policies.POLICY_NAME % 'start', + target={}) return self._host_power_action(req, host_name=id, action="startup") @wsgi.Controller.api_version("2.1", "2.42") @wsgi.expected_errors((400, 404, 501)) def shutdown(self, req, id): + context = req.environ['nova.context'] + context.can(hosts_policies.POLICY_NAME % 'shutdown', + target={}) return self._host_power_action(req, host_name=id, action="shutdown") @wsgi.Controller.api_version("2.1", "2.42") @wsgi.expected_errors((400, 404, 501)) def reboot(self, req, id): + context = req.environ['nova.context'] + context.can(hosts_policies.POLICY_NAME % 'reboot', + target={}) return self._host_power_action(req, host_name=id, action="reboot") @staticmethod - def _get_total_resources(host_name, compute_node): + def _get_total_resources(host_name, compute_nodes): return {'resource': {'host': host_name, 'project': '(total)', - 'cpu': compute_node.vcpus, - 'memory_mb': compute_node.memory_mb, - 'disk_gb': compute_node.local_gb}} + 'cpu': sum(cn.vcpus + for cn in compute_nodes), + 'memory_mb': sum(cn.memory_mb + for cn in compute_nodes), + 'disk_gb': sum(cn.local_gb + for cn in compute_nodes)}} @staticmethod - def _get_used_now_resources(host_name, compute_node): + def _get_used_now_resources(host_name, compute_nodes): return {'resource': {'host': host_name, 'project': '(used_now)', - 'cpu': compute_node.vcpus_used, - 'memory_mb': compute_node.memory_mb_used, - 'disk_gb': compute_node.local_gb_used}} + 'cpu': sum(cn.vcpus_used + for cn in compute_nodes), + 'memory_mb': sum(cn.memory_mb_used + for cn in compute_nodes), + 'disk_gb': sum(cn.local_gb_used + for cn in compute_nodes)}} @staticmethod def _get_resource_totals_from_instances(host_name, instances): @@ -266,25 +272,25 @@ def show(self, req, id): 'cpu': 1, 
'memory_mb': 2048, 'disk_gb': 30} """ context = req.environ['nova.context'] - context.can(hosts_policies.BASE_POLICY_NAME) + context.can(hosts_policies.POLICY_NAME % 'show', + target={}) host_name = id try: mapping = objects.HostMapping.get_by_host(context, host_name) nova_context.set_target_cell(context, mapping.cell_mapping) - compute_node = ( - objects.ComputeNode.get_first_node_by_host_for_old_compat( - context, host_name)) + compute_nodes = objects.ComputeNodeList.get_all_by_host( + context, host_name) instances = self.api.instance_get_all_by_host(context, host_name) except (exception.ComputeHostNotFound, exception.HostMappingNotFound) as e: raise webob.exc.HTTPNotFound(explanation=e.format_message()) - resources = [self._get_total_resources(host_name, compute_node)] + resources = [self._get_total_resources(host_name, compute_nodes)] resources.append(self._get_used_now_resources(host_name, - compute_node)) + compute_nodes)) resources.append(self._get_resource_totals_from_instances(host_name, instances)) by_proj_resources = self._get_resources_by_project(host_name, instances) - for resource in six.itervalues(by_proj_resources): + for resource in by_proj_resources.values(): resources.append({'resource': resource}) return {'host': resources} diff --git a/nova/api/openstack/compute/hypervisors.py b/nova/api/openstack/compute/hypervisors.py index df6c206d0b0..63908bc81c1 100644 --- a/nova/api/openstack/compute/hypervisors.py +++ b/nova/api/openstack/compute/hypervisors.py @@ -27,8 +27,7 @@ from nova.api.openstack.compute.views import hypervisors as hyper_view from nova.api.openstack import wsgi from nova.api import validation -from nova.cells import utils as cells_utils -from nova import compute +from nova.compute import api as compute from nova import exception from nova.i18n import _ from nova.policies import hypervisors as hv_policies @@ -46,54 +45,88 @@ class HypervisorsController(wsgi.Controller): _view_builder_class = hyper_view.ViewBuilder def __init__(self): + super(HypervisorsController, self).__init__() self.host_api = compute.HostAPI() self.servicegroup_api = servicegroup.API() - super(HypervisorsController, self).__init__() - def _view_hypervisor(self, hypervisor, service, detail, req, servers=None, - **kwargs): + def _view_hypervisor( + self, hypervisor, service, detail, req, servers=None, + with_servers=False, + ): alive = self.servicegroup_api.service_is_up(service) # The 2.53 microversion returns the compute node uuid rather than id. 
uuid_for_id = api_version_request.is_supported( req, min_version=UUID_FOR_ID_MIN_VERSION) + hyp_dict = { 'id': hypervisor.uuid if uuid_for_id else hypervisor.id, 'hypervisor_hostname': hypervisor.hypervisor_hostname, 'state': 'up' if alive else 'down', - 'status': ('disabled' if service.disabled - else 'enabled'), - } + 'status': 'disabled' if service.disabled else 'enabled', + } if detail: - for field in ('vcpus', 'memory_mb', 'local_gb', 'vcpus_used', - 'memory_mb_used', 'local_gb_used', - 'hypervisor_type', 'hypervisor_version', - 'free_ram_mb', 'free_disk_gb', 'current_workload', - 'running_vms', 'disk_available_least', 'host_ip'): + for field in ( + 'hypervisor_type', 'hypervisor_version', 'host_ip', + ): hyp_dict[field] = getattr(hypervisor, field) - service_id = service.uuid if uuid_for_id else service.id hyp_dict['service'] = { - 'id': service_id, + 'id': service.uuid if uuid_for_id else service.id, 'host': hypervisor.host, 'disabled_reason': service.disabled_reason, - } + } - if api_version_request.is_supported(req, min_version='2.28'): + # The 2.88 microversion removed these fields, so only add them on older + # microversions + if detail and api_version_request.is_supported( + req, max_version='2.87', + ): + for field in ( + 'vcpus', 'memory_mb', 'local_gb', 'vcpus_used', + 'memory_mb_used', 'local_gb_used', 'free_ram_mb', + 'free_disk_gb', 'current_workload', 'running_vms', + 'disk_available_least', + ): + hyp_dict[field] = getattr(hypervisor, field) + + if api_version_request.is_supported(req, max_version='2.27'): + hyp_dict['cpu_info'] = hypervisor.cpu_info + else: if hypervisor.cpu_info: hyp_dict['cpu_info'] = jsonutils.loads(hypervisor.cpu_info) else: hyp_dict['cpu_info'] = {} - else: - hyp_dict['cpu_info'] = hypervisor.cpu_info - if servers: - hyp_dict['servers'] = [dict(name=serv['name'], uuid=serv['uuid']) - for serv in servers] + # The 2.88 microversion also *added* the 'uptime' field to the response + if detail and api_version_request.is_supported( + req, min_version='2.88', + ): + try: + hyp_dict['uptime'] = self.host_api.get_host_uptime( + req.environ['nova.context'], hypervisor.host) + except ( + NotImplementedError, + exception.ComputeServiceUnavailable, + exception.HostMappingNotFound, + exception.HostNotFound, + ): + # Not all virt drivers support this, and it's not generally + # possible to get uptime for a down host + hyp_dict['uptime'] = None - # Add any additional info - if kwargs: - hyp_dict.update(kwargs) + if servers: + hyp_dict['servers'] = [ + {'name': serv['name'], 'uuid': serv['uuid']} + for serv in servers + ] + # The 2.75 microversion adds 'servers' field always in response. + # Empty list if there are no servers on hypervisors and it is + # requested in request. + elif with_servers and api_version_request.is_supported( + req, min_version='2.75', + ): + hyp_dict['servers'] = [] return hyp_dict @@ -117,7 +150,6 @@ def _get_hypervisors(self, req, detail=False, limit=None, marker=None, :param links: If True, return links in the response for paging. 
""" context = req.environ['nova.context'] - context.can(hv_policies.BASE_POLICY_NAME) # The 2.53 microversion moves the search and servers routes into # GET /os-hypervisors and GET /os-hypervisors/detail with query @@ -168,17 +200,23 @@ def _get_hypervisors(self, req, detail=False, limit=None, marker=None, context, hyp.host) service = self.host_api.service_get_by_compute_host( context, hyp.host) - hypervisors_list.append( - self._view_hypervisor( - hyp, service, detail, req, servers=instances)) - except (exception.ComputeHostNotFound, - exception.HostMappingNotFound): + except ( + exception.ComputeHostNotFound, + exception.HostMappingNotFound, + ): # The compute service could be deleted which doesn't delete # the compute node record, that has to be manually removed # from the database so we just ignore it when listing nodes. LOG.debug('Unable to find service for compute node %s. The ' 'service may be deleted and compute nodes need to ' 'be manually cleaned up.', hyp.host) + continue + + hypervisor = self._view_hypervisor( + hyp, service, detail, req, servers=instances, + with_servers=with_servers, + ) + hypervisors_list.append(hypervisor) hypervisors_dict = dict(hypervisors=hypervisors_list) if links: @@ -204,17 +242,19 @@ def index(self, req): @wsgi.Controller.api_version("2.33", "2.52") # noqa @validation.query_schema(hyper_schema.list_query_schema_v233) - @wsgi.expected_errors((400)) - def index(self, req): + @wsgi.expected_errors(400) + def index(self, req): # noqa limit, marker = common.get_limit_and_marker(req) return self._index(req, limit=limit, marker=marker, links=True) @wsgi.Controller.api_version("2.1", "2.32") # noqa @wsgi.expected_errors(()) - def index(self, req): + def index(self, req): # noqa return self._index(req) def _index(self, req, limit=None, marker=None, links=False): + context = req.environ['nova.context'] + context.can(hv_policies.BASE_POLICY_NAME % 'list', target={}) return self._get_hypervisors(req, detail=False, limit=limit, marker=marker, links=links) @@ -235,16 +275,18 @@ def detail(self, req): @wsgi.Controller.api_version("2.33", "2.52") # noqa @validation.query_schema(hyper_schema.list_query_schema_v233) @wsgi.expected_errors((400)) - def detail(self, req): + def detail(self, req): # noqa limit, marker = common.get_limit_and_marker(req) return self._detail(req, limit=limit, marker=marker, links=True) @wsgi.Controller.api_version("2.1", "2.32") # noqa @wsgi.expected_errors(()) - def detail(self, req): + def detail(self, req): # noqa return self._detail(req) def _detail(self, req, limit=None, marker=None, links=False): + context = req.environ['nova.context'] + context.can(hv_policies.BASE_POLICY_NAME % 'list-detail', target={}) return self._get_hypervisors(req, detail=True, limit=limit, marker=marker, links=links) @@ -267,11 +309,6 @@ def _validate_id(req, hypervisor_id): msg = _('Invalid uuid %s') % hypervisor_id raise webob.exc.HTTPBadRequest(explanation=msg) else: - # This API is supported for cells v1 and as such the id can be - # a cell v1 delimited string, so we have to parse it first. 
- if cells_utils.CELL_ITEM_SEP in str(hypervisor_id): - hypervisor_id = cells_utils.split_cell_and_item( - hypervisor_id)[1] try: utils.validate_integer(hypervisor_id, 'id') except exception.InvalidInput: @@ -296,51 +333,87 @@ def show(self, req, id): @wsgi.Controller.api_version("2.1", "2.52") # noqa F811 @wsgi.expected_errors(404) - def show(self, req, id): + def show(self, req, id): # noqa return self._show(req, id) def _show(self, req, id, with_servers=False): context = req.environ['nova.context'] - context.can(hv_policies.BASE_POLICY_NAME) + context.can(hv_policies.BASE_POLICY_NAME % 'show', target={}) self._validate_id(req, id) try: hyp = self.host_api.compute_node_get(context, id) - instances = None - if with_servers: + except exception.ComputeHostNotFound: + # If the ComputeNode is missing, that's a straight up 404 + msg = _("Hypervisor with ID '%s' could not be found.") % id + raise webob.exc.HTTPNotFound(explanation=msg) + + instances = None + if with_servers: + try: instances = self.host_api.instance_get_all_by_host( context, hyp.host) + except exception.HostMappingNotFound: + msg = _("Hypervisor with ID '%s' could not be found.") % id + raise webob.exc.HTTPNotFound(explanation=msg) + + try: service = self.host_api.service_get_by_compute_host( context, hyp.host) - except (ValueError, exception.ComputeHostNotFound, - exception.HostMappingNotFound): + except ( + exception.ComputeHostNotFound, + exception.HostMappingNotFound, + ): msg = _("Hypervisor with ID '%s' could not be found.") % id raise webob.exc.HTTPNotFound(explanation=msg) - return dict(hypervisor=self._view_hypervisor( - hyp, service, True, req, instances)) + return { + 'hypervisor': self._view_hypervisor( + hyp, service, detail=True, req=req, servers=instances, + with_servers=with_servers, + ), + } + + @wsgi.Controller.api_version('2.1', '2.87') @wsgi.expected_errors((400, 404, 501)) def uptime(self, req, id): + """Prior to microversion 2.88, you could retrieve a special version of + the hypervisor detail view that included uptime. Starting in 2.88, this + field is now included in the standard detail view, making this API + unnecessary. 
+ """ context = req.environ['nova.context'] - context.can(hv_policies.BASE_POLICY_NAME) + context.can(hv_policies.BASE_POLICY_NAME % 'uptime', target={}) self._validate_id(req, id) try: hyp = self.host_api.compute_node_get(context, id) - except (ValueError, exception.ComputeHostNotFound): + except exception.ComputeHostNotFound: + # If the ComputeNode is missing, that's a straight up 404 + msg = _("Hypervisor with ID '%s' could not be found.") % id + raise webob.exc.HTTPNotFound(explanation=msg) + + try: + service = self.host_api.service_get_by_compute_host( + context, hyp.host) + except ( + exception.ComputeHostNotFound, + exception.HostMappingNotFound, + ): msg = _("Hypervisor with ID '%s' could not be found.") % id raise webob.exc.HTTPNotFound(explanation=msg) # Get the uptime try: - host = hyp.host - uptime = self.host_api.get_host_uptime(context, host) - service = self.host_api.service_get_by_compute_host(context, host) + uptime = self.host_api.get_host_uptime(context, hyp.host) except NotImplementedError: common.raise_feature_not_supported() - except exception.ComputeServiceUnavailable as e: + except ( + exception.ComputeServiceUnavailable, + exception.HostNotFound, + ) as e: raise webob.exc.HTTPBadRequest(explanation=e.format_message()) except exception.HostMappingNotFound: # NOTE(danms): This mirrors the compute_node_get() behavior @@ -349,8 +422,10 @@ def uptime(self, req, id): msg = _("Hypervisor with ID '%s' could not be found.") % id raise webob.exc.HTTPNotFound(explanation=msg) - return dict(hypervisor=self._view_hypervisor(hyp, service, False, req, - uptime=uptime)) + hypervisor = self._view_hypervisor(hyp, service, False, req) + hypervisor['uptime'] = uptime + + return {'hypervisor': hypervisor} @wsgi.Controller.api_version('2.1', '2.52') @wsgi.expected_errors(404) @@ -361,19 +436,34 @@ def search(self, req, id): index and detail methods. """ context = req.environ['nova.context'] - context.can(hv_policies.BASE_POLICY_NAME) - hypervisors = self._get_compute_nodes_by_name_pattern(context, id) - try: - return dict(hypervisors=[ - self._view_hypervisor( - hyp, - self.host_api.service_get_by_compute_host(context, - hyp.host), - False, req) - for hyp in hypervisors]) - except exception.HostMappingNotFound: - msg = _("No hypervisor matching '%s' could be found.") % id - raise webob.exc.HTTPNotFound(explanation=msg) + context.can(hv_policies.BASE_POLICY_NAME % 'search', target={}) + + # Get all compute nodes with a hypervisor_hostname that matches + # the given pattern. If none are found then it's a 404 error. + compute_nodes = self._get_compute_nodes_by_name_pattern(context, id) + + hypervisors = [] + for compute_node in compute_nodes: + try: + service = self.host_api.service_get_by_compute_host( + context, compute_node.host) + except exception.ComputeHostNotFound: + # The compute service could be deleted which doesn't delete + # the compute node record, that has to be manually removed + # from the database so we just ignore it when listing nodes. + LOG.debug( + 'Unable to find service for compute node %s. 
The '
+                    'service may be deleted and compute nodes need to '
+                    'be manually cleaned up.', compute_node.host)
+                continue
+            except exception.HostMappingNotFound as e:
+                raise webob.exc.HTTPNotFound(explanation=e.format_message())
+
+            hypervisor = self._view_hypervisor(
+                compute_node, service, False, req)
+            hypervisors.append(hypervisor)
+
+        return {'hypervisors': hypervisors}

     @wsgi.Controller.api_version('2.1', '2.52')
     @wsgi.expected_errors(404)
@@ -385,25 +475,49 @@ def servers(self, req, id):
         GET /os-hypervisors index and detail methods.
         """
         context = req.environ['nova.context']
-        context.can(hv_policies.BASE_POLICY_NAME)
+        context.can(hv_policies.BASE_POLICY_NAME % 'servers', target={})
+
+        # Get all compute nodes with a hypervisor_hostname that matches
+        # the given pattern. If none are found then it's a 404 error.
         compute_nodes = self._get_compute_nodes_by_name_pattern(context, id)
+
         hypervisors = []
         for compute_node in compute_nodes:
             try:
                 instances = self.host_api.instance_get_all_by_host(context,
                     compute_node.host)
+            except exception.HostMappingNotFound as e:
+                raise webob.exc.HTTPNotFound(explanation=e.format_message())
+
+            try:
                 service = self.host_api.service_get_by_compute_host(
                     context, compute_node.host)
+            except exception.ComputeHostNotFound:
+                # The compute service could be deleted which doesn't delete
+                # the compute node record, that has to be manually removed
+                # from the database so we just ignore it when listing nodes.
+                LOG.debug(
+                    'Unable to find service for compute node %s. The '
+                    'service may be deleted and compute nodes need to '
+                    'be manually cleaned up.', compute_node.host)
+                continue
             except exception.HostMappingNotFound as e:
                 raise webob.exc.HTTPNotFound(explanation=e.format_message())
-            hyp = self._view_hypervisor(compute_node, service, False, req,
-                                        instances)
-            hypervisors.append(hyp)
-        return dict(hypervisors=hypervisors)
+            hypervisor = self._view_hypervisor(
+                compute_node, service, False, req, instances)
+            hypervisors.append(hypervisor)
+
+        return {'hypervisors': hypervisors}

+    @wsgi.Controller.api_version('2.1', '2.87')
     @wsgi.expected_errors(())
     def statistics(self, req):
+        """Prior to microversion 2.88, you could get statistics for the
+        hypervisor. Most of these are now accessible from placement and the
+        few that aren't are misleading and frequently misunderstood.
+ """ context = req.environ['nova.context'] - context.can(hv_policies.BASE_POLICY_NAME) + context.can(hv_policies.BASE_POLICY_NAME % 'statistics', target={}) stats = self.host_api.compute_node_statistics(context) return dict(hypervisor_statistics=stats) diff --git a/nova/api/openstack/compute/image_metadata.py b/nova/api/openstack/compute/image_metadata.py index f7071061849..dcaded60ee3 100644 --- a/nova/api/openstack/compute/image_metadata.py +++ b/nova/api/openstack/compute/image_metadata.py @@ -24,14 +24,15 @@ from nova.api import validation from nova import exception from nova.i18n import _ -import nova.image +from nova.image import glance class ImageMetadataController(wsgi.Controller): """The image metadata API controller for the OpenStack API.""" def __init__(self): - self.image_api = nova.image.API() + super(ImageMetadataController, self).__init__() + self.image_api = glance.API() def _get_image(self, context, image_id): try: diff --git a/nova/api/openstack/compute/image_size.py b/nova/api/openstack/compute/image_size.py deleted file mode 100644 index 8ed4ae6135f..00000000000 --- a/nova/api/openstack/compute/image_size.py +++ /dev/null @@ -1,48 +0,0 @@ -# Copyright 2013 Rackspace Hosting -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from nova.api.openstack import wsgi -from nova.policies import image_size as is_policies - - -class ImageSizeController(wsgi.Controller): - - def _extend_image(self, image, image_cache): - # NOTE(mriedem): The OS-EXT-* prefix should not be used for new - # attributes after v2.1. They are only in v2.1 for backward compat - # with v2.0. 
- key = "OS-EXT-IMG-SIZE:size" - image[key] = image_cache['size'] - - @wsgi.extends - def show(self, req, resp_obj, id): - context = req.environ["nova.context"] - if context.can(is_policies.BASE_POLICY_NAME, fatal=False): - image_resp = resp_obj.obj['image'] - # image guaranteed to be in the cache due to the core API adding - # it in its 'show' method - image_cached = req.get_db_item('images', image_resp['id']) - self._extend_image(image_resp, image_cached) - - @wsgi.extends - def detail(self, req, resp_obj): - context = req.environ['nova.context'] - if context.can(is_policies.BASE_POLICY_NAME, fatal=False): - images_resp = list(resp_obj.obj['images']) - # images guaranteed to be in the cache due to the core API adding - # it in its 'detail' method - for image in images_resp: - image_cached = req.get_db_item('images', image['id']) - self._extend_image(image, image_cached) diff --git a/nova/api/openstack/compute/images.py b/nova/api/openstack/compute/images.py index 28d791a36e3..1adf20c99b3 100644 --- a/nova/api/openstack/compute/images.py +++ b/nova/api/openstack/compute/images.py @@ -22,8 +22,7 @@ from nova.api.openstack import wsgi from nova import exception from nova.i18n import _ -import nova.image -import nova.utils +from nova.image import glance SUPPORTED_FILTERS = { @@ -42,9 +41,9 @@ class ImagesController(wsgi.Controller): _view_builder_class = views_images.ViewBuilder - def __init__(self, **kwargs): - super(ImagesController, self).__init__(**kwargs) - self._image_api = nova.image.API() + def __init__(self): + super(ImagesController, self).__init__() + self._image_api = glance.API() def _get_filters(self, req): """Return a dictionary of query param filters from the request. @@ -89,7 +88,6 @@ def show(self, req, id): explanation = _("Image not found.") raise webob.exc.HTTPNotFound(explanation=explanation) - req.cache_db_items('images', [image], 'id') return self._view_builder.show(req, image) @wsgi.Controller.api_version("2.1", MAX_PROXY_API_SUPPORT_VERSION) @@ -149,5 +147,4 @@ def detail(self, req): except exception.Invalid as e: raise webob.exc.HTTPBadRequest(explanation=e.format_message()) - req.cache_db_items('images', images, 'id') return self._view_builder.detail(req, images) diff --git a/nova/api/openstack/compute/instance_actions.py b/nova/api/openstack/compute/instance_actions.py index aef7440340c..8de6a8358df 100644 --- a/nova/api/openstack/compute/instance_actions.py +++ b/nova/api/openstack/compute/instance_actions.py @@ -25,7 +25,7 @@ import instance_actions as instance_actions_view from nova.api.openstack import wsgi from nova.api import validation -from nova import compute +from nova.compute import api as compute from nova import exception from nova.i18n import _ from nova.policies import instance_actions as ia_policies @@ -53,8 +53,9 @@ def _format_action(self, action_raw, action_keys): action[key] = action_raw.get(key) return action - def _format_event(self, event_raw, project_id, show_traceback=False, - show_host=False, show_hostid=False): + @staticmethod + def _format_event(event_raw, project_id, show_traceback=False, + show_host=False, show_hostid=False, show_details=False): event = {} for key in EVENT_KEYS: # By default, non-admins are not allowed to see traceback details. 
@@ -67,6 +68,8 @@ def _format_event(self, event_raw, project_id, show_traceback=False,
         if show_hostid:
             event['hostId'] = utils.generate_hostid(event_raw['host'],
                                                     project_id)
+        if show_details:
+            event['details'] = event_raw['details']
         return event

     @wsgi.Controller.api_version("2.1", "2.20")
@@ -74,7 +77,7 @@ def _get_instance(self, req, context, server_id):
         return common.get_instance(self.compute_api, context, server_id)

     @wsgi.Controller.api_version("2.21")  # noqa
-    def _get_instance(self, req, context, server_id):
+    def _get_instance(self, req, context, server_id):  # noqa
         with utils.temporary_mutation(context, read_deleted='yes'):
             return common.get_instance(self.compute_api, context, server_id)

@@ -84,7 +87,8 @@ def index(self, req, server_id):
         """Returns the list of actions recorded for a given instance."""
         context = req.environ["nova.context"]
         instance = self._get_instance(req, context, server_id)
-        context.can(ia_policies.BASE_POLICY_NAME, instance)
+        context.can(ia_policies.BASE_POLICY_NAME % 'list',
+                    target={'project_id': instance.project_id})
         actions_raw = self.action_api.actions_get(context, instance)
         actions = [self._format_action(action, ACTION_KEYS)
                    for action in actions_raw]
@@ -92,19 +96,32 @@ def index(self, req, server_id):

     @wsgi.Controller.api_version("2.58")  # noqa
     @wsgi.expected_errors((400, 404))
+    @validation.query_schema(schema_instance_actions.list_query_params_v266,
+                             "2.66")
     @validation.query_schema(schema_instance_actions.list_query_params_v258,
-                             "2.58")
-    def index(self, req, server_id):
+                             "2.58", "2.65")
+    def index(self, req, server_id):  # noqa
         """Returns the list of actions recorded for a given instance."""
         context = req.environ["nova.context"]
         instance = self._get_instance(req, context, server_id)
-        context.can(ia_policies.BASE_POLICY_NAME, instance)
+        context.can(ia_policies.BASE_POLICY_NAME % 'list',
+                    target={'project_id': instance.project_id})
         search_opts = {}
         search_opts.update(req.GET)
         if 'changes-since' in search_opts:
             search_opts['changes-since'] = timeutils.parse_isotime(
                 search_opts['changes-since'])

+        if 'changes-before' in search_opts:
+            search_opts['changes-before'] = timeutils.parse_isotime(
+                search_opts['changes-before'])
+            changes_since = search_opts.get('changes-since')
+            if (changes_since and search_opts['changes-before'] <
+                    search_opts['changes-since']):
+                msg = _('The value of changes-since must be less than '
+                        'or equal to changes-before.')
+                raise exc.HTTPBadRequest(explanation=msg)
+
         limit, marker = common.get_limit_and_marker(req)
         try:
             actions_raw = self.action_api.actions_get(context, instance,
@@ -126,7 +143,8 @@ def show(self, req, server_id, id):
         """Return data about the given instance action."""
         context = req.environ['nova.context']
         instance = self._get_instance(req, context, server_id)
-        context.can(ia_policies.BASE_POLICY_NAME, instance)
+        context.can(ia_policies.BASE_POLICY_NAME % 'show',
+                    target={'project_id': instance.project_id})
         action = self.action_api.action_get_by_request_id(context, instance,
                                                           id)
         if action is None:
@@ -146,7 +164,9 @@ def show(self, req, server_id, id):
         show_events = False
         show_traceback = False
         show_host = False
-        if context.can(ia_policies.POLICY_ROOT % 'events', fatal=False):
+        if context.can(ia_policies.BASE_POLICY_NAME % 'events',
+                       target={'project_id': instance.project_id},
+                       fatal=False):
             # For all microversions, the user can see all event details
             # including the traceback.
show_events = show_traceback = True @@ -161,10 +181,27 @@ def show(self, req, server_id, id): show_hostid = api_version_request.is_supported(req, '2.62') if show_events: + # NOTE(brinzhang): Event details are shown since microversion + # 2.84. + show_details = False + support_v284 = api_version_request.is_supported(req, '2.84') + if support_v284: + show_details = context.can( + ia_policies.BASE_POLICY_NAME % 'events:details', + target={'project_id': instance.project_id}, fatal=False) + events_raw = self.action_api.action_events_get(context, instance, action_id) + # NOTE(takashin): The project IDs of instance action events + # become null (None) when instance action events are created + # by periodic tasks. If the project ID is null (None), + # it causes an error when 'hostId' is generated. + # If the project ID is null (None), pass the project ID of + # the server instead of that of instance action events. action['events'] = [self._format_event( - evt, action['project_id'], show_traceback=show_traceback, - show_host=show_host, show_hostid=show_hostid + evt, action['project_id'] or instance.project_id, + show_traceback=show_traceback, + show_host=show_host, show_hostid=show_hostid, + show_details=show_details ) for evt in events_raw] return {'instanceAction': action} diff --git a/nova/api/openstack/compute/instance_usage_audit_log.py b/nova/api/openstack/compute/instance_usage_audit_log.py index 702aceab7d3..f15a210b911 100644 --- a/nova/api/openstack/compute/instance_usage_audit_log.py +++ b/nova/api/openstack/compute/instance_usage_audit_log.py @@ -19,7 +19,7 @@ import webob.exc from nova.api.openstack import wsgi -from nova import compute +from nova.compute import api as compute from nova.compute import rpcapi as compute_rpcapi from nova.i18n import _ from nova.policies import instance_usage_audit_log as iual_policies @@ -27,20 +27,22 @@ class InstanceUsageAuditLogController(wsgi.Controller): + def __init__(self): + super(InstanceUsageAuditLogController, self).__init__() self.host_api = compute.HostAPI() @wsgi.expected_errors(()) def index(self, req): context = req.environ['nova.context'] - context.can(iual_policies.BASE_POLICY_NAME) + context.can(iual_policies.BASE_POLICY_NAME % 'list', target={}) task_log = self._get_audit_task_logs(context) return {'instance_usage_audit_logs': task_log} @wsgi.expected_errors(400) def show(self, req, id): context = req.environ['nova.context'] - context.can(iual_policies.BASE_POLICY_NAME) + context.can(iual_policies.BASE_POLICY_NAME % 'show', target={}) try: if '.' in id: before_date = datetime.datetime.strptime(str(id), diff --git a/nova/api/openstack/compute/ips.py b/nova/api/openstack/compute/ips.py index 6b67447c415..03e2354f858 100644 --- a/nova/api/openstack/compute/ips.py +++ b/nova/api/openstack/compute/ips.py @@ -15,42 +15,40 @@ from webob import exc -import nova from nova.api.openstack import common from nova.api.openstack.compute.views import addresses as views_addresses from nova.api.openstack import wsgi +from nova.compute import api as compute from nova.i18n import _ from nova.policies import ips as ips_policies class IPsController(wsgi.Controller): """The servers addresses API controller for the OpenStack API.""" - # Note(gmann): here using V2 view builder instead of V3 to have V2.1 - # server ips response same as V2 which does not include "OS-EXT-IPS:type" - # & "OS-EXT-IPS-MAC:mac_addr". If needed those can be added with - # microversion by using V2.1 view builder. 
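
The 2.66 index() above parses both timestamp filters and rejects an interval where changes-before predates changes-since. The same validation with stdlib datetimes; nova uses oslo.utils timeutils.parse_isotime, for which fromisoformat is a stand-in here:

    from datetime import datetime

    def validate_changes_filters(params):
        """Parse 'changes-since'/'changes-before' and reject an empty
        interval, mirroring the check in the 2.66 index() above."""
        parsed = {key: datetime.fromisoformat(params[key])
                  for key in ('changes-since', 'changes-before')
                  if key in params}
        if ('changes-since' in parsed and 'changes-before' in parsed and
                parsed['changes-before'] < parsed['changes-since']):
            raise ValueError('The value of changes-since must be less '
                             'than or equal to changes-before.')
        return parsed

    print(validate_changes_filters({
        'changes-since': '2021-01-01T00:00:00+00:00',
        'changes-before': '2021-06-01T00:00:00+00:00',
    }))
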
_view_builder_class = views_addresses.ViewBuilder - def __init__(self, **kwargs): - super(IPsController, self).__init__(**kwargs) - self._compute_api = nova.compute.API() + def __init__(self): + super(IPsController, self).__init__() + self._compute_api = compute.API() @wsgi.expected_errors(404) def index(self, req, server_id): context = req.environ["nova.context"] - context.can(ips_policies.POLICY_ROOT % 'index') instance = common.get_instance(self._compute_api, context, server_id) + context.can(ips_policies.POLICY_ROOT % 'index', + target={'project_id': instance.project_id}) networks = common.get_networks_for_instance(context, instance) - return self._view_builder.index(networks) + return self._view_builder.index(req, networks) @wsgi.expected_errors(404) def show(self, req, server_id, id): context = req.environ["nova.context"] - context.can(ips_policies.POLICY_ROOT % 'show') instance = common.get_instance(self._compute_api, context, server_id) + context.can(ips_policies.POLICY_ROOT % 'show', + target={'project_id': instance.project_id}) networks = common.get_networks_for_instance(context, instance) if id not in networks: msg = _("Instance is not a member of specified network") raise exc.HTTPNotFound(explanation=msg) - return self._view_builder.show(networks[id], id) + return self._view_builder.show(req, networks[id], id) diff --git a/nova/api/openstack/compute/keypairs.py b/nova/api/openstack/compute/keypairs.py index b3c7bbb111e..1fa1684322a 100644 --- a/nova/api/openstack/compute/keypairs.py +++ b/nova/api/openstack/compute/keypairs.py @@ -38,20 +38,8 @@ class KeypairController(wsgi.Controller): _view_builder_class = keypairs_view.ViewBuilder def __init__(self): - self.api = compute_api.KeypairAPI() super(KeypairController, self).__init__() - - def _filter_keypair(self, keypair, **attrs): - # TODO(claudiub): After v2 and v2.1 is no longer supported, - # keypair.type can be added to the clean dict below - clean = { - 'name': keypair.name, - 'public_key': keypair.public_key, - 'fingerprint': keypair.fingerprint, - } - for attr in attrs: - clean[attr] = keypair[attr] - return clean + self.api = compute_api.KeypairAPI() @wsgi.Controller.api_version("2.10") @wsgi.response(201) @@ -70,13 +58,13 @@ def create(self, req, body): """ # handle optional user-id for admin only user_id = body['keypair'].get('user_id') - return self._create(req, body, type=True, user_id=user_id) + return self._create(req, body, key_type=True, user_id=user_id) @wsgi.Controller.api_version("2.2", "2.9") # noqa @wsgi.response(201) @wsgi.expected_errors((400, 403, 409)) @validation.schema(keypairs.create_v22) - def create(self, req, body): + def create(self, req, body): # noqa """Create or import keypair. Sending name will generate a key and return private_key @@ -91,13 +79,13 @@ def create(self, req, body): public_key (optional) - string type (optional) - string """ - return self._create(req, body, type=True) + return self._create(req, body, key_type=True) @wsgi.Controller.api_version("2.1", "2.1") # noqa @wsgi.expected_errors((400, 403, 409)) @validation.schema(keypairs.create_v20, "2.0", "2.0") @validation.schema(keypairs.create, "2.1", "2.1") - def create(self, req, body): + def create(self, req, body): # noqa """Create or import keypair. 
Sending name will generate a key and return private_key @@ -111,32 +99,26 @@ def create(self, req, body): """ return self._create(req, body) - def _create(self, req, body, user_id=None, **keypair_filters): + def _create(self, req, body, user_id=None, key_type=False): context = req.environ['nova.context'] params = body['keypair'] name = common.normalize_name(params['name']) - key_type = params.get('type', keypair_obj.KEYPAIR_TYPE_SSH) + key_type_value = params.get('type', keypair_obj.KEYPAIR_TYPE_SSH) user_id = user_id or context.user_id context.can(kp_policies.POLICY_ROOT % 'create', - target={'user_id': user_id, - 'project_id': context.project_id}) + target={'user_id': user_id}) + return_priv_key = False try: if 'public_key' in params: - keypair = self.api.import_key_pair(context, - user_id, name, - params['public_key'], key_type) - keypair = self._filter_keypair(keypair, user_id=True, - **keypair_filters) + keypair = self.api.import_key_pair( + context, user_id, name, params['public_key'], + key_type_value) else: keypair, private_key = self.api.create_key_pair( - context, user_id, name, key_type) - keypair = self._filter_keypair(keypair, user_id=True, - **keypair_filters) + context, user_id, name, key_type_value) keypair['private_key'] = private_key - - return {'keypair': keypair} - + return_priv_key = True except exception.KeypairLimitExceeded: msg = _("Quota exceeded, too many key pairs.") raise webob.exc.HTTPForbidden(explanation=msg) @@ -145,6 +127,10 @@ def _create(self, req, body, user_id=None, **keypair_filters): except exception.KeyPairExists as exc: raise webob.exc.HTTPConflict(explanation=exc.format_message()) + return self._view_builder.create(keypair, + private_key=return_priv_key, + key_type=key_type) + @wsgi.Controller.api_version("2.1", "2.1") @validation.query_schema(keypairs.delete_query_schema_v20) @wsgi.response(202) @@ -156,14 +142,15 @@ def delete(self, req, id): @validation.query_schema(keypairs.delete_query_schema_v20) @wsgi.response(204) @wsgi.expected_errors(404) - def delete(self, req, id): + def delete(self, req, id): # noqa self._delete(req, id) @wsgi.Controller.api_version("2.10") # noqa - @validation.query_schema(keypairs.delete_query_schema_v210) + @validation.query_schema(keypairs.delete_query_schema_v275, '2.75') + @validation.query_schema(keypairs.delete_query_schema_v210, '2.10', '2.74') @wsgi.response(204) @wsgi.expected_errors(404) - def delete(self, req, id): + def delete(self, req, id): # noqa # handle optional user-id for admin only user_id = self._get_user_id(req) self._delete(req, id, user_id=user_id) @@ -174,8 +161,7 @@ def _delete(self, req, id, user_id=None): # handle optional user-id for admin only user_id = user_id or context.user_id context.can(kp_policies.POLICY_ROOT % 'delete', - target={'user_id': user_id, - 'project_id': context.project_id}) + target={'user_id': user_id}) try: self.api.delete_key_pair(context, user_id, id) except exception.KeypairNotFound as exc: @@ -187,82 +173,73 @@ def _get_user_id(self, req): return user_id @wsgi.Controller.api_version("2.10") - @validation.query_schema(keypairs.show_query_schema_v210) + @validation.query_schema(keypairs.show_query_schema_v275, '2.75') + @validation.query_schema(keypairs.show_query_schema_v210, '2.10', '2.74') @wsgi.expected_errors(404) def show(self, req, id): # handle optional user-id for admin only user_id = self._get_user_id(req) - return self._show(req, id, type=True, user_id=user_id) + return self._show(req, id, key_type=True, user_id=user_id) 
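
Several controllers above now stack @validation.query_schema decorators with version ranges, for example delete_query_schema_v275 from '2.75' onward and delete_query_schema_v210 for '2.10' through '2.74', so each request is validated by exactly one schema. A small sketch of that dispatch by version tuple; the range table is illustrative, and the upper bound on the first row is assumed because the real decorator treats a missing maximum as open-ended:

    def parse_version(version):
        major, minor = version.split('.')
        return int(major), int(minor)

    def pick_schema(request_version, ranges):
        """Return the schema whose [min, max] range contains the request
        microversion; first match wins."""
        version = parse_version(request_version)
        for vmin, vmax, schema in ranges:
            if parse_version(vmin) <= version <= parse_version(vmax):
                return schema
        raise LookupError(request_version)

    RANGES = [
        ('2.75', '2.99', 'delete_query_schema_v275'),  # upper bound assumed
        ('2.10', '2.74', 'delete_query_schema_v210'),
    ]
    print(pick_schema('2.50', RANGES))  # delete_query_schema_v210
    print(pick_schema('2.75', RANGES))  # delete_query_schema_v275
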
@wsgi.Controller.api_version("2.2", "2.9") # noqa @validation.query_schema(keypairs.show_query_schema_v20) @wsgi.expected_errors(404) - def show(self, req, id): - return self._show(req, id, type=True) + def show(self, req, id): # noqa + return self._show(req, id, key_type=True) @wsgi.Controller.api_version("2.1", "2.1") # noqa @validation.query_schema(keypairs.show_query_schema_v20) @wsgi.expected_errors(404) - def show(self, req, id): + def show(self, req, id): # noqa return self._show(req, id) - def _show(self, req, id, user_id=None, **keypair_filters): + def _show(self, req, id, key_type=False, user_id=None): """Return data for the given key name.""" context = req.environ['nova.context'] user_id = user_id or context.user_id context.can(kp_policies.POLICY_ROOT % 'show', - target={'user_id': user_id, - 'project_id': context.project_id}) + target={'user_id': user_id}) try: - # The return object needs to be a dict in order to pop the 'type' - # field, if the api_version < 2.2. keypair = self.api.get_key_pair(context, user_id, id) - keypair = self._filter_keypair(keypair, created_at=True, - deleted=True, deleted_at=True, - id=True, user_id=True, - updated_at=True, **keypair_filters) except exception.KeypairNotFound as exc: raise webob.exc.HTTPNotFound(explanation=exc.format_message()) - # TODO(oomichi): It is necessary to filter a response of keypair with - # _filter_keypair() when v2.1+microversions for implementing consistent - # behaviors in this keypair resource. - return {'keypair': keypair} + return self._view_builder.show(keypair, key_type=key_type) @wsgi.Controller.api_version("2.35") - @validation.query_schema(keypairs.index_query_schema_v235) + @validation.query_schema(keypairs.index_query_schema_v275, '2.75') + @validation.query_schema(keypairs.index_query_schema_v235, '2.35', '2.74') @wsgi.expected_errors(400) def index(self, req): user_id = self._get_user_id(req) - return self._index(req, links=True, type=True, user_id=user_id) + return self._index(req, key_type=True, user_id=user_id, links=True) @wsgi.Controller.api_version("2.10", "2.34") # noqa @validation.query_schema(keypairs.index_query_schema_v210) @wsgi.expected_errors(()) - def index(self, req): + def index(self, req): # noqa # handle optional user-id for admin only user_id = self._get_user_id(req) - return self._index(req, type=True, user_id=user_id) + return self._index(req, key_type=True, user_id=user_id) @wsgi.Controller.api_version("2.2", "2.9") # noqa @validation.query_schema(keypairs.index_query_schema_v20) @wsgi.expected_errors(()) - def index(self, req): - return self._index(req, type=True) + def index(self, req): # noqa + return self._index(req, key_type=True) @wsgi.Controller.api_version("2.1", "2.1") # noqa @validation.query_schema(keypairs.index_query_schema_v20) @wsgi.expected_errors(()) - def index(self, req): + def index(self, req): # noqa return self._index(req) - def _index(self, req, user_id=None, links=False, **keypair_filters): + def _index(self, req, key_type=False, user_id=None, links=False): """List of keypairs for a user.""" context = req.environ['nova.context'] user_id = user_id or context.user_id context.can(kp_policies.POLICY_ROOT % 'index', - target={'user_id': user_id, - 'project_id': context.project_id}) + target={'user_id': user_id}) if api_version_request.is_supported(req, min_version='2.35'): limit, marker = common.get_limit_and_marker(req) @@ -275,45 +252,5 @@ def _index(self, req, user_id=None, links=False, **keypair_filters): except exception.MarkerNotFound as e: raise 
webob.exc.HTTPBadRequest(explanation=e.format_message()) - key_pairs = [self._filter_keypair(key_pair, **keypair_filters) - for key_pair in key_pairs] - - keypairs_list = [{'keypair': key_pair} for key_pair in key_pairs] - keypairs_dict = {'keypairs': keypairs_list} - - if links: - keypairs_links = self._view_builder.get_links(req, key_pairs) - - if keypairs_links: - keypairs_dict['keypairs_links'] = keypairs_links - - return keypairs_dict - - -class Controller(wsgi.Controller): - - def _add_key_name(self, req, servers): - for server in servers: - db_server = req.get_db_instance(server['id']) - # server['id'] is guaranteed to be in the cache due to - # the core API adding it in its 'show'/'detail' methods. - server['key_name'] = db_server['key_name'] - - def _show(self, req, resp_obj): - if 'server' in resp_obj.obj: - server = resp_obj.obj['server'] - self._add_key_name(req, [server]) - - @wsgi.extends - def show(self, req, resp_obj, id): - context = req.environ['nova.context'] - if context.can(kp_policies.BASE_POLICY_NAME, fatal=False): - self._show(req, resp_obj) - - @wsgi.extends - def detail(self, req, resp_obj): - context = req.environ['nova.context'] - if 'servers' in resp_obj.obj and context.can( - kp_policies.BASE_POLICY_NAME, fatal=False): - servers = resp_obj.obj['servers'] - self._add_key_name(req, servers) + return self._view_builder.index(req, key_pairs, key_type=key_type, + links=links) diff --git a/nova/api/openstack/compute/limits.py b/nova/api/openstack/compute/limits.py index 78e1e24066f..e6f901e09dc 100644 --- a/nova/api/openstack/compute/limits.py +++ b/nova/api/openstack/compute/limits.py @@ -51,34 +51,38 @@ def index(self, req): return self._index(req) @wsgi.Controller.api_version(MIN_WITHOUT_PROXY_API_SUPPORT_VERSION, # noqa - MAX_IMAGE_META_PROXY_API_VERSION) # noqa + MAX_IMAGE_META_PROXY_API_VERSION) @wsgi.expected_errors(()) @validation.query_schema(limits.limits_query_schema) - def index(self, req): + def index(self, req): # noqa return self._index(req, FILTERED_LIMITS_2_36) @wsgi.Controller.api_version( # noqa - MIN_WITHOUT_IMAGE_META_PROXY_API_VERSION, '2.56') # noqa + MIN_WITHOUT_IMAGE_META_PROXY_API_VERSION, '2.56') @wsgi.expected_errors(()) @validation.query_schema(limits.limits_query_schema) - def index(self, req): + def index(self, req): # noqa return self._index(req, FILTERED_LIMITS_2_36, max_image_meta=False) @wsgi.Controller.api_version('2.57') # noqa @wsgi.expected_errors(()) - @validation.query_schema(limits.limits_query_schema) - def index(self, req): + @validation.query_schema(limits.limits_query_schema_275, '2.75') + @validation.query_schema(limits.limits_query_schema, '2.57', '2.74') + def index(self, req): # noqa return self._index(req, FILTERED_LIMITS_2_57, max_image_meta=False) def _index(self, req, filtered_limits=None, max_image_meta=True): """Return all global limit information.""" context = req.environ['nova.context'] - context.can(limits_policies.BASE_POLICY_NAME) - project_id = req.params.get('tenant_id', context.project_id) - quotas = QUOTAS.get_project_quotas(context, project_id, - usages=False) - abs_limits = {k: v['limit'] for k, v in quotas.items()} + context.can(limits_policies.BASE_POLICY_NAME, target={}) + project_id = context.project_id + if 'tenant_id' in req.GET: + project_id = req.GET.get('tenant_id') + context.can(limits_policies.OTHER_PROJECT_LIMIT_POLICY_NAME, + target={'project_id': project_id}) + quotas = QUOTAS.get_project_quotas(context, project_id, + usages=True) builder = limits_views.ViewBuilder() - return 
builder.build(abs_limits, filtered_limits=filtered_limits, + return builder.build(req, quotas, filtered_limits=filtered_limits, max_image_meta=max_image_meta) diff --git a/nova/api/openstack/compute/lock_server.py b/nova/api/openstack/compute/lock_server.py index 39d48d1d3cb..5615ba15e8e 100644 --- a/nova/api/openstack/compute/lock_server.py +++ b/nova/api/openstack/compute/lock_server.py @@ -13,20 +13,24 @@ # License for the specific language governing permissions and limitations # under the License. +from nova.api.openstack import api_version_request from nova.api.openstack import common +from nova.api.openstack.compute.schemas import lock_server from nova.api.openstack import wsgi -from nova import compute +from nova.api import validation +from nova.compute import api as compute from nova.policies import lock_server as ls_policies class LockServerController(wsgi.Controller): - def __init__(self, *args, **kwargs): - super(LockServerController, self).__init__(*args, **kwargs) + def __init__(self): + super(LockServerController, self).__init__() self.compute_api = compute.API() @wsgi.response(202) @wsgi.expected_errors(404) @wsgi.action('lock') + @validation.schema(lock_server.lock_v2_73, "2.73") def _lock(self, req, id, body): """Lock a server instance.""" context = req.environ['nova.context'] @@ -34,7 +38,11 @@ def _lock(self, req, id, body): context.can(ls_policies.POLICY_ROOT % 'lock', target={'user_id': instance.user_id, 'project_id': instance.project_id}) - self.compute_api.lock(context, instance) + reason = None + if (api_version_request.is_supported(req, min_version='2.73') and + body['lock'] is not None): + reason = body['lock'].get('locked_reason') + self.compute_api.lock(context, instance, reason=reason) @wsgi.response(202) @wsgi.expected_errors(404) @@ -42,10 +50,11 @@ def _lock(self, req, id, body): def _unlock(self, req, id, body): """Unlock a server instance.""" context = req.environ['nova.context'] - context.can(ls_policies.POLICY_ROOT % 'unlock') instance = common.get_instance(self.compute_api, context, id) + context.can(ls_policies.POLICY_ROOT % 'unlock', + target={'project_id': instance.project_id}) if not self.compute_api.is_expected_locked_by(context, instance): context.can(ls_policies.POLICY_ROOT % 'unlock:unlock_override', - instance) + target={'project_id': instance.project_id}) self.compute_api.unlock(context, instance) diff --git a/nova/api/openstack/compute/migrate_server.py b/nova/api/openstack/compute/migrate_server.py index 43beee99122..855f51a7c64 100644 --- a/nova/api/openstack/compute/migrate_server.py +++ b/nova/api/openstack/compute/migrate_server.py @@ -23,7 +23,7 @@ from nova.api.openstack.compute.schemas import migrate_server from nova.api.openstack import wsgi from nova.api import validation -from nova import compute +from nova.compute import api as compute from nova import exception from nova.i18n import _ from nova.policies import migrate_server as ms_policies @@ -32,8 +32,8 @@ class MigrateServerController(wsgi.Controller): - def __init__(self, *args, **kwargs): - super(MigrateServerController, self).__init__(*args, **kwargs) + def __init__(self): + super(MigrateServerController, self).__init__() self.compute_api = compute.API() @wsgi.response(202) @@ -43,41 +43,60 @@ def __init__(self, *args, **kwargs): def _migrate(self, req, id, body): """Permit admins to migrate a server to a new host.""" context = req.environ['nova.context'] - context.can(ms_policies.POLICY_ROOT % 'migrate') + + instance = common.get_instance(self.compute_api, context, id, + 
expected_attrs=['flavor', 'services']) + context.can(ms_policies.POLICY_ROOT % 'migrate', + target={'project_id': instance.project_id}) host_name = None if (api_version_request.is_supported(req, min_version='2.56') and body['migrate'] is not None): host_name = body['migrate'].get('host') - instance = common.get_instance(self.compute_api, context, id) try: self.compute_api.resize(req.environ['nova.context'], instance, host_name=host_name) except (exception.TooManyInstances, exception.QuotaError) as e: raise exc.HTTPForbidden(explanation=e.format_message()) - except (exception.InstanceIsLocked, - exception.CannotMigrateWithTargetHost) as e: + except ( + exception.InstanceIsLocked, + exception.InstanceNotReady, + exception.ServiceUnavailable, + ) as e: raise exc.HTTPConflict(explanation=e.format_message()) except exception.InstanceInvalidState as state_error: common.raise_http_conflict_for_instance_invalid_state(state_error, 'migrate', id) except exception.InstanceNotFound as e: raise exc.HTTPNotFound(explanation=e.format_message()) - except (exception.NoValidHost, exception.ComputeHostNotFound, - exception.CannotMigrateToSameHost) as e: + except ( + exception.ComputeHostNotFound, + exception.CannotMigrateToSameHost, + exception.ForbiddenPortsWithAccelerator, + exception.ExtendedResourceRequestOldCompute, + ) as e: raise exc.HTTPBadRequest(explanation=e.format_message()) @wsgi.response(202) - @wsgi.expected_errors((400, 404, 409)) + @wsgi.expected_errors((400, 403, 404, 409)) @wsgi.action('os-migrateLive') @validation.schema(migrate_server.migrate_live, "2.0", "2.24") @validation.schema(migrate_server.migrate_live_v2_25, "2.25", "2.29") - @validation.schema(migrate_server.migrate_live_v2_30, "2.30") + @validation.schema(migrate_server.migrate_live_v2_30, "2.30", "2.67") + @validation.schema(migrate_server.migrate_live_v2_68, "2.68") def _migrate_live(self, req, id, body): """Permit admins to (live) migrate a server to a new host.""" context = req.environ["nova.context"] - context.can(ms_policies.POLICY_ROOT % 'migrate_live') + + # NOTE(stephenfin): we need 'numa_topology' because of the + # 'LiveMigrationTask._check_instance_has_no_numa' check in the + # conductor + instance = common.get_instance(self.compute_api, context, id, + expected_attrs=['numa_topology']) + + context.can(ms_policies.POLICY_ROOT % 'migrate_live', + target={'project_id': instance.project_id}) host = body["os-migrateLive"]["host"] block_migration = body["os-migrateLive"]["block_migration"] @@ -100,13 +119,10 @@ def _migrate_live(self, req, id, body): disk_over_commit = strutils.bool_from_string(disk_over_commit, strict=True) - instance = common.get_instance(self.compute_api, context, id) try: self.compute_api.live_migrate(context, instance, block_migration, disk_over_commit, host, force, async_) - except exception.InstanceUnknownCell as e: - raise exc.HTTPNotFound(explanation=e.format_message()) except (exception.NoValidHost, exception.ComputeServiceUnavailable, exception.InvalidHypervisorType, @@ -116,7 +132,8 @@ def _migrate_live(self, req, id, body): exception.InvalidLocalStorage, exception.InvalidSharedStorage, exception.HypervisorUnavailable, - exception.MigrationPreCheckError) as ex: + exception.MigrationPreCheckError, + exception.ForbiddenPortsWithAccelerator) as ex: if async_: with excutils.save_and_reraise_exception(): LOG.error("Unexpected exception received from " @@ -126,7 +143,10 @@ def _migrate_live(self, req, id, body): raise exc.HTTPBadRequest(explanation=ex.format_message()) except 
exception.InstanceIsLocked as e:
             raise exc.HTTPConflict(explanation=e.format_message())
-        except exception.ComputeHostNotFound as e:
+        except (
+            exception.ComputeHostNotFound,
+            exception.ExtendedResourceRequestOldCompute,
+        ) as e:
             raise exc.HTTPBadRequest(explanation=e.format_message())
         except exception.InstanceInvalidState as state_error:
             common.raise_http_conflict_for_instance_invalid_state(state_error,
diff --git a/nova/api/openstack/compute/migrations.py b/nova/api/openstack/compute/migrations.py
index e4a6a6edfbd..cb97a1498af 100644
--- a/nova/api/openstack/compute/migrations.py
+++ b/nova/api/openstack/compute/migrations.py
@@ -13,14 +13,17 @@
 from oslo_utils import timeutils
 from webob import exc
 
+from nova.api.openstack import api_version_request
 from nova.api.openstack import common
 from nova.api.openstack.compute.schemas import migrations as schema_migrations
 from nova.api.openstack.compute.views import migrations as migrations_view
 from nova.api.openstack import wsgi
 from nova.api import validation
-from nova import compute
+from nova.compute import api as compute
 from nova import exception
+from nova.i18n import _
 from nova.objects import base as obj_base
+from nova.objects import fields
 from nova.policies import migrations as migrations_policies
 
 
@@ -34,7 +37,8 @@ def __init__(self):
         super(MigrationsController, self).__init__()
         self.compute_api = compute.API()
 
-    def _output(self, req, migrations_obj, add_link=False, add_uuid=False):
+    def _output(self, req, migrations_obj, add_link=False,
+                add_uuid=False, add_user_project=False):
         """Returns the desired output of the API from an object.
 
         From a MigrationsList's object this method returns a list of
@@ -55,15 +59,23 @@ def _output(self, req, migrations_obj, add_link=False, add_uuid=False):
             del obj['deleted']
             del obj['deleted_at']
             del obj['hidden']
+            del obj['cross_cell_move']
             if not add_uuid:
                 del obj['uuid']
             if 'memory_total' in obj:
                 for key in detail_keys:
                     del obj[key]
+            if not add_user_project:
+                if 'user_id' in obj:
+                    del obj['user_id']
+                if 'project_id' in obj:
+                    del obj['project_id']
             # NOTE(Shaohe Feng) above version 2.23, add migration_type for all
             # kinds of migration, but we only add links just for in-progress
             # live-migration.
-            if add_link and obj['migration_type'] == "live-migration" and (
+            if (add_link and
+                    obj['migration_type'] ==
+                    fields.MigrationType.LIVE_MIGRATION and
                     obj["status"] in live_migration_in_progress):
                 obj["links"] = self._view_builder._get_links(
                     req, obj["id"],
@@ -75,9 +87,9 @@ def _output(self, req, migrations_obj, add_link=False, add_uuid=False):
 
     def _index(self, req, add_link=False, next_link=False, add_uuid=False,
                sort_dirs=None, sort_keys=None, limit=None, marker=None,
-               allow_changes_since=False):
+               allow_changes_since=False, allow_changes_before=False):
         context = req.environ['nova.context']
-        context.can(migrations_policies.POLICY_ROOT % 'index')
+        context.can(migrations_policies.POLICY_ROOT % 'index', target={})
         search_opts = {}
         search_opts.update(req.GET)
         if 'changes-since' in search_opts:
@@ -92,6 +104,23 @@ def _index(self, req, add_link=False, next_link=False, add_uuid=False,
                 # it from search_opts.
del search_opts['changes-since'] + if 'changes-before' in search_opts: + if allow_changes_before: + search_opts['changes-before'] = timeutils.parse_isotime( + search_opts['changes-before']) + changes_since = search_opts.get('changes-since') + if (changes_since and search_opts['changes-before'] < + search_opts['changes-since']): + msg = _('The value of changes-since must be less than ' + 'or equal to changes-before.') + raise exc.HTTPBadRequest(explanation=msg) + else: + # Before microversion 2.59 the schema allowed + # additionalProperties=True, so a user could pass + # changes-before before 2.59 and filter by the updated_at + # field if we don't remove it from search_opts. + del search_opts['changes-before'] + if sort_keys: try: migrations = self.compute_api.get_migrations_sorted( @@ -104,7 +133,9 @@ def _index(self, req, add_link=False, next_link=False, add_uuid=False, migrations = self.compute_api.get_migrations( context, search_opts) - migrations = self._output(req, migrations, add_link, add_uuid) + add_user_project = api_version_request.is_supported(req, '2.80') + migrations = self._output(req, migrations, add_link, + add_uuid, add_user_project) migrations_dict = {'migrations': migrations} if next_link: @@ -125,15 +156,15 @@ def index(self, req): @wsgi.expected_errors(()) @validation.query_schema(schema_migrations.list_query_schema_v20, "2.23", "2.58") - def index(self, req): + def index(self, req): # noqa """Return all migrations using the query parameters as filters.""" return self._index(req, add_link=True) - @wsgi.Controller.api_version("2.59") # noqa + @wsgi.Controller.api_version("2.59", "2.65") # noqa @wsgi.expected_errors(400) @validation.query_schema(schema_migrations.list_query_params_v259, - "2.59") - def index(self, req): + "2.59", "2.65") + def index(self, req): # noqa """Return all migrations using the query parameters as filters.""" limit, marker = common.get_limit_and_marker(req) return self._index(req, add_link=True, next_link=True, add_uuid=True, @@ -141,3 +172,19 @@ def index(self, req): sort_dirs=['desc', 'desc'], limit=limit, marker=marker, allow_changes_since=True) + + @wsgi.Controller.api_version("2.66") # noqa + @wsgi.expected_errors(400) + @validation.query_schema(schema_migrations.list_query_params_v266, + "2.66", "2.79") + @validation.query_schema(schema_migrations.list_query_params_v280, + "2.80") + def index(self, req): # noqa + """Return all migrations using the query parameters as filters.""" + limit, marker = common.get_limit_and_marker(req) + return self._index(req, add_link=True, next_link=True, add_uuid=True, + sort_keys=['created_at', 'id'], + sort_dirs=['desc', 'desc'], + limit=limit, marker=marker, + allow_changes_since=True, + allow_changes_before=True) diff --git a/nova/api/openstack/compute/multinic.py b/nova/api/openstack/compute/multinic.py index e554192ed30..afe11e0baa1 100644 --- a/nova/api/openstack/compute/multinic.py +++ b/nova/api/openstack/compute/multinic.py @@ -21,7 +21,7 @@ from nova.api.openstack.compute.schemas import multinic from nova.api.openstack import wsgi from nova.api import validation -from nova import compute +from nova.compute import api as compute from nova import exception from nova.policies import multinic as multinic_policies @@ -29,8 +29,8 @@ class MultinicController(wsgi.Controller): """This API is deprecated from Microversion '2.44'.""" - def __init__(self, *args, **kwargs): - super(MultinicController, self).__init__(*args, **kwargs) + def __init__(self): + super(MultinicController, self).__init__() 
         self.compute_api = compute.API()
 
     @wsgi.Controller.api_version("2.1", "2.43")
@@ -41,14 +41,13 @@ def __init__(self, *args, **kwargs):
     def _add_fixed_ip(self, req, id, body):
         """Adds an IP on a given network to an instance."""
         context = req.environ['nova.context']
-        context.can(multinic_policies.BASE_POLICY_NAME)
         instance = common.get_instance(self.compute_api, context, id)
+        context.can(multinic_policies.BASE_POLICY_NAME % 'add',
+                    target={'project_id': instance.project_id})
+
         network_id = body['addFixedIp']['networkId']
         try:
             self.compute_api.add_fixed_ip(context, instance, network_id)
-        except exception.InstanceUnknownCell as e:
-            raise exc.HTTPNotFound(explanation=e.format_message())
         except exception.NoMoreFixedIps as e:
             raise exc.HTTPBadRequest(explanation=e.format_message())
 
@@ -60,14 +59,13 @@ def _add_fixed_ip(self, req, id, body):
     def _remove_fixed_ip(self, req, id, body):
         """Removes an IP from an instance."""
         context = req.environ['nova.context']
-        context.can(multinic_policies.BASE_POLICY_NAME)
         instance = common.get_instance(self.compute_api, context, id)
+        context.can(multinic_policies.BASE_POLICY_NAME % 'remove',
+                    target={'project_id': instance.project_id})
+
         address = body['removeFixedIp']['address']
         try:
             self.compute_api.remove_fixed_ip(context, instance, address)
-        except exception.InstanceUnknownCell as e:
-            raise exc.HTTPNotFound(explanation=e.format_message())
-        except exception.FixedIpNotFoundForSpecificInstance as e:
+        except exception.FixedIpNotFoundForInstance as e:
             raise exc.HTTPBadRequest(explanation=e.format_message())
diff --git a/nova/api/openstack/compute/networks.py b/nova/api/openstack/compute/networks.py
index cff450941b4..457febe44f2 100644
--- a/nova/api/openstack/compute/networks.py
+++ b/nova/api/openstack/compute/networks.py
@@ -14,24 +14,21 @@
 # License for the specific language governing permissions and limitations
 # under the License.
 
-import netaddr
 from webob import exc
 
 from nova.api.openstack.api_version_request \
     import MAX_PROXY_API_SUPPORT_VERSION
-from nova.api.openstack import common
-from nova.api.openstack.compute.schemas import networks as schema
 from nova.api.openstack import wsgi
-from nova.api import validation
 from nova import exception
 from nova.i18n import _
-from nova import network
-from nova.objects import base as base_obj
-from nova.objects import fields as obj_fields
+from nova.network import neutron
 from nova.policies import networks as net_policies
 
 
 def network_dict(context, network):
+    if not network:
+        return {}
+
     fields = ('id', 'cidr', 'netmask', 'gateway', 'broadcast', 'dns1', 'dns2',
               'cidr_v6', 'gateway_v6', 'label', 'netmask_v6')
     admin_fields = ('created_at', 'updated_at', 'deleted_at', 'deleted',
@@ -40,77 +37,51 @@ def network_dict(context, network):
                     'project_id', 'host', 'bridge_interface', 'multi_host',
                     'priority', 'rxtx_base', 'mtu', 'dhcp_server',
                     'enable_dhcp', 'share_address')
-    if network:
-        # NOTE(mnaser): We display a limited set of fields so users can know
-        #               what networks are available, extra system-only fields
-        #               are only visible if they are an admin.
-        if context.is_admin:
-            fields += admin_fields
-        # TODO(mriedem): Remove the NovaObject type check once the
-        # network.create API is returning objects.
-        is_obj = isinstance(network, base_obj.NovaObject)
-        result = {}
-        for field in fields:
-            # NOTE(mriedem): If network is an object, IPAddress fields need to
-            # be cast to a string so they look the same in the response as
-            # before the objects conversion.
- if is_obj and isinstance(network.fields[field].AUTO_TYPE, - obj_fields.IPAddress): - # NOTE(danms): Here, network should be an object, which could - # have come from neutron and thus be missing most of the - # attributes. Providing a default to get() avoids trying to - # lazy-load missing attributes. - val = network.get(field, None) - if val is not None: - result[field] = str(val) - else: - result[field] = val - else: - # It's either not an object or it's not an IPAddress field. - result[field] = network.get(field, None) - uuid = network.get('uuid') - if uuid: - result['id'] = uuid - return result - else: - return {} + + # NOTE(mnaser): We display a limited set of fields so users can know what + # networks are available, extra system-only fields are only visible if they + # are an admin. + + if context.is_admin: + fields += admin_fields + + result = {} + for field in fields: + # we only provide a limited number of fields now that nova-network is + # gone (yes, two fields of thirty) + if field == 'id': + result[field] = network['id'] + elif field == 'label': + result[field] = network['name'] + else: + result[field] = None + + return result class NetworkController(wsgi.Controller): def __init__(self, network_api=None): - self.network_api = network_api or network.API() + super(NetworkController, self).__init__() + # TODO(stephenfin): 'network_api' is only being passed for use by tests + self.network_api = network_api or neutron.API() @wsgi.Controller.api_version("2.1", MAX_PROXY_API_SUPPORT_VERSION) @wsgi.expected_errors(()) def index(self, req): context = req.environ['nova.context'] - context.can(net_policies.POLICY_ROOT % 'view') + context.can(net_policies.POLICY_ROOT % 'list', + target={'project_id': context.project_id}) networks = self.network_api.get_all(context) result = [network_dict(context, net_ref) for net_ref in networks] return {'networks': result} - @wsgi.Controller.api_version("2.1", MAX_PROXY_API_SUPPORT_VERSION) - @wsgi.response(202) - @wsgi.expected_errors((404, 501)) - @wsgi.action("disassociate") - def _disassociate_host_and_project(self, req, id, body): - context = req.environ['nova.context'] - context.can(net_policies.BASE_POLICY_NAME) - - try: - self.network_api.associate(context, id, host=None, project=None) - except exception.NetworkNotFound: - msg = _("Network not found") - raise exc.HTTPNotFound(explanation=msg) - except NotImplementedError: - common.raise_feature_not_supported() - @wsgi.Controller.api_version("2.1", MAX_PROXY_API_SUPPORT_VERSION) @wsgi.expected_errors(404) def show(self, req, id): context = req.environ['nova.context'] - context.can(net_policies.POLICY_ROOT % 'view') + context.can(net_policies.POLICY_ROOT % 'show', + target={'project_id': context.project_id}) try: network = self.network_api.get(context, id) @@ -119,63 +90,19 @@ def show(self, req, id): raise exc.HTTPNotFound(explanation=msg) return {'network': network_dict(context, network)} - @wsgi.Controller.api_version("2.1", MAX_PROXY_API_SUPPORT_VERSION) - @wsgi.response(202) - @wsgi.expected_errors((404, 409)) - def delete(self, req, id): - context = req.environ['nova.context'] - context.can(net_policies.BASE_POLICY_NAME) + @wsgi.expected_errors(410) + @wsgi.action("disassociate") + def _disassociate_host_and_project(self, req, id, body): + raise exc.HTTPGone() - try: - self.network_api.delete(context, id) - except exception.NetworkInUse as e: - raise exc.HTTPConflict(explanation=e.format_message()) - except exception.NetworkNotFound: - msg = _("Network not found") - raise 
exc.HTTPNotFound(explanation=msg) + @wsgi.expected_errors(410) + def delete(self, req, id): + raise exc.HTTPGone() - @wsgi.Controller.api_version("2.1", MAX_PROXY_API_SUPPORT_VERSION) - @wsgi.expected_errors((400, 409, 501)) - @validation.schema(schema.create) + @wsgi.expected_errors(410) def create(self, req, body): - context = req.environ['nova.context'] - context.can(net_policies.BASE_POLICY_NAME) - - params = body["network"] + raise exc.HTTPGone() - cidr = params.get("cidr") or params.get("cidr_v6") - - params["num_networks"] = 1 - params["network_size"] = netaddr.IPNetwork(cidr).size - - try: - network = self.network_api.create(context, **params)[0] - except (exception.InvalidCidr, - exception.InvalidIntValue, - exception.InvalidAddress, - exception.NetworkNotCreated) as ex: - raise exc.HTTPBadRequest(explanation=ex.format_message) - except (exception.CidrConflict, - exception.DuplicateVlan) as ex: - raise exc.HTTPConflict(explanation=ex.format_message()) - return {"network": network_dict(context, network)} - - @wsgi.Controller.api_version("2.1", MAX_PROXY_API_SUPPORT_VERSION) - @wsgi.response(202) - @wsgi.expected_errors((400, 501)) - @validation.schema(schema.add_network_to_project) + @wsgi.expected_errors(410) def add(self, req, body): - context = req.environ['nova.context'] - context.can(net_policies.BASE_POLICY_NAME) - - network_id = body['id'] - project_id = context.project_id - - try: - self.network_api.add_network_to_project( - context, project_id, network_id) - except NotImplementedError: - common.raise_feature_not_supported() - except (exception.NoMoreNetworks, - exception.NetworkNotFoundForUUID) as e: - raise exc.HTTPBadRequest(explanation=e.format_message()) + raise exc.HTTPGone() diff --git a/nova/api/openstack/compute/networks_associate.py b/nova/api/openstack/compute/networks_associate.py index 27a128d92c3..e49b1ae9e40 100644 --- a/nova/api/openstack/compute/networks_associate.py +++ b/nova/api/openstack/compute/networks_associate.py @@ -12,68 +12,23 @@ from webob import exc -from nova.api.openstack.api_version_request \ - import MAX_PROXY_API_SUPPORT_VERSION -from nova.api.openstack import common -from nova.api.openstack.compute.schemas import networks_associate from nova.api.openstack import wsgi -from nova.api import validation -from nova import exception -from nova.i18n import _ -from nova import network -from nova.policies import networks_associate as na_policies class NetworkAssociateActionController(wsgi.Controller): """Network Association API Controller.""" - def __init__(self, network_api=None): - self.network_api = network_api or network.API() - - @wsgi.Controller.api_version("2.1", MAX_PROXY_API_SUPPORT_VERSION) @wsgi.action("disassociate_host") - @wsgi.response(202) - @wsgi.expected_errors((404, 501)) + @wsgi.expected_errors(410) def _disassociate_host_only(self, req, id, body): - context = req.environ['nova.context'] - context.can(na_policies.BASE_POLICY_NAME) - try: - self.network_api.associate(context, id, host=None) - except exception.NetworkNotFound: - msg = _("Network not found") - raise exc.HTTPNotFound(explanation=msg) - except NotImplementedError: - common.raise_feature_not_supported() + raise exc.HTTPGone() - @wsgi.Controller.api_version("2.1", MAX_PROXY_API_SUPPORT_VERSION) @wsgi.action("disassociate_project") - @wsgi.response(202) - @wsgi.expected_errors((404, 501)) + @wsgi.expected_errors(410) def _disassociate_project_only(self, req, id, body): - context = req.environ['nova.context'] - context.can(na_policies.BASE_POLICY_NAME) - try: - 
self.network_api.associate(context, id, project=None) - except exception.NetworkNotFound: - msg = _("Network not found") - raise exc.HTTPNotFound(explanation=msg) - except NotImplementedError: - common.raise_feature_not_supported() + raise exc.HTTPGone() - @wsgi.Controller.api_version("2.1", MAX_PROXY_API_SUPPORT_VERSION) @wsgi.action("associate_host") - @wsgi.response(202) - @wsgi.expected_errors((404, 501)) - @validation.schema(networks_associate.associate_host) + @wsgi.expected_errors(410) def _associate_host(self, req, id, body): - context = req.environ['nova.context'] - context.can(na_policies.BASE_POLICY_NAME) - - try: - self.network_api.associate(context, id, - host=body['associate_host']) - except exception.NetworkNotFound: - msg = _("Network not found") - raise exc.HTTPNotFound(explanation=msg) - except NotImplementedError: - common.raise_feature_not_supported() + raise exc.HTTPGone() diff --git a/nova/api/openstack/compute/pause_server.py b/nova/api/openstack/compute/pause_server.py index ee6852e02bf..4d6bbc6cf14 100644 --- a/nova/api/openstack/compute/pause_server.py +++ b/nova/api/openstack/compute/pause_server.py @@ -17,14 +17,14 @@ from nova.api.openstack import common from nova.api.openstack import wsgi -from nova import compute +from nova.compute import api as compute from nova import exception from nova.policies import pause_server as ps_policies class PauseServerController(wsgi.Controller): - def __init__(self, *args, **kwargs): - super(PauseServerController, self).__init__(*args, **kwargs) + def __init__(self): + super(PauseServerController, self).__init__() self.compute_api = compute.API() @wsgi.response(202) @@ -44,8 +44,7 @@ def _pause(self, req, id, body): except exception.InstanceInvalidState as state_error: common.raise_http_conflict_for_instance_invalid_state(state_error, 'pause', id) - except (exception.InstanceUnknownCell, - exception.InstanceNotFound) as e: + except exception.InstanceNotFound as e: raise exc.HTTPNotFound(explanation=e.format_message()) except NotImplementedError: common.raise_feature_not_supported() @@ -56,8 +55,9 @@ def _pause(self, req, id, body): def _unpause(self, req, id, body): """Permit Admins to unpause the server.""" ctxt = req.environ['nova.context'] - ctxt.can(ps_policies.POLICY_ROOT % 'unpause') server = common.get_instance(self.compute_api, ctxt, id) + ctxt.can(ps_policies.POLICY_ROOT % 'unpause', + target={'project_id': server.project_id}) try: self.compute_api.unpause(ctxt, server) except exception.InstanceIsLocked as e: @@ -65,8 +65,7 @@ def _unpause(self, req, id, body): except exception.InstanceInvalidState as state_error: common.raise_http_conflict_for_instance_invalid_state(state_error, 'unpause', id) - except (exception.InstanceUnknownCell, - exception.InstanceNotFound) as e: + except exception.InstanceNotFound as e: raise exc.HTTPNotFound(explanation=e.format_message()) except NotImplementedError: common.raise_feature_not_supported() diff --git a/nova/api/openstack/compute/quota_classes.py b/nova/api/openstack/compute/quota_classes.py index 3a0f57bc967..dddba2c5993 100644 --- a/nova/api/openstack/compute/quota_classes.py +++ b/nova/api/openstack/compute/quota_classes.py @@ -34,7 +34,7 @@ # NOTE(gmann): Network related quotas are filter out in # microversion 2.50. Bug#1701211. -FILTERED_QUOTAS_2_50 = ["fixed_ips", "floating_ips", "networks", +FILTERED_QUOTAS_2_50 = ["fixed_ips", "floating_ips", "security_group_rules", "security_groups"] # Microversion 2.57 removes personality (injected) files from the API. 
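The effect of these filter lists is easiest to see in isolation. A simplified
sketch of the filtering applied by ``_format_quota_set`` (assumed behaviour,
not the exact controller code)::

    FILTERED_QUOTAS_2_50 = ['fixed_ips', 'floating_ips',
                            'security_group_rules', 'security_groups']

    def format_quota_set(quota_set, filtered_quotas=None):
        # Drop quotas that are no longer exposed at this microversion.
        filtered = filtered_quotas or []
        return {k: v for k, v in quota_set.items() if k not in filtered}

    # At microversion >= 2.50 the nova-network era keys disappear:
    # format_quota_set({'cores': 20, 'floating_ips': 10},
    #                  FILTERED_QUOTAS_2_50) -> {'cores': 20}
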
@@ -45,9 +45,8 @@ class QuotaClassSetsController(wsgi.Controller): - supported_quotas = [] - - def __init__(self, **kwargs): + def __init__(self): + super(QuotaClassSetsController, self).__init__() self.supported_quotas = QUOTAS.resources def _format_quota_set(self, quota_class, quota_set, filtered_quotas=None, @@ -72,6 +71,10 @@ def _format_quota_set(self, quota_class, quota_set, filtered_quotas=None, if resource in quota_set: result[resource] = quota_set[resource] + # Custom Quota Support + if quota_class != 'default': + result = copy.copy(quota_set) + return dict(quota_class_set=result) @wsgi.Controller.api_version('2.1', '2.49') @@ -81,18 +84,19 @@ def show(self, req, id): @wsgi.Controller.api_version('2.50', '2.56') # noqa @wsgi.expected_errors(()) - def show(self, req, id): + def show(self, req, id): # noqa return self._show(req, id, FILTERED_QUOTAS_2_50) @wsgi.Controller.api_version('2.57') # noqa @wsgi.expected_errors(()) - def show(self, req, id): + def show(self, req, id): # noqa return self._show(req, id, FILTERED_QUOTAS_2_57) def _show(self, req, id, filtered_quotas=None, exclude_server_groups=False): + QUOTAS.initialize() context = req.environ['nova.context'] - context.can(qcs_policies.POLICY_ROOT % 'show', {'quota_class': id}) + context.can(qcs_policies.POLICY_ROOT % 'show', target={}) values = QUOTAS.get_class_quotas(context, id) return self._format_quota_set(id, values, filtered_quotas, exclude_server_groups) @@ -106,19 +110,20 @@ def update(self, req, id, body): @wsgi.Controller.api_version("2.50", "2.56") # noqa @wsgi.expected_errors(400) @validation.schema(quota_classes.update_v250) - def update(self, req, id, body): + def update(self, req, id, body): # noqa return self._update(req, id, body, FILTERED_QUOTAS_2_50) @wsgi.Controller.api_version("2.57") # noqa @wsgi.expected_errors(400) @validation.schema(quota_classes.update_v257) - def update(self, req, id, body): + def update(self, req, id, body): # noqa return self._update(req, id, body, FILTERED_QUOTAS_2_57) def _update(self, req, id, body, filtered_quotas=None, exclude_server_groups=False): + QUOTAS.initialize() context = req.environ['nova.context'] - context.can(qcs_policies.POLICY_ROOT % 'update', {'quota_class': id}) + context.can(qcs_policies.POLICY_ROOT % 'update', target={}) try: utils.check_string_length(id, 'quota_class_name', min_length=1, max_length=255) @@ -137,3 +142,26 @@ def _update(self, req, id, body, filtered_quotas=None, values = QUOTAS.get_class_quotas(context, quota_class) return self._format_quota_set(None, values, filtered_quotas, exclude_server_groups) + + @wsgi.Controller.api_version('2.1') + @wsgi.response(201) + @wsgi.expected_errors((400)) + def create(self, req, id, body): + context = req.environ['nova.context'] + class_set = body['quota_class_set'] + + for key, value in class_set.items(): + try: + objects.Quotas.create_class(context, id, key, value) + except exception.QuotaClassExists: + pass + + values = QUOTAS.get_class_quotas(context, id) + return self._format_quota_set(None, values, None) + + @wsgi.Controller.api_version('2.1') + @wsgi.response(204) + @wsgi.expected_errors((404, 409)) + def delete(self, req, id, body): + context = req.environ['nova.context'] + objects.Quotas.delete_class(context, id) diff --git a/nova/api/openstack/compute/quota_sets.py b/nova/api/openstack/compute/quota_sets.py index e460d93f9b9..d1a1e32f087 100644 --- a/nova/api/openstack/compute/quota_sets.py +++ b/nova/api/openstack/compute/quota_sets.py @@ -13,9 +13,9 @@ # License for the specific language 
governing permissions and limitations # under the License. -from oslo_utils import strutils +from urllib import parse as urlparse -import six.moves.urllib.parse as urlparse +from oslo_utils import strutils import webob from nova.api.openstack.api_version_request \ @@ -37,7 +37,7 @@ CONF = nova.conf.CONF QUOTAS = quota.QUOTAS -FILTERED_QUOTAS_2_36 = ["fixed_ips", "floating_ips", "networks", +FILTERED_QUOTAS_2_36 = ["fixed_ips", "floating_ips", "security_group_rules", "security_groups"] FILTERED_QUOTAS_2_57 = list(FILTERED_QUOTAS_2_36) @@ -47,14 +47,15 @@ class QuotaSetsController(wsgi.Controller): - def _format_quota_set(self, project_id, quota_set, filtered_quotas): + def _format_quota_set(self, context, project_id, quota_set, + filtered_quotas): """Convert the quota object to a result dict.""" if project_id: result = dict(id=str(project_id)) else: result = {} - for resource in QUOTAS.resources: + for resource in QUOTAS.combined_resources(context): if (resource not in filtered_quotas and resource in quota_set): result[resource] = quota_set[resource] @@ -80,6 +81,9 @@ def _get_quotas(self, context, id, user_id=None, usages=False): if user_id: values = QUOTAS.get_user_quotas(context, id, user_id, usages=usages) + values.update(QUOTAS.get_user_quotas(context, id, user_id, + usages=usages, + quota_class='flavors')) else: values = QUOTAS.get_project_quotas(context, id, usages=usages) @@ -112,15 +116,16 @@ def show(self, req, id): @wsgi.Controller.api_version( # noqa MIN_WITHOUT_PROXY_API_SUPPORT_VERSION, '2.56') @wsgi.expected_errors(400) - def show(self, req, id): + def show(self, req, id): # noqa return self._show(req, id, FILTERED_QUOTAS_2_36) @wsgi.Controller.api_version('2.57') # noqa @wsgi.expected_errors(400) - def show(self, req, id): + def show(self, req, id): # noqa return self._show(req, id, FILTERED_QUOTAS_2_57) - @validation.query_schema(quota_sets.query_schema) + @validation.query_schema(quota_sets.query_schema_275, '2.75') + @validation.query_schema(quota_sets.query_schema, '2.0', '2.74') def _show(self, req, id, filtered_quotas): context = req.environ['nova.context'] context.can(qs_policies.POLICY_ROOT % 'show', {'project_id': id}) @@ -128,7 +133,7 @@ def _show(self, req, id, filtered_quotas): params = urlparse.parse_qs(req.environ.get('QUERY_STRING', '')) user_id = params.get('user_id', [None])[0] - return self._format_quota_set(id, + return self._format_quota_set(context, id, self._get_quotas(context, id, user_id=user_id), filtered_quotas=filtered_quotas) @@ -140,15 +145,16 @@ def detail(self, req, id): @wsgi.Controller.api_version( # noqa MIN_WITHOUT_PROXY_API_SUPPORT_VERSION, '2.56') @wsgi.expected_errors(400) - def detail(self, req, id): + def detail(self, req, id): # noqa return self._detail(req, id, FILTERED_QUOTAS_2_36) @wsgi.Controller.api_version('2.57') # noqa @wsgi.expected_errors(400) - def detail(self, req, id): + def detail(self, req, id): # noqa return self._detail(req, id, FILTERED_QUOTAS_2_57) - @validation.query_schema(quota_sets.query_schema) + @validation.query_schema(quota_sets.query_schema_275, '2.75') + @validation.query_schema(quota_sets.query_schema, '2.0', '2.74') def _detail(self, req, id, filtered_quotas): context = req.environ['nova.context'] context.can(qs_policies.POLICY_ROOT % 'detail', {'project_id': id}) @@ -156,6 +162,7 @@ def _detail(self, req, id, filtered_quotas): user_id = req.GET.get('user_id', None) return self._format_quota_set( + context, id, self._get_quotas(context, id, user_id=user_id, usages=True), filtered_quotas=filtered_quotas) 
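The stacked ``@validation.query_schema`` decorators used throughout this file
select one schema per request: each decorator carries a [min, max]
microversion range, and the schema whose range contains the request's version
is the one enforced. Roughly (a simplified model, not nova's actual dispatch
code)::

    def pick_schema(request_version, schemas):
        # schemas: list of (schema, min_version, max_version) tuples,
        # with versions given as (major, minor) tuples.
        for schema, min_ver, max_ver in schemas:
            if min_ver <= request_version <= max_ver:
                return schema
        return None

    schemas = [('query_schema_275', (2, 75), (99, 99)),
               ('query_schema', (2, 0), (2, 74))]
    assert pick_schema((2, 74), schemas) == 'query_schema'
    assert pick_schema((2, 75), schemas) == 'query_schema_275'
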
@@ -170,16 +177,17 @@ def update(self, req, id, body):
         MIN_WITHOUT_PROXY_API_SUPPORT_VERSION, '2.56')
     @wsgi.expected_errors(400)
     @validation.schema(quota_sets.update_v236)
-    def update(self, req, id, body):
+    def update(self, req, id, body):  # noqa
         return self._update(req, id, body, FILTERED_QUOTAS_2_36)
 
     @wsgi.Controller.api_version('2.57')  # noqa
     @wsgi.expected_errors(400)
     @validation.schema(quota_sets.update_v257)
-    def update(self, req, id, body):
+    def update(self, req, id, body):  # noqa
         return self._update(req, id, body, FILTERED_QUOTAS_2_57)
 
-    @validation.query_schema(quota_sets.query_schema)
+    @validation.query_schema(quota_sets.query_schema_275, '2.75')
+    @validation.query_schema(quota_sets.query_schema, '2.0', '2.74')
     def _update(self, req, id, body, filtered_quotas):
         context = req.environ['nova.context']
         context.can(qs_policies.POLICY_ROOT % 'update', {'project_id': id})
@@ -191,13 +199,11 @@ def _update(self, req, id, body, filtered_quotas):
 
         quota_set = body['quota_set']
 
-        # NOTE(alex_xu): The CONF.enable_network_quota was deprecated
-        # due to it is only used by nova-network, and nova-network will be
-        # deprecated also. So when CONF.enable_newtork_quota is removed,
-        # the networks quota will disappeare also.
-        if not CONF.enable_network_quota and 'networks' in quota_set:
+        # NOTE(stephenfin): network quotas were only used by nova-network and
+        # therefore should be explicitly rejected
+        if 'networks' in quota_set:
             raise webob.exc.HTTPBadRequest(
-                explanation=_('The networks quota is disabled'))
+                explanation=_('The networks quota has been removed'))
 
         force_update = strutils.bool_from_string(quota_set.get('force',
                                                                'False'))
@@ -214,6 +220,15 @@ def _update(self, req, id, body, filtered_quotas):
                 # quota, this check will be ignored if admin want to force
                 # update
                 value = int(value)
+
+            if key not in settable_quotas:
+                raise webob.exc.HTTPBadRequest(
+                    explanation=_('The quota {} is not available, please '
+                                  'restart nova-api if recently seeded '
+                                  'flavor').format(key))
+
             if not force_update:
                 minimum = settable_quotas[key]['minimum']
                 maximum = settable_quotas[key]['maximum']
@@ -234,6 +249,7 @@ def _update(self, req, id, body, filtered_quotas):
         # Note(gmann): Removed 'id' from update's response to make it same
         # as V2. If needed it can be added with microversion.
         return self._format_quota_set(
+            context,
             None, self._get_quotas(context, id, user_id=user_id),
             filtered_quotas=filtered_quotas)
 
@@ -246,12 +262,12 @@ def defaults(self, req, id):
     @wsgi.Controller.api_version(  # noqa
         MIN_WITHOUT_PROXY_API_SUPPORT_VERSION, '2.56')
     @wsgi.expected_errors(400)
-    def defaults(self, req, id):
+    def defaults(self, req, id):  # noqa
         return self._defaults(req, id, FILTERED_QUOTAS_2_36)
 
     @wsgi.Controller.api_version('2.57')  # noqa
     @wsgi.expected_errors(400)
-    def defaults(self, req, id):
+    def defaults(self, req, id):  # noqa
         return self._defaults(req, id, FILTERED_QUOTAS_2_57)
 
     def _defaults(self, req, id, filtered_quotas):
@@ -260,14 +276,15 @@ def _defaults(self, req, id, filtered_quotas):
         identity.verify_project_id(context, id)
 
         values = QUOTAS.get_defaults(context)
-        return self._format_quota_set(id, values,
+        return self._format_quota_set(context, id, values,
                                       filtered_quotas=filtered_quotas)
 
     # TODO(oomichi): Here should be 204(No Content) instead of 202 by v2.1
     # +microversions because the resource quota-set has been deleted completely
     # when returning a response.
@wsgi.expected_errors(()) - @validation.query_schema(quota_sets.query_schema) + @validation.query_schema(quota_sets.query_schema_275, '2.75') + @validation.query_schema(quota_sets.query_schema, '2.0', '2.74') @wsgi.response(202) def delete(self, req, id): context = req.environ['nova.context'] @@ -275,7 +292,7 @@ def delete(self, req, id): params = urlparse.parse_qs(req.environ.get('QUERY_STRING', '')) user_id = params.get('user_id', [None])[0] if user_id: - QUOTAS.destroy_all_by_project_and_user(context, - id, user_id) + objects.Quotas.destroy_all_by_project_and_user( + context, id, user_id) else: - QUOTAS.destroy_all_by_project(context, id) + objects.Quotas.destroy_all_by_project(context, id) diff --git a/nova/api/openstack/compute/remote_consoles.py b/nova/api/openstack/compute/remote_consoles.py index 625a8083d6e..36015542aa3 100644 --- a/nova/api/openstack/compute/remote_consoles.py +++ b/nova/api/openstack/compute/remote_consoles.py @@ -18,20 +18,20 @@ from nova.api.openstack.compute.schemas import remote_consoles from nova.api.openstack import wsgi from nova.api import validation -from nova import compute +from nova.compute import api as compute from nova import exception from nova.policies import remote_consoles as rc_policies class RemoteConsolesController(wsgi.Controller): - def __init__(self, *args, **kwargs): + def __init__(self): + super(RemoteConsolesController, self).__init__() self.compute_api = compute.API() self.handlers = {'vnc': self.compute_api.get_vnc_console, 'spice': self.compute_api.get_spice_console, 'rdp': self.compute_api.get_rdp_console, 'serial': self.compute_api.get_serial_console, 'mks': self.compute_api.get_mks_console} - super(RemoteConsolesController, self).__init__(*args, **kwargs) @wsgi.Controller.api_version("2.1", "2.5") @wsgi.expected_errors((400, 404, 409, 501)) @@ -52,8 +52,7 @@ def get_vnc_console(self, req, id, body): console_type) except exception.ConsoleTypeUnavailable as e: raise webob.exc.HTTPBadRequest(explanation=e.format_message()) - except (exception.InstanceUnknownCell, - exception.InstanceNotFound) as e: + except exception.InstanceNotFound as e: raise webob.exc.HTTPNotFound(explanation=e.format_message()) except exception.InstanceNotReady as e: raise webob.exc.HTTPConflict(explanation=e.format_message()) @@ -81,8 +80,7 @@ def get_spice_console(self, req, id, body): console_type) except exception.ConsoleTypeUnavailable as e: raise webob.exc.HTTPBadRequest(explanation=e.format_message()) - except (exception.InstanceUnknownCell, - exception.InstanceNotFound) as e: + except exception.InstanceNotFound as e: raise webob.exc.HTTPNotFound(explanation=e.format_message()) except exception.InstanceNotReady as e: raise webob.exc.HTTPConflict(explanation=e.format_message()) @@ -112,8 +110,7 @@ def get_rdp_console(self, req, id, body): console_type) except exception.ConsoleTypeUnavailable as e: raise webob.exc.HTTPBadRequest(explanation=e.format_message()) - except (exception.InstanceUnknownCell, - exception.InstanceNotFound) as e: + except exception.InstanceNotFound as e: raise webob.exc.HTTPNotFound(explanation=e.format_message()) except exception.InstanceNotReady as e: raise webob.exc.HTTPConflict(explanation=e.format_message()) @@ -138,8 +135,7 @@ def get_serial_console(self, req, id, body): output = self.compute_api.get_serial_console(context, instance, console_type) - except (exception.InstanceUnknownCell, - exception.InstanceNotFound) as e: + except exception.InstanceNotFound as e: raise webob.exc.HTTPNotFound(explanation=e.format_message()) 
except exception.InstanceNotReady as e: raise webob.exc.HTTPConflict(explanation=e.format_message()) @@ -159,8 +155,9 @@ def get_serial_console(self, req, id, body): @validation.schema(remote_consoles.create_v28, "2.8") def create(self, req, server_id, body): context = req.environ['nova.context'] - context.can(rc_policies.BASE_POLICY_NAME) instance = common.get_instance(self.compute_api, context, server_id) + context.can(rc_policies.BASE_POLICY_NAME, + target={'project_id': instance.project_id}) protocol = body['remote_console']['protocol'] console_type = body['remote_console']['type'] try: diff --git a/nova/api/openstack/compute/rescue.py b/nova/api/openstack/compute/rescue.py index d1f64953e72..baf3510920b 100644 --- a/nova/api/openstack/compute/rescue.py +++ b/nova/api/openstack/compute/rescue.py @@ -16,11 +16,12 @@ from webob import exc +from nova.api.openstack import api_version_request from nova.api.openstack import common from nova.api.openstack.compute.schemas import rescue from nova.api.openstack import wsgi from nova.api import validation -from nova import compute +from nova.compute import api as compute import nova.conf from nova import exception from nova.policies import rescue as rescue_policies @@ -30,8 +31,8 @@ class RescueController(wsgi.Controller): - def __init__(self, *args, **kwargs): - super(RescueController, self).__init__(*args, **kwargs) + def __init__(self): + super(RescueController, self).__init__() self.compute_api = compute.API() # TODO(cyeoh): Should be responding here with 202 Accept @@ -56,23 +57,25 @@ def _rescue(self, req, id, body): rescue_image_ref = None if body['rescue']: rescue_image_ref = body['rescue'].get('rescue_image_ref') - + allow_bfv_rescue = api_version_request.is_supported(req, '2.87') try: self.compute_api.rescue(context, instance, rescue_password=password, - rescue_image_ref=rescue_image_ref) - except exception.InstanceUnknownCell as e: - raise exc.HTTPNotFound(explanation=e.format_message()) - except exception.InstanceIsLocked as e: + rescue_image_ref=rescue_image_ref, + allow_bfv_rescue=allow_bfv_rescue) + except ( + exception.InstanceIsLocked, + exception.InvalidVolume, + ) as e: raise exc.HTTPConflict(explanation=e.format_message()) - except exception.InstanceInvalidState as state_error: - common.raise_http_conflict_for_instance_invalid_state(state_error, - 'rescue', id) - except exception.InvalidVolume as volume_error: - raise exc.HTTPConflict(explanation=volume_error.format_message()) - except exception.InstanceNotRescuable as non_rescuable: - raise exc.HTTPBadRequest( - explanation=non_rescuable.format_message()) + except exception.InstanceInvalidState as e: + common.raise_http_conflict_for_instance_invalid_state( + e, 'rescue', id) + except ( + exception.InstanceNotRescuable, + exception.UnsupportedRescueImage, + ) as e: + raise exc.HTTPBadRequest(explanation=e.format_message()) if CONF.api.enable_instance_password: return {'adminPass': password} @@ -85,12 +88,11 @@ def _rescue(self, req, id, body): def _unrescue(self, req, id, body): """Unrescue an instance.""" context = req.environ["nova.context"] - context.can(rescue_policies.BASE_POLICY_NAME) instance = common.get_instance(self.compute_api, context, id) + context.can(rescue_policies.UNRESCUE_POLICY_NAME, + target={'project_id': instance.project_id}) try: self.compute_api.unrescue(context, instance) - except exception.InstanceUnknownCell as e: - raise exc.HTTPNotFound(explanation=e.format_message()) except exception.InstanceIsLocked as e: raise 
exc.HTTPConflict(explanation=e.format_message()) except exception.InstanceInvalidState as state_error: diff --git a/nova/api/openstack/compute/rest_api_version_history.rst b/nova/api/openstack/compute/rest_api_version_history.rst index a7b43d800ec..d7a74870f90 100644 --- a/nova/api/openstack/compute/rest_api_version_history.rst +++ b/nova/api/openstack/compute/rest_api_version_history.rst @@ -10,7 +10,7 @@ user documentation. --- This is the initial version of the v2.1 API which supports -microversions. The V2.1 API is from the REST API users's point of +microversions. The V2.1 API is from the REST API users' point of view exactly the same as v2.0 except with strong input validation. A user can specify a header in the API request:: @@ -84,22 +84,23 @@ Example response:: "remote_console": { "protocol": "vnc", "type": "novnc", - "url": "http://example.com:6080/vnc_auto.html?token=XYZ" + "url": "http://example.com:6080/vnc_auto.html?path=%3Ftoken%3DXYZ" } } -The old APIs 'os-getVNCConsole', 'os-getSPICEConsole', 'os-getSerialConsole' -and 'os-getRDPConsole' are removed. +The old APIs ``os-getVNCConsole``, ``os-getSPICEConsole``, +``os-getSerialConsole`` and ``os-getRDPConsole`` are removed. 2.7 --- Check the ``is_public`` attribute of a flavor before adding tenant access -to it. Reject the request with HTTPConflict error. +to it. Reject the request with ``HTTPConflict`` error. 2.8 --- -Add 'mks' protocol and 'webmks' type for remote consoles. + +Add ``mks`` protocol and ``webmks`` type for remote consoles. 2.9 --- @@ -111,7 +112,7 @@ a lock on the server, ``false`` otherwise. 2.10 ---- -Added user_id parameter to os-keypairs plugin, as well as a new property +Added ``user_id`` parameter to ``os-keypairs`` plugin, as well as a new property in the request body, for the create operation. Administrators will be able to list, get details and delete keypairs owned by @@ -142,8 +143,9 @@ API response data. Remove ``onSharedStorage`` parameter from server's evacuate action. Nova will automatically detect if the instance is on shared storage. -Also adminPass is removed from the response body. The user can get the -password with the server's os-server-password action. + +``adminPass`` is removed from the response body. The user can get the +password with the server's ``os-server-password`` action. 2.15 ---- @@ -154,7 +156,7 @@ From this version of the API users can choose 'soft-affinity' and 2.16 ---- -Exposes new host_status attribute for servers/detail and servers/{server_id}. +Exposes new ``host_status`` attribute for servers/detail and servers/{server_id}. Ability to get nova-compute status when querying servers. By default, this is only exposed to cloud administrators. @@ -166,18 +168,21 @@ systems in instance may need different configurations to trigger crash dump. 2.18 ---- + Establishes a set of routes that makes project_id an optional construct in v2.1. 2.19 ---- + Allow the user to set and get the server description. The user will be able to set the description when creating, rebuilding, or updating a server, and get the description as part of the server details. 2.20 ---- + From this version of the API user can call detach and attach volumes for -instances which are in shelved and shelved_offloaded state. +instances which are in ``shelved`` and ``shelved_offloaded`` state. 2.21 ---- @@ -188,8 +193,8 @@ instances. 2.22 ---- -A new resource servers:migrations added. A new API to force live migration -to complete added:: +A new resource, ``servers:migrations``, is added. 
A new API to force live
+migration to complete added::
 
     POST /servers/<uuid>/migrations/<id>/action
     {
         "force_complete": null
     }
 
 2.23
 ----
 
 From this version of the API users can get the migration summary list by
 index API or the information of a specific migration by get API.
-And the old top-level resource `/os-migrations` won't be extended anymore.
-Add migration_type for old /os-migrations API, also add ref link to the
-/servers/{uuid}/migrations/{id} for it when the migration is an in-progress
-live-migration.
+Add ``migration_type`` for old ``/os-migrations`` API, also add ``ref`` link to
+the ``/servers/{uuid}/migrations/{id}`` for it when the migration is an
+in-progress live-migration.
 
 2.24
 ----
@@ -216,8 +220,8 @@ A new API call to cancel a running live migration::
 
 2.25 (Maximum in Mitaka)
 ------------------------
 
-Modify input parameter for ``os-migrateLive``. The block_migration will
-support 'auto' value, and disk_over_commit flag will be removed.
+Modify input parameter for ``os-migrateLive``. The ``block_migration`` field now
+supports an ``auto`` value and the ``disk_over_commit`` flag is removed.
 
 2.26
 ----
@@ -225,7 +229,7 @@ support 'auto' value, and disk_over_commit flag will be removed.
 Added support of server tags.
 
 A user can create, update, delete or check existence of simple string tags
-for servers by the os-server-tags plugin.
+for servers by the ``os-server-tags`` plugin.
 
 Tags have the following schema restrictions:
 
@@ -237,30 +241,30 @@ Tags have the following schema restrictions:
 * All other characters are allowed to be in a tag name
 * Each server can have up to 50 tags.
 
-The resource point for these operations is /servers/<server_id>/tags
+The resource point for these operations is ``/servers/<server_id>/tags``.
 
-A user can add a single tag to the server by sending PUT request to the
-/servers/<server_id>/tags/<tag>
+A user can add a single tag to the server by making a ``PUT`` request to
+``/servers/<server_id>/tags/<tag>``,
 
-where <tag> is any valid tag name.
+where ``<tag>`` is any valid tag name.
 
-A user can replace **all** current server tags to the new set of tags
-by sending PUT request to the /servers/<server_id>/tags. New set of tags
-must be specified in request body. This set must be in list 'tags'.
+A user can replace **all** current server tags with a new set of tags by making
+a ``PUT`` request to ``/servers/<server_id>/tags``. The new set of tags must
+be specified in the request body, in the list ``tags``.
 
-A user can remove specified tag from the server by sending DELETE request
-to the /servers/<server_id>/tags/<tag>
+A user can remove a specified tag from the server by making a ``DELETE`` request
+to ``/servers/<server_id>/tags/<tag>``,
 
-where <tag> is tag name which user wants to remove.
+where ``<tag>`` is the name of the tag the user wants to remove.
 
-A user can remove **all** tags from the server by sending DELETE request
-to the /servers/<server_id>/tags
+A user can remove **all** tags from the server by making a ``DELETE`` request to
+``/servers/<server_id>/tags``.
 
-A user can get a set of server tags with information about server by sending
-GET request to the /servers/<server_id>
+A user can get a set of server tags along with information about the server by
+making a ``GET`` request to ``/servers/<server_id>``.
 
 Request returns dictionary with information about specified server, including
-list 'tags' ::
+list ``tags``::
 
     {
         'id': {server_id},
@@ -268,8 +272,8 @@ list 'tags' ::
         'tags': ['foo', 'bar', 'baz']
     }
 
-A user can get **only** a set of server tags by sending GET request to the
-/servers/<server_id>/tags
+A user can get **only** a set of server tags by making a ``GET`` request to
+``/servers/<server_id>/tags``.
Response :: @@ -277,22 +281,24 @@ Response :: 'tags': ['foo', 'bar', 'baz'] } -A user can check if a tag exists or not on a server by sending -GET /servers/{server_id}/tags/{tag} +A user can check if a tag exists or not on a server by making a ``GET`` request +to ``/servers/{server_id}/tags/{tag}``. -Request returns `204 No Content` if tag exist on a server or `404 Not Found` +Request returns ``204 No Content`` if the tag exists on a server or ``404 Not Found`` if tag doesn't exist on a server. -A user can filter servers in GET /servers request by new filters: +A user can filter servers in a ``GET /servers`` request by new filters: -* tags -* tags-any -* not-tags -* not-tags-any +* ``tags`` +* ``tags-any`` +* ``not-tags`` +* ``not-tags-any`` These filters can be combined. Also user can use more than one string tags for each filter. In this case string tags for each filter must be separated -by comma: GET /servers?tags=red&tags-any=green,orange +by comma. For example:: + + GET /servers?tags=red&tags-any=green,orange 2.27 ---- @@ -305,11 +311,11 @@ Both the original form of header and the new form is supported. 2.28 ---- -Nova API hypervisor.cpu_info change from string to JSON object. +Nova API ``hypervisor.cpu_info`` changed from a string to a JSON object. -From this version of the API the hypervisor's 'cpu_info' field will be -will returned as JSON object (not string) by sending GET request -to the /v2.1/os-hypervisors/{hypervisor_id}. +From this version of the API the hypervisor's ``cpu_info`` field will be +returned as a JSON object (not a string) when making a ``GET`` request to +``/v2.1/os-hypervisors/{hypervisor_id}``. 2.29 ---- @@ -332,8 +338,8 @@ the ``force`` attribute is set. 2.31 ---- -Fix os-console-auth-tokens to return connection info for all types of tokens, -not just RDP. +Fix ``os-console-auth-tokens`` to return connection info for all types of +tokens, not just RDP. 2.32 ---- @@ -409,11 +415,37 @@ API endpoints as below:: '/os-baremetal-nodes' '/os-fping' -.. note:: A `regression`_ was introduced in this microversion which broke the +.. note:: + + A `regression`__ was introduced in this microversion which broke the ``force`` parameter in the ``PUT /os-quota-sets`` API. The fix will have to be applied to restore this functionality. -.. _regression: https://bugs.launchpad.net/nova/+bug/1733886 + __ https://bugs.launchpad.net/nova/+bug/1733886 + +.. versionchanged:: 18.0.0 + + The ``os-fping`` API was completely removed in the 18.0.0 (Rocky) release. + On deployments newer than this, the API will return HTTP 410 (Gone) + regardless of the requested microversion. + +.. versionchanged:: 21.0.0 + + The ``os-security-group-default-rules`` API was completely removed in the + 21.0.0 (Ussuri) release. On deployments newer than this, the APIs will + return HTTP 410 (Gone) regardless of the requested microversion. + +.. versionchanged:: 21.0.0 + + The ``os-networks`` API was partially removed in the 21.0.0 (Ussuri) + release. On deployments newer than this, some endpoints of the API will + return HTTP 410 (Gone) regardless of the requested microversion. + +.. versionchanged:: 21.0.0 + + The ``os-tenant-networks`` API was partially removed in the 21.0.0 (Ussuri) + release. On deployments newer than this, some endpoints of the API will + return HTTP 410 (Gone) regardless of the requested microversion. 2.37 ---- @@ -716,7 +748,7 @@ The embedded flavor description will not be included in server representations.
---- Updates the POST request body for the ``migrate`` action to include the -the optional ``host`` string field defaulted to ``null``. If ``host`` is +optional ``host`` string field defaulted to ``null``. If ``host`` is set the migrate action verifies the provided host with the nova scheduler and uses it as the destination for the migration. @@ -842,3 +874,331 @@ in server group APIs: Add support for abort live migrations in ``queued`` and ``preparing`` status for API ``DELETE /servers/{server_id}/migrations/{migration_id}``. + +2.66 +---- + +The ``changes-before`` filter can be included as a request parameter of the +following APIs to filter by changes before or equal to the resource +``updated_at`` time: + +* ``GET /servers`` +* ``GET /servers/detail`` +* ``GET /servers/{server_id}/os-instance-actions`` +* ``GET /os-migrations`` + +2.67 +---- + +Adds the ``volume_type`` parameter to ``block_device_mapping_v2``, which can +be used to specify a cinder ``volume_type`` when creating a server. + +2.68 +---- + +Remove support for forced live migration and evacuate server actions. + +2.69 +---- + +Add support for returning minimal constructs for ``GET /servers``, +``GET /servers/detail``, ``GET /servers/{server_id}`` and ``GET /os-services`` +when there is a transient unavailability condition in the deployment like an +infrastructure failure. Starting from this microversion, responses for the +above four requests will have missing key values for the down part of the +infrastructure, so that the APIs remain resilient. The response body will only +have a minimal set of information obtained from the available information in +the API database for the down cells. See `handling down cells +<https://docs.openstack.org/api-guide/compute/down_cells.html>`__ for +more information. + +2.70 +---- + +Exposes virtual device tags for volume attachments and virtual interfaces +(ports). A ``tag`` parameter is added to the response body for the following +APIs: + +**Volumes** + +* GET /servers/{server_id}/os-volume_attachments (list) +* GET /servers/{server_id}/os-volume_attachments/{volume_id} (show) +* POST /servers/{server_id}/os-volume_attachments (attach) + +**Ports** + +* GET /servers/{server_id}/os-interface (list) +* GET /servers/{server_id}/os-interface/{port_id} (show) +* POST /servers/{server_id}/os-interface (attach) + +2.71 +---- + +The ``server_groups`` parameter will be in the response body of the following +APIs to list the server groups to which the server belongs: + +* ``GET /servers/{server_id}`` +* ``PUT /servers/{server_id}`` +* ``POST /servers/{server_id}/action (rebuild)`` + +2.72 (Maximum in Stein) +----------------------- + +API microversion 2.72 adds support for creating servers with neutron ports +that have a resource request, e.g. neutron ports with a +`QoS minimum bandwidth rule`_. Deleting servers with such ports, as well as +detaching these types of ports, was already handled properly. + +API limitations: + +* Creating servers with Neutron networks having a QoS minimum bandwidth rule + is not supported. + +* Attaching Neutron ports and networks having a QoS minimum bandwidth rule + is not supported. + +* Moving (resizing, migrating, live-migrating, evacuating, + unshelving after shelve offload) servers with ports having resource + requests is not yet supported. + + ..
_QoS minimum bandwidth rule: https://docs.openstack.org/neutron/latest/admin/config-qos-min-bw.html + +2.73 +---- + +API microversion 2.73 adds support for specifying a reason when locking the +server and exposes this information via ``GET /servers/detail``, +``GET /servers/{server_id}``, ``PUT /servers/{server_id}`` and +``POST /servers/{server_id}/action`` where the action is rebuild. It also +supports ``locked`` as a filter/sort parameter for ``GET /servers/detail`` +and ``GET /servers``. + +2.74 +---- + +API microversion 2.74 adds support for specifying optional ``host`` +and/or ``hypervisor_hostname`` parameters in the request body of +``POST /servers``. These request a specific destination host/node +to boot the requested server. These parameters are mutually exclusive +with the special ``availability_zone`` format of ``zone:host:node``. +Unlike ``zone:host:node``, the ``host`` and/or ``hypervisor_hostname`` +parameters still allow scheduler filters to be run. If the requested +host/node is unavailable or otherwise unsuitable, the request will fail +early rather than later in the boot process. +There is also a new policy named +``compute:servers:create:requested_destination``. By default, +it can be specified by administrators only. + +2.75 +---- + +Multiple API cleanups are done in API microversion 2.75: + +* Return a 400 error response for an unknown parameter in the querystring or + request body. + +* Make the server representation consistent among GET, PUT + and rebuild server API responses. ``PUT /servers/{server_id}`` + and ``POST /servers/{server_id}/action {rebuild}`` API responses are + modified to add all the missing fields which are returned + by ``GET /servers/{server_id}``. + +* Change the default return value of the ``swap`` field from the empty + string to 0 (integer) in flavor APIs. + +* Always return the ``servers`` field in the response of the + ``GET /os-hypervisors``, ``GET /os-hypervisors/detail`` and + ``GET /os-hypervisors/{hypervisor_id}`` APIs even when there are no servers + on a hypervisor. + +2.76 +---- + +Adds ``power-update`` event name to ``os-server-external-events`` API. The +changes to the power state of an instance caused by this event can be viewed +through ``GET /servers/{server_id}/os-instance-actions`` and +``GET /servers/{server_id}/os-instance-actions/{request_id}``. + +2.77 +---- + +API microversion 2.77 adds support for specifying an availability zone when +unshelving a shelved offloaded server. + +2.78 +---- + +Add server sub-resource ``topology`` to show server NUMA information. + +* ``GET /servers/{server_id}/topology`` + +The default behavior is configurable using two new policies: + +* ``compute:server:topology:index`` +* ``compute:server:topology:host:index`` + +.. Keep a reference for python-novaclient releasenotes .. _id71: + +2.79 (Maximum in Train) +----------------------- + +API microversion 2.79 adds support for specifying the ``delete_on_termination`` +field in the request body when attaching a volume to a server, to support +configuring whether to delete the data volume when the server is destroyed. +Also, ``delete_on_termination`` is added to the GET responses when showing +attached volumes, and the ``delete_on_termination`` field is included +in the POST API response body when attaching a volume.
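As a hedged illustration (the volume ID is a made-up placeholder and the full request/response shape is abbreviated), an attach request that asks nova to delete the volume when the server is destroyed could look like::

    POST /servers/{server_id}/os-volume_attachments

    {
        "volumeAttachment": {
            "volumeId": "a26887c6-c47b-4654-abb5-dfadf7d3f803",
            "delete_on_termination": true
        }
    }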
+ +The affected APIs are as follows: + +* ``POST /servers/{server_id}/os-volume_attachments`` +* ``GET /servers/{server_id}/os-volume_attachments`` +* ``GET /servers/{server_id}/os-volume_attachments/{volume_id}`` + +2.80 +---- + +Microversion 2.80 changes the list migrations APIs and the ``os-migrations`` +API. + +Expose the ``user_id`` and ``project_id`` fields in the following APIs: + +* ``GET /os-migrations`` +* ``GET /servers/{server_id}/migrations`` +* ``GET /servers/{server_id}/migrations/{migration_id}`` + +The ``GET /os-migrations`` API will also have optional ``user_id`` and +``project_id`` query parameters for filtering migrations by user and/or +project, for example: + +* ``GET /os-migrations?user_id=ef9d34b4-45d0-4530-871b-3fb535988394`` +* ``GET /os-migrations?project_id=011ee9f4-8f16-4c38-8633-a254d420fd54`` +* ``GET /os-migrations?user_id=ef9d34b4-45d0-4530-871b-3fb535988394&project_id=011ee9f4-8f16-4c38-8633-a254d420fd54`` + +2.81 +---- + +Adds support for image cache management by aggregate by adding +``POST /os-aggregates/{aggregate_id}/images``. + +2.82 +---- + +Adds ``accelerator-request-bound`` event to ``os-server-external-events`` +API. This event is sent by Cyborg to indicate completion of the binding +event for one accelerator request (ARQ) associated with an instance. + +2.83 +---- + +Allow the following filter parameters for ``GET /servers/detail`` +and ``GET /servers`` for non-admin users: + +* ``availability_zone`` +* ``config_drive`` +* ``key_name`` +* ``created_at`` +* ``launched_at`` +* ``terminated_at`` +* ``power_state`` +* ``task_state`` +* ``vm_state`` +* ``progress`` +* ``user_id`` + +2.84 +---- + +The ``GET /servers/{server_id}/os-instance-actions/{request_id}`` API returns +a ``details`` parameter for each failed event with a fault message, similar to +the server ``fault.message`` parameter in ``GET /servers/{server_id}`` for a +server with status ``ERROR``. + +2.85 +---- + +Adds the ability to specify ``delete_on_termination`` in the +``PUT /servers/{server_id}/os-volume_attachments/{volume_id}`` API, which +allows changing the behavior of volume deletion on instance deletion. + +2.86 +---- + +Add support for validation of known extra specs. This is enabled by default +for the following APIs: + +* ``POST /flavors/{flavor_id}/os-extra_specs`` +* ``PUT /flavors/{flavor_id}/os-extra_specs/{id}`` + +Validation is only used for recognized extra spec namespaces, currently: +``accel``, ``aggregate_instance_extra_specs``, ``capabilities``, ``hw``, +``hw_rng``, ``hw_video``, ``os``, ``pci_passthrough``, ``powervm``, ``quota``, +``resources``, ``trait``, and ``vmware``. + +.. _microversion 2.87: + +2.87 (Maximum in Ussuri and Victoria) +------------------------------------- + +Adds support for rescuing boot-from-volume instances when the compute host +reports the ``COMPUTE_BFV_RESCUE`` capability trait. + +.. _microversion 2.88: + +2.88 (Maximum in Wallaby) +------------------------- + +The following fields are no longer included in responses for the +``GET /os-hypervisors/detail`` and ``GET /os-hypervisors/{hypervisor_id}`` +APIs: + +- ``current_workload`` +- ``cpu_info`` +- ``vcpus`` +- ``vcpus_used`` +- ``free_disk_gb`` +- ``local_gb`` +- ``local_gb_used`` +- ``disk_available_least`` +- ``free_ram_mb`` +- ``memory_mb`` +- ``memory_mb_used`` +- ``running_vms`` + +These fields were removed as the information they provided was frequently +misleading or outright wrong, and many can be better queried from placement.
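For instance, inventory and usage data comparable to several of the removed fields is exposed by the placement API; a rough sketch of the equivalent queries (resource provider UUIDs are placeholders, and the placement API reference is authoritative)::

    GET /resource_providers/{uuid}/inventories
    GET /resource_providers/{uuid}/usages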
+ +In addition, the ``GET /os-hypervisors/statistics`` API, which provided a +summary view with just the fields listed above, has been removed entirely and +will now raise an HTTP 404 with microversion 2.88 or greater. + +Finally, the ``GET /os-hypervisors/{hypervisor}/uptime`` API, which provided a +similar response to the ``GET /os-hypervisors/detail`` and ``GET +/os-hypervisors/{hypervisor_id}`` APIs but with an additional ``uptime`` field, +has been removed in favour of including this field in the primary ``GET +/os-hypervisors/detail`` and ``GET /os-hypervisors/{hypervisor_id}`` APIs. + +.. _microversion 2.89: + +2.89 +---- + +``attachment_id`` and ``bdm_uuid`` are now included in the responses for ``GET +/servers/{server_id}/os-volume_attachments`` and ``GET +/servers/{server_id}/os-volume_attachments/{volume_id}``. Additionally, the +``id`` field is dropped from the response as it duplicates the ``volumeId`` +field. + +.. _microversion 2.90: + +2.90 (Maximum in Xena) +---------------------- + +The ``POST /servers`` (create server), ``PUT /servers/{id}`` (update server) +and ``POST /servers/{server_id}/action (rebuild)`` (rebuild server) APIs now +accept a ``hostname`` parameter, allowing users to configure a hostname when +creating the instance. When specified, this will replace the auto-generated +hostname based on the display name. + +In addition, the ``OS-EXT-SRV-ATTR:hostname`` field for all server +responses is now visible to all users. Previously this was an admin-only field. diff --git a/nova/api/openstack/compute/routes.py b/nova/api/openstack/compute/routes.py index 06ba417cd2d..0966b727bec 100644 --- a/nova/api/openstack/compute/routes.py +++ b/nova/api/openstack/compute/routes.py @@ -28,17 +28,12 @@ from nova.api.openstack.compute import cells from nova.api.openstack.compute import certificates from nova.api.openstack.compute import cloudpipe -from nova.api.openstack.compute import config_drive from nova.api.openstack.compute import console_auth_tokens from nova.api.openstack.compute import console_output from nova.api.openstack.compute import consoles from nova.api.openstack.compute import create_backup from nova.api.openstack.compute import deferred_delete from nova.api.openstack.compute import evacuate -from nova.api.openstack.compute import extended_availability_zone -from nova.api.openstack.compute import extended_server_attributes -from nova.api.openstack.compute import extended_status -from nova.api.openstack.compute import extended_volumes from nova.api.openstack.compute import extension_info from nova.api.openstack.compute import fixed_ips from nova.api.openstack.compute import flavor_access @@ -50,11 +45,9 @@ from nova.api.openstack.compute import floating_ips from nova.api.openstack.compute import floating_ips_bulk from nova.api.openstack.compute import fping -from nova.api.openstack.compute import hide_server_addresses from nova.api.openstack.compute import hosts from nova.api.openstack.compute import hypervisors from nova.api.openstack.compute import image_metadata -from nova.api.openstack.compute import image_size from nova.api.openstack.compute import images from nova.api.openstack.compute import instance_actions from nova.api.openstack.compute import instance_usage_audit_log @@ -81,14 +74,13 @@ from nova.api.openstack.compute import server_migrations from nova.api.openstack.compute import server_password from nova.api.openstack.compute import server_tags -from nova.api.openstack.compute import server_usage +from nova.api.openstack.compute import
server_topology from nova.api.openstack.compute import servers from nova.api.openstack.compute import services from nova.api.openstack.compute import shelve from nova.api.openstack.compute import simple_tenant_usage from nova.api.openstack.compute import suspend_server from nova.api.openstack.compute import tenant_networks -from nova.api.openstack.compute import used_limits from nova.api.openstack.compute import versionsV21 from nova.api.openstack.compute import virtual_interfaces from nova.api.openstack.compute import volumes @@ -96,66 +88,60 @@ from nova.api import wsgi as base_wsgi -def _create_controller(main_controller, controller_list, - action_controller_list): +def _create_controller(main_controller, action_controller_list): """This is a helper method to create controller with a - list of extended controller. This is for backward compatible - with old extension interface. Finally, the controller for the - same resource will be merged into single one controller. + list of action controllers. """ controller = wsgi.Resource(main_controller()) - for ctl in controller_list: - controller.register_extensions(ctl()) for ctl in action_controller_list: controller.register_actions(ctl()) return controller agents_controller = functools.partial( - _create_controller, agents.AgentController, [], []) + _create_controller, agents.AgentController, []) aggregates_controller = functools.partial( - _create_controller, aggregates.AggregateController, [], []) + _create_controller, aggregates.AggregateController, []) assisted_volume_snapshots_controller = functools.partial( _create_controller, - assisted_volume_snapshots.AssistedVolumeSnapshotsController, [], []) + assisted_volume_snapshots.AssistedVolumeSnapshotsController, []) availability_zone_controller = functools.partial( - _create_controller, availability_zone.AvailabilityZoneController, [], []) + _create_controller, availability_zone.AvailabilityZoneController, []) baremetal_nodes_controller = functools.partial( - _create_controller, baremetal_nodes.BareMetalNodeController, [], []) + _create_controller, baremetal_nodes.BareMetalNodeController, []) cells_controller = functools.partial( - _create_controller, cells.CellsController, [], []) + _create_controller, cells.CellsController, []) certificates_controller = functools.partial( - _create_controller, certificates.CertificatesController, [], []) + _create_controller, certificates.CertificatesController, []) cloudpipe_controller = functools.partial( - _create_controller, cloudpipe.CloudpipeController, [], []) + _create_controller, cloudpipe.CloudpipeController, []) extensions_controller = functools.partial( - _create_controller, extension_info.ExtensionInfoController, [], []) + _create_controller, extension_info.ExtensionInfoController, []) fixed_ips_controller = functools.partial(_create_controller, - fixed_ips.FixedIPController, [], []) + fixed_ips.FixedIPController, []) flavor_controller = functools.partial(_create_controller, flavors.FlavorsController, - [], [ flavor_manage.FlavorManageController, flavor_access.FlavorActionController @@ -164,121 +150,104 @@ def _create_controller(main_controller, controller_list, flavor_access_controller = functools.partial(_create_controller, - flavor_access.FlavorAccessController, [], []) + flavor_access.FlavorAccessController, []) flavor_extraspec_controller = functools.partial(_create_controller, - flavors_extraspecs.FlavorExtraSpecsController, [], []) + flavors_extraspecs.FlavorExtraSpecsController, []) floating_ip_dns_controller =
functools.partial(_create_controller, - floating_ip_dns.FloatingIPDNSDomainController, [], []) + floating_ip_dns.FloatingIPDNSDomainController, []) floating_ip_dnsentry_controller = functools.partial(_create_controller, - floating_ip_dns.FloatingIPDNSEntryController, [], []) + floating_ip_dns.FloatingIPDNSEntryController, []) floating_ip_pools_controller = functools.partial(_create_controller, - floating_ip_pools.FloatingIPPoolsController, [], []) + floating_ip_pools.FloatingIPPoolsController, []) floating_ips_controller = functools.partial(_create_controller, - floating_ips.FloatingIPController, [], []) + floating_ips.FloatingIPController, []) floating_ips_bulk_controller = functools.partial(_create_controller, - floating_ips_bulk.FloatingIPBulkController, [], []) + floating_ips_bulk.FloatingIPBulkController, []) fping_controller = functools.partial(_create_controller, - fping.FpingController, [], []) + fping.FpingController, []) hosts_controller = functools.partial( - _create_controller, hosts.HostController, [], []) + _create_controller, hosts.HostController, []) hypervisors_controller = functools.partial( - _create_controller, hypervisors.HypervisorsController, [], []) + _create_controller, hypervisors.HypervisorsController, []) images_controller = functools.partial( - _create_controller, images.ImagesController, - [image_size.ImageSizeController], []) + _create_controller, images.ImagesController, []) image_metadata_controller = functools.partial( - _create_controller, image_metadata.ImageMetadataController, - [], []) + _create_controller, image_metadata.ImageMetadataController, []) instance_actions_controller = functools.partial(_create_controller, - instance_actions.InstanceActionsController, [], []) + instance_actions.InstanceActionsController, []) instance_usage_audit_log_controller = functools.partial(_create_controller, - instance_usage_audit_log.InstanceUsageAuditLogController, [], []) + instance_usage_audit_log.InstanceUsageAuditLogController, []) ips_controller = functools.partial(_create_controller, - ips.IPsController, [], []) + ips.IPsController, []) keypairs_controller = functools.partial( - _create_controller, keypairs.KeypairController, [], []) + _create_controller, keypairs.KeypairController, []) limits_controller = functools.partial( - _create_controller, limits.LimitsController, - [ - used_limits.UsedLimitsController, - ], - []) + _create_controller, limits.LimitsController, []) migrations_controller = functools.partial(_create_controller, - migrations.MigrationsController, [], []) + migrations.MigrationsController, []) networks_controller = functools.partial(_create_controller, - networks.NetworkController, [], + networks.NetworkController, [networks_associate.NetworkAssociateActionController]) quota_classes_controller = functools.partial(_create_controller, - quota_classes.QuotaClassSetsController, [], []) + quota_classes.QuotaClassSetsController, []) quota_set_controller = functools.partial(_create_controller, - quota_sets.QuotaSetsController, [], []) + quota_sets.QuotaSetsController, []) security_group_controller = functools.partial(_create_controller, - security_groups.SecurityGroupController, [], []) + security_groups.SecurityGroupController, []) security_group_default_rules_controller = functools.partial(_create_controller, - security_group_default_rules.SecurityGroupDefaultRulesController, [], []) + security_group_default_rules.SecurityGroupDefaultRulesController, []) security_group_rules_controller = functools.partial(_create_controller, - 
security_groups.SecurityGroupRulesController, [], []) + security_groups.SecurityGroupRulesController, []) server_controller = functools.partial(_create_controller, servers.ServersController, - [ - config_drive.ConfigDriveController, - extended_availability_zone.ExtendedAZController, - extended_server_attributes.ExtendedServerAttributesController, - extended_status.ExtendedStatusController, - extended_volumes.ExtendedVolumesController, - hide_server_addresses.Controller, - keypairs.Controller, - security_groups.SecurityGroupsOutputController, - server_usage.ServerUsageController, - ], [ admin_actions.AdminActionsController, admin_password.AdminPasswordController, @@ -301,88 +270,90 @@ def _create_controller(main_controller, controller_list, console_auth_tokens_controller = functools.partial(_create_controller, - console_auth_tokens.ConsoleAuthTokensController, [], []) + console_auth_tokens.ConsoleAuthTokensController, []) consoles_controller = functools.partial(_create_controller, - consoles.ConsolesController, [], []) + consoles.ConsolesController, []) server_diagnostics_controller = functools.partial(_create_controller, - server_diagnostics.ServerDiagnosticsController, [], []) + server_diagnostics.ServerDiagnosticsController, []) server_external_events_controller = functools.partial(_create_controller, - server_external_events.ServerExternalEventsController, [], []) + server_external_events.ServerExternalEventsController, []) server_groups_controller = functools.partial(_create_controller, - server_groups.ServerGroupController, [], []) + server_groups.ServerGroupController, []) server_metadata_controller = functools.partial(_create_controller, - server_metadata.ServerMetadataController, [], []) + server_metadata.ServerMetadataController, []) server_migrations_controller = functools.partial(_create_controller, - server_migrations.ServerMigrationsController, [], []) + server_migrations.ServerMigrationsController, []) server_os_interface_controller = functools.partial(_create_controller, - attach_interfaces.InterfaceAttachmentController, [], []) + attach_interfaces.InterfaceAttachmentController, []) server_password_controller = functools.partial(_create_controller, - server_password.ServerPasswordController, [], []) + server_password.ServerPasswordController, []) server_remote_consoles_controller = functools.partial(_create_controller, - remote_consoles.RemoteConsolesController, [], []) + remote_consoles.RemoteConsolesController, []) server_security_groups_controller = functools.partial(_create_controller, - security_groups.ServerSecurityGroupController, [], []) + security_groups.ServerSecurityGroupController, []) server_tags_controller = functools.partial(_create_controller, - server_tags.ServerTagsController, [], []) + server_tags.ServerTagsController, []) +server_topology_controller = functools.partial(_create_controller, + server_topology.ServerTopologyController, []) server_volume_attachments_controller = functools.partial(_create_controller, - volumes.VolumeAttachmentController, [], []) + volumes.VolumeAttachmentController, []) services_controller = functools.partial(_create_controller, - services.ServiceController, [], []) + services.ServiceController, []) simple_tenant_usage_controller = functools.partial(_create_controller, - simple_tenant_usage.SimpleTenantUsageController, [], []) + simple_tenant_usage.SimpleTenantUsageController, []) snapshots_controller = functools.partial(_create_controller, - volumes.SnapshotController, [], []) + volumes.SnapshotController, []) 
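# (Editorial sketch, not part of the patch: every definition above uses
# functools.partial to defer controller construction. Calling, for example,
# snapshots_controller() is roughly equivalent to:
#
#     controller = wsgi.Resource(volumes.SnapshotController())
#     # the trailing [] means there are no action controllers to register
#
# so the route table below can reference lazily-built controllers, and the
# old extension-controller list has been dropped from the signature.)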
tenant_networks_controller = functools.partial(_create_controller, - tenant_networks.TenantNetworkController, [], []) + tenant_networks.TenantNetworkController, []) version_controller = functools.partial(_create_controller, - versionsV21.VersionsController, [], []) + versionsV21.VersionsController, []) virtual_interfaces_controller = functools.partial(_create_controller, - virtual_interfaces.ServerVirtualInterfaceController, [], []) + virtual_interfaces.ServerVirtualInterfaceController, []) volumes_controller = functools.partial(_create_controller, - volumes.VolumeController, [], []) + volumes.VolumeController, []) # NOTE(alex_xu): This is structure of this route list as below: # ( -# ('Route path': { +# ('Route path', { # 'HTTP method: [ # 'Controller', # 'The method of controller is used to handle this route' @@ -482,6 +453,9 @@ def _create_controller(main_controller, controller_list, ('/os-aggregates/{id}/action', { 'POST': [aggregates_controller, 'action'], }), + ('/os-aggregates/{id}/images', { + 'POST': [aggregates_controller, 'images'], + }), ('/os-assisted-volume-snapshots', { 'POST': [assisted_volume_snapshots_controller, 'create'] }), @@ -658,7 +632,9 @@ def _create_controller(main_controller, controller_list, }), ('/os-quota-class-sets/{id}', { 'GET': [quota_classes_controller, 'show'], - 'PUT': [quota_classes_controller, 'update'] + 'PUT': [quota_classes_controller, 'update'], + 'POST': [quota_classes_controller, 'create'], + 'DELETE': [quota_classes_controller, 'delete'] }), ('/os-quota-sets/{id}', { 'GET': [quota_set_controller, 'show'], @@ -703,6 +679,7 @@ def _create_controller(main_controller, controller_list, }), ('/os-server-groups/{id}', { 'GET': [server_groups_controller, 'show'], + 'PUT': [server_groups_controller, 'update'], 'DELETE': [server_groups_controller, 'delete'] }), ('/os-services', { @@ -863,6 +840,9 @@ def _create_controller(main_controller, controller_list, 'PUT': [server_tags_controller, 'update'], 'DELETE': [server_tags_controller, 'delete'] }), + ('/servers/{server_id}/topology', { + 'GET': [server_topology_controller, 'index'] + }), ) diff --git a/nova/api/openstack/compute/schemas/agents.py b/nova/api/openstack/compute/schemas/agents.py deleted file mode 100644 index 3ef66c84eb3..00000000000 --- a/nova/api/openstack/compute/schemas/agents.py +++ /dev/null @@ -1,94 +0,0 @@ -# Copyright 2013 NEC Corporation. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
-from nova.api.validation import parameter_types - -create = { - 'type': 'object', - 'properties': { - 'agent': { - 'type': 'object', - 'properties': { - 'hypervisor': { - 'type': 'string', 'minLength': 0, 'maxLength': 255, - 'pattern': '^[a-zA-Z0-9-._ ]*$' - }, - 'os': { - 'type': 'string', 'minLength': 0, 'maxLength': 255, - 'pattern': '^[a-zA-Z0-9-._ ]*$' - }, - 'architecture': { - 'type': 'string', 'minLength': 0, 'maxLength': 255, - 'pattern': '^[a-zA-Z0-9-._ ]*$' - }, - 'version': { - 'type': 'string', 'minLength': 0, 'maxLength': 255, - 'pattern': '^[a-zA-Z0-9-._ ]*$' - }, - 'url': { - 'type': 'string', 'minLength': 0, 'maxLength': 255, - 'format': 'uri' - }, - 'md5hash': { - 'type': 'string', 'minLength': 0, 'maxLength': 255, - 'pattern': '^[a-fA-F0-9]*$' - }, - }, - 'required': ['hypervisor', 'os', 'architecture', 'version', - 'url', 'md5hash'], - 'additionalProperties': False, - }, - }, - 'required': ['agent'], - 'additionalProperties': False, -} - - -update = { - 'type': 'object', - 'properties': { - 'para': { - 'type': 'object', - 'properties': { - 'version': { - 'type': 'string', 'minLength': 0, 'maxLength': 255, - 'pattern': '^[a-zA-Z0-9-._ ]*$' - }, - 'url': { - 'type': 'string', 'minLength': 0, 'maxLength': 255, - 'format': 'uri' - }, - 'md5hash': { - 'type': 'string', 'minLength': 0, 'maxLength': 255, - 'pattern': '^[a-fA-F0-9]*$' - }, - }, - 'required': ['version', 'url', 'md5hash'], - 'additionalProperties': False, - }, - }, - 'required': ['para'], - 'additionalProperties': False, -} - -index_query = { - 'type': 'object', - 'properties': { - 'hypervisor': parameter_types.common_query_param - }, - # NOTE(gmann): This is kept True to keep backward compatibility. - # As of now Schema validation stripped out the additional parameters and - # does not raise 400. In the future, we may block the additional parameters - # by bump in Microversion. - 'additionalProperties': True -} diff --git a/nova/api/openstack/compute/schemas/aggregate_images.py b/nova/api/openstack/compute/schemas/aggregate_images.py new file mode 100644 index 00000000000..b1b0cf84da7 --- /dev/null +++ b/nova/api/openstack/compute/schemas/aggregate_images.py @@ -0,0 +1,34 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
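# (Editorial note, illustrative only: the aggregate_images_v2_81 schema below
# validates the body of the new image pre-cache action, e.g.
#
#     POST /os-aggregates/{aggregate_id}/images
#     {"cache": [{"id": "70a599e0-31e7-49b7-b260-868f441e862b"}]}
#
# where "cache" must be a non-empty list and each entry carries exactly one
# image "id"; the UUID above is a made-up placeholder.)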
+ +from nova.api.validation import parameter_types + + +aggregate_images_v2_81 = { + 'type': 'object', + 'properties': { + 'cache': { + 'type': ['array'], + 'minItems': 1, + 'items': { + 'type': 'object', + 'properties': { + 'id': parameter_types.image_id, + }, + 'additionalProperties': False, + 'required': ['id'], + }, + }, + }, + 'required': ['cache'], + 'additionalProperties': False, +} diff --git a/nova/api/openstack/compute/schemas/aggregates.py b/nova/api/openstack/compute/schemas/aggregates.py index 396e4fe31c0..75c9ea8a50d 100644 --- a/nova/api/openstack/compute/schemas/aggregates.py +++ b/nova/api/openstack/compute/schemas/aggregates.py @@ -85,7 +85,7 @@ 'add_host': { 'type': 'object', 'properties': { - 'host': parameter_types.hostname, + 'host': parameter_types.fqdn, }, 'required': ['host'], 'additionalProperties': False, @@ -102,7 +102,7 @@ 'remove_host': { 'type': 'object', 'properties': { - 'host': parameter_types.hostname, + 'host': parameter_types.fqdn, }, 'required': ['host'], 'additionalProperties': False, diff --git a/nova/api/openstack/compute/schemas/assisted_volume_snapshots.py b/nova/api/openstack/compute/schemas/assisted_volume_snapshots.py index 74e3da34347..0a13c50b114 100644 --- a/nova/api/openstack/compute/schemas/assisted_volume_snapshots.py +++ b/nova/api/openstack/compute/schemas/assisted_volume_snapshots.py @@ -12,6 +12,8 @@ # License for the specific language governing permissions and limitations # under the License. +import copy + from nova.api.validation import parameter_types snapshots_create = { @@ -58,7 +60,10 @@ }, # NOTE(gmann): This is kept True to keep backward compatibility. # As of now Schema validation stripped out the additional parameters and - # does not raise 400. In the future, we may block the additional parameters - # by bump in Microversion. + # does not raise 400. In microversion 2.75, we have blocked the additional + # parameters. 'additionalProperties': True } + +delete_query_275 = copy.deepcopy(delete_query) +delete_query_275['additionalProperties'] = False diff --git a/nova/api/openstack/compute/schemas/cells.py b/nova/api/openstack/compute/schemas/cells.py deleted file mode 100644 index f0d52f6792b..00000000000 --- a/nova/api/openstack/compute/schemas/cells.py +++ /dev/null @@ -1,111 +0,0 @@ -# Copyright 2014 NEC Corporation. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import copy - -from nova.api.validation import parameter_types - - -create = { - 'type': 'object', - 'properties': { - 'cell': { - 'type': 'object', - 'properties': { - 'name': parameter_types.cell_name, - 'type': { - 'type': 'string', - 'enum': ['parent', 'child'], - }, - - # NOTE: In unparse_transport_url(), a url consists of the - # following parameters: - # "qpid://:@:/" - # or - # "rabbit://:@:/" - # Then the url is stored into transport_url of cells table - # which is defined with String(255). 
- 'username': { - 'type': 'string', 'maxLength': 255, - 'pattern': '^[a-zA-Z0-9-_]*$' - }, - 'password': { - # Allow to specify any string for strong password. - 'type': 'string', 'maxLength': 255, - }, - 'rpc_host': parameter_types.hostname_or_ip_address, - 'rpc_port': parameter_types.tcp_udp_port, - 'rpc_virtual_host': parameter_types.hostname_or_ip_address, - }, - 'required': ['name'], - 'additionalProperties': False, - }, - }, - 'required': ['cell'], - 'additionalProperties': False, -} - - -create_v20 = copy.deepcopy(create) -create_v20['properties']['cell']['properties']['name'] = (parameter_types. - cell_name_leading_trailing_spaces) - - -update = { - 'type': 'object', - 'properties': { - 'cell': { - 'type': 'object', - 'properties': { - 'name': parameter_types.cell_name, - 'type': { - 'type': 'string', - 'enum': ['parent', 'child'], - }, - 'username': { - 'type': 'string', 'maxLength': 255, - 'pattern': '^[a-zA-Z0-9-_]*$' - }, - 'password': { - 'type': 'string', 'maxLength': 255, - }, - 'rpc_host': parameter_types.hostname_or_ip_address, - 'rpc_port': parameter_types.tcp_udp_port, - 'rpc_virtual_host': parameter_types.hostname_or_ip_address, - }, - 'additionalProperties': False, - }, - }, - 'required': ['cell'], - 'additionalProperties': False, -} - - -update_v20 = copy.deepcopy(create) -update_v20['properties']['cell']['properties']['name'] = (parameter_types. - cell_name_leading_trailing_spaces) - - -sync_instances = { - 'type': 'object', - 'properties': { - 'project_id': parameter_types.project_id, - 'deleted': parameter_types.boolean, - 'updated_since': { - 'type': 'string', - 'format': 'date-time', - }, - }, - 'additionalProperties': False, -} diff --git a/nova/api/openstack/compute/schemas/evacuate.py b/nova/api/openstack/compute/schemas/evacuate.py index 439ab2d3fd1..a415a97f891 100644 --- a/nova/api/openstack/compute/schemas/evacuate.py +++ b/nova/api/openstack/compute/schemas/evacuate.py @@ -23,7 +23,7 @@ 'evacuate': { 'type': 'object', 'properties': { - 'host': parameter_types.hostname, + 'host': parameter_types.fqdn, 'onSharedStorage': parameter_types.boolean, 'adminPass': parameter_types.admin_password, }, @@ -42,3 +42,7 @@ evacuate_v2_29 = copy.deepcopy(evacuate_v214) evacuate_v2_29['properties']['evacuate']['properties'][ 'force'] = parameter_types.boolean + +# v2.68 removes the 'force' parameter added in v2.29, meaning it is identical +# to v2.14 +evacuate_v2_68 = copy.deepcopy(evacuate_v214) diff --git a/nova/api/openstack/compute/schemas/flavor_manage.py b/nova/api/openstack/compute/schemas/flavor_manage.py index 08899a21eee..ad3ebe57a7a 100644 --- a/nova/api/openstack/compute/schemas/flavor_manage.py +++ b/nova/api/openstack/compute/schemas/flavor_manage.py @@ -39,7 +39,7 @@ # positive ( > 0) float 'rxtx_factor': { 'type': ['number', 'string'], - 'pattern': '^[0-9]+(\.[0-9]+)?$', + 'pattern': r'^[0-9]+(\.[0-9]+)?$', 'minimum': 0, 'exclusiveMinimum': True, # maximum's value is limited to db constant's # SQL_SP_FLOAT_MAX (in nova/db/constants.py) diff --git a/nova/api/openstack/compute/schemas/flavors.py b/nova/api/openstack/compute/schemas/flavors.py index 91ca03e5d52..c6064f02ef4 100644 --- a/nova/api/openstack/compute/schemas/flavors.py +++ b/nova/api/openstack/compute/schemas/flavors.py @@ -11,8 +11,21 @@ # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
+ +import copy + +from nova.api.validation import parameter_types + +# NOTE(takashin): The following sort keys are defined for backward +# compatibility. If they are changed, the API microversion should be bumped. +VALID_SORT_KEYS = [ + 'created_at', 'description', 'disabled', 'ephemeral_gb', 'flavorid', 'id', + 'is_public', 'memory_mb', 'name', 'root_gb', 'rxtx_factor', 'swap', + 'updated_at', 'vcpu_weight', 'vcpus' +] + +VALID_SORT_DIR = ['asc', 'desc'] + index_query = { 'type': 'object', 'properties': { @@ -22,12 +35,17 @@ 'is_public': parameter_types.multi_params({'type': 'string'}), 'minRam': parameter_types.multi_params({'type': 'string'}), 'minDisk': parameter_types.multi_params({'type': 'string'}), - 'sort_key': parameter_types.multi_params({'type': 'string'}), - 'sort_dir': parameter_types.multi_params({'type': 'string'}) + 'sort_key': parameter_types.multi_params({'type': 'string', + 'enum': VALID_SORT_KEYS}), + 'sort_dir': parameter_types.multi_params({'type': 'string', + 'enum': VALID_SORT_DIR}) }, # NOTE(gmann): This is kept True to keep backward compatibility. # As of now Schema validation stripped out the additional parameters and - # does not raise 400. In the future, we may block the additional parameters - # by bump in Microversion. + # does not raise 400. In microversion 2.75, we have blocked the additional + # parameters. 'additionalProperties': True } + +index_query_275 = copy.deepcopy(index_query) +index_query_275['additionalProperties'] = False diff --git a/nova/api/openstack/compute/schemas/hosts.py b/nova/api/openstack/compute/schemas/hosts.py index e708e8a1ca3..983e4283933 100644 --- a/nova/api/openstack/compute/schemas/hosts.py +++ b/nova/api/openstack/compute/schemas/hosts.py @@ -44,7 +44,7 @@ }, # NOTE(gmann): This is kept True to keep backward compatibility. # As of now Schema validation stripped out the additional parameters and - # does not raise 400. In the future, we may block the additional parameters - # by bump in Microversion. + # does not raise 400. This API is deprecated in microversion 2.43 so we + # do not need to update the additionalProperties to False. 'additionalProperties': True } diff --git a/nova/api/openstack/compute/schemas/hypervisors.py b/nova/api/openstack/compute/schemas/hypervisors.py index 392b324dd98..479855ab713 100644 --- a/nova/api/openstack/compute/schemas/hypervisors.py +++ b/nova/api/openstack/compute/schemas/hypervisors.py @@ -35,7 +35,7 @@ # and requesting hosted servers in the GET /os-hypervisors and # GET /os-hypervisors/detail response. 'hypervisor_hostname_pattern': parameter_types.single_param( - parameter_types.hostname), + parameter_types.fqdn), 'with_servers': parameter_types.single_param( parameter_types.boolean) }, diff --git a/nova/api/openstack/compute/schemas/instance_actions.py b/nova/api/openstack/compute/schemas/instance_actions.py index 25e61ff61ac..dc1f49a567f 100644 --- a/nova/api/openstack/compute/schemas/instance_actions.py +++ b/nova/api/openstack/compute/schemas/instance_actions.py @@ -12,6 +12,8 @@ # License for the specific language governing permissions and limitations # under the License.
+import copy + from nova.api.validation import parameter_types list_query_params_v258 = { @@ -27,3 +29,9 @@ }, 'additionalProperties': False } + +list_query_params_v266 = copy.deepcopy(list_query_params_v258) +list_query_params_v266['properties'].update({ + 'changes-before': parameter_types.single_param( + {'type': 'string', 'format': 'date-time'}), +}) diff --git a/nova/api/openstack/compute/schemas/keypairs.py b/nova/api/openstack/compute/schemas/keypairs.py index a3410eee18d..7ebd3c7433e 100644 --- a/nova/api/openstack/compute/schemas/keypairs.py +++ b/nova/api/openstack/compute/schemas/keypairs.py @@ -105,3 +105,10 @@ show_query_schema_v210 = index_query_schema_v210 delete_query_schema_v20 = index_query_schema_v20 delete_query_schema_v210 = index_query_schema_v210 + +index_query_schema_v275 = copy.deepcopy(index_query_schema_v235) +index_query_schema_v275['additionalProperties'] = False +show_query_schema_v275 = copy.deepcopy(show_query_schema_v210) +show_query_schema_v275['additionalProperties'] = False +delete_query_schema_v275 = copy.deepcopy(delete_query_schema_v210) +delete_query_schema_v275['additionalProperties'] = False diff --git a/nova/api/openstack/compute/schemas/limits.py b/nova/api/openstack/compute/schemas/limits.py index 6cd3da580bf..e269cc55ab0 100644 --- a/nova/api/openstack/compute/schemas/limits.py +++ b/nova/api/openstack/compute/schemas/limits.py @@ -11,6 +11,8 @@ # License for the specific language governing permissions and limitations # under the License. +import copy + from nova.api.validation import parameter_types @@ -20,5 +22,10 @@ 'tenant_id': parameter_types.common_query_param, }, # For backward compatible changes + # In microversion 2.75, we have blocked the additional + # parameters. 'additionalProperties': True } + +limits_query_schema_275 = copy.deepcopy(limits_query_schema) +limits_query_schema_275['additionalProperties'] = False diff --git a/nova/api/openstack/compute/schemas/lock_server.py b/nova/api/openstack/compute/schemas/lock_server.py new file mode 100644 index 00000000000..8d3db82a9f7 --- /dev/null +++ b/nova/api/openstack/compute/schemas/lock_server.py @@ -0,0 +1,28 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
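# (Editorial note, illustrative only: under the lock_v2_73 schema defined
# below, both of these request bodies validate —
#
#     {"lock": null}
#     {"lock": {"locked_reason": "maintenance window"}}
#
# while {"lock": {"locked_reason": ""}} fails the minLength check and any
# extra key under "lock" is rejected by additionalProperties.)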
+ +lock_v2_73 = { + 'type': 'object', + 'properties': { + 'lock': { + 'type': ['object', 'null'], + 'properties': { + 'locked_reason': { + 'type': 'string', 'minLength': 1, 'maxLength': 255, + }, + }, + 'additionalProperties': False, + }, + }, + 'required': ['lock'], + 'additionalProperties': False, +} diff --git a/nova/api/openstack/compute/schemas/migrate_server.py b/nova/api/openstack/compute/schemas/migrate_server.py index 5cb28efc467..8a274fbc6e1 100644 --- a/nova/api/openstack/compute/schemas/migrate_server.py +++ b/nova/api/openstack/compute/schemas/migrate_server.py @@ -17,7 +17,7 @@ from nova.api.validation import parameter_types -host = copy.deepcopy(parameter_types.hostname) +host = copy.deepcopy(parameter_types.fqdn) host['type'] = ['string', 'null'] migrate_v2_56 = { @@ -68,3 +68,7 @@ migrate_live_v2_30 = copy.deepcopy(migrate_live_v2_25) migrate_live_v2_30['properties']['os-migrateLive']['properties'][ 'force'] = parameter_types.boolean + +# v2.68 removes the 'force' parameter added in v2.30, meaning it is identical +# to v2.25 +migrate_live_v2_68 = copy.deepcopy(migrate_live_v2_25) diff --git a/nova/api/openstack/compute/schemas/migrations.py b/nova/api/openstack/compute/schemas/migrations.py index 31f07b0ce39..0979e58c9dd 100644 --- a/nova/api/openstack/compute/schemas/migrations.py +++ b/nova/api/openstack/compute/schemas/migrations.py @@ -41,3 +41,17 @@ {'type': 'string', 'format': 'date-time'}), }) list_query_params_v259['additionalProperties'] = False + +list_query_params_v266 = copy.deepcopy(list_query_params_v259) +list_query_params_v266['properties'].update({ + 'changes-before': parameter_types.single_param( + {'type': 'string', 'format': 'date-time'}), +}) + +list_query_params_v280 = copy.deepcopy(list_query_params_v266) +list_query_params_v280['properties'].update({ + # The 2.80 microversion added support for filtering migrations + # by user_id and/or project_id + 'user_id': parameter_types.single_param({'type': 'string'}), + 'project_id': parameter_types.single_param({'type': 'string'}), +}) diff --git a/nova/api/openstack/compute/schemas/networks.py b/nova/api/openstack/compute/schemas/networks.py deleted file mode 100644 index 2fd68709439..00000000000 --- a/nova/api/openstack/compute/schemas/networks.py +++ /dev/null @@ -1,77 +0,0 @@ -# Copyright 2015 NEC Corporation. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -from nova.api.validation import parameter_types - - -create = { - 'type': 'object', - 'properties': { - 'network': { - 'type': 'object', - 'properties': { - 'label': { - 'type': 'string', 'maxLength': 255 - }, - 'ipam': parameter_types.boolean, - 'cidr': parameter_types.cidr, - 'cidr_v6': parameter_types.cidr, - 'project_id': parameter_types.project_id, - 'multi_host': parameter_types.boolean, - 'gateway': parameter_types.ipv4, - 'gateway_v6': parameter_types.ipv6, - 'bridge': { - 'type': 'string', - }, - 'bridge_interface': { - 'type': 'string', - }, - # NOTE: In _extract_subnets(), dns1, dns2 dhcp_server are - # used only for IPv4, not IPv6. - 'dns1': parameter_types.ipv4, - 'dns2': parameter_types.ipv4, - 'dhcp_server': parameter_types.ipv4, - - 'fixed_cidr': parameter_types.cidr, - 'allowed_start': parameter_types.ip_address, - 'allowed_end': parameter_types.ip_address, - 'enable_dhcp': parameter_types.boolean, - 'share_address': parameter_types.boolean, - 'mtu': parameter_types.positive_integer_with_empty_str, - 'vlan': parameter_types.positive_integer_with_empty_str, - 'vlan_start': parameter_types.positive_integer_with_empty_str, - 'vpn_start': { - 'type': 'string', - }, - }, - 'required': ['label'], - 'oneOf': [ - {'required': ['cidr']}, - {'required': ['cidr_v6']} - ], - 'additionalProperties': False, - }, - }, - 'required': ['network'], - 'additionalProperties': False, -} - -add_network_to_project = { - 'type': 'object', - 'properties': { - 'id': {'type': ['string', 'null']} - }, - 'required': ['id'], - 'additionalProperties': False -} diff --git a/nova/api/openstack/compute/schemas/networks_associate.py b/nova/api/openstack/compute/schemas/networks_associate.py deleted file mode 100644 index 73fc6bf7d20..00000000000 --- a/nova/api/openstack/compute/schemas/networks_associate.py +++ /dev/null @@ -1,24 +0,0 @@ -# Copyright 2015 NEC Corporation. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from nova.api.validation import parameter_types - -associate_host = { - 'type': 'object', - 'properties': { - 'associate_host': parameter_types.hostname - }, - 'required': ['associate_host'], - 'additionalProperties': False -} diff --git a/nova/api/openstack/compute/schemas/quota_sets.py b/nova/api/openstack/compute/schemas/quota_sets.py index 1942488ac25..236f1235f4b 100644 --- a/nova/api/openstack/compute/schemas/quota_sets.py +++ b/nova/api/openstack/compute/schemas/quota_sets.py @@ -41,6 +41,8 @@ 'injected_file_path_bytes': common_quota, 'server_groups': common_quota, 'server_group_members': common_quota, + # NOTE(stephenfin): This will always be rejected since it was nova-network + # only, but we need to allow users to submit it at a minimum 'networks': common_quota } @@ -60,7 +62,7 @@ 'type': 'object', 'quota_set': { 'properties': update_quota_set, - 'additionalProperties': False, + 'additionalProperties': True, }, }, 'required': ['quota_set'], @@ -85,7 +87,10 @@ }, # NOTE(gmann): This is kept True to keep backward compatibility. 
# As of now Schema validation stripped out the additional parameters and - # does not raise 400. In the future, we may block the additional parameters - # by bump in Microversion. + # does not raise 400. In microversion 2.75, we have blocked the additional + # parameters. 'additionalProperties': True } + +query_schema_275 = copy.deepcopy(query_schema) +query_schema_275['additionalProperties'] = False diff --git a/nova/api/openstack/compute/schemas/remote_consoles.py b/nova/api/openstack/compute/schemas/remote_consoles.py index baa47468075..a93eed7d9c0 100644 --- a/nova/api/openstack/compute/schemas/remote_consoles.py +++ b/nova/api/openstack/compute/schemas/remote_consoles.py @@ -77,7 +77,7 @@ 'properties': { 'type': { 'type': 'string', - 'enum': ['serial'], + 'enum': ['serial', 'shellinabox'], }, }, 'required': ['type'], @@ -101,7 +101,7 @@ 'type': { 'type': 'string', 'enum': ['novnc', 'xvpvnc', 'rdp-html5', - 'spice-html5', 'serial'], + 'spice-html5', 'serial', 'shellinabox'], }, }, 'required': ['protocol', 'type'], @@ -125,7 +125,7 @@ 'type': { 'type': 'string', 'enum': ['novnc', 'xvpvnc', 'rdp-html5', - 'spice-html5', 'serial', 'webmks'], + 'spice-html5', 'serial', 'webmks', 'shellinabox'], }, }, 'required': ['protocol', 'type'], diff --git a/nova/api/openstack/compute/schemas/security_groups.py b/nova/api/openstack/compute/schemas/security_groups.py index 5e025b069a7..8e0dc661712 100644 --- a/nova/api/openstack/compute/schemas/security_groups.py +++ b/nova/api/openstack/compute/schemas/security_groups.py @@ -25,7 +25,7 @@ }, # NOTE(gmann): This is kept True to keep backward compatibility. # As of now Schema validation stripped out the additional parameters and - # does not raise 400. In the future, we may block the additional parameters - # by bump in Microversion. + # does not raise 400. This API is deprecated in microversion 2.36 so we + # do not need to update the additionalProperties to False. 'additionalProperties': True } diff --git a/nova/api/openstack/compute/schemas/server_external_events.py b/nova/api/openstack/compute/schemas/server_external_events.py index 38435e0dc5c..b8a89e047d4 100644 --- a/nova/api/openstack/compute/schemas/server_external_events.py +++ b/nova/api/openstack/compute/schemas/server_external_events.py @@ -55,3 +55,11 @@ create_v251 = copy.deepcopy(create) name = create_v251['properties']['events']['items']['properties']['name'] name['enum'].append('volume-extended') + +create_v276 = copy.deepcopy(create_v251) +name = create_v276['properties']['events']['items']['properties']['name'] +name['enum'].append('power-update') + +create_v282 = copy.deepcopy(create_v276) +name = create_v282['properties']['events']['items']['properties']['name'] +name['enum'].append('accelerator-request-bound') diff --git a/nova/api/openstack/compute/schemas/server_groups.py b/nova/api/openstack/compute/schemas/server_groups.py index d8401e15834..e814ebcb60f 100644 --- a/nova/api/openstack/compute/schemas/server_groups.py +++ b/nova/api/openstack/compute/schemas/server_groups.py @@ -11,6 +11,7 @@ # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. + import copy from nova.api.validation import parameter_types @@ -27,15 +28,18 @@ 'name': parameter_types.name, 'policies': { # This allows only a single item and it must be one of the - # enumerated values. So this is really just a single string - # value, but for legacy reasons is an array.
We could - # probably change the type from array to string with a - # microversion at some point but it's very low priority. + # enumerated values. It's changed to a single string value + # in 2.64. 'type': 'array', - 'items': [{ - 'type': 'string', - 'enum': ['anti-affinity', 'affinity']}], + 'items': [ + { + 'type': 'string', + 'enum': ['anti-affinity', 'affinity'], + }, + ], 'uniqueItems': True, + 'minItems': 1, + 'maxItems': 1, 'additionalItems': False, } }, @@ -80,6 +84,25 @@ 'offset': parameter_types.multi_params( parameter_types.non_negative_integer), }, - # For backward compatible changes + # For backward compatible changes. In microversion 2.75, we have + # blocked the additional parameters. 'additionalProperties': True } + +update = { + 'type': 'object', + 'properties': { + 'add_members': { + 'type': 'array', + 'items': parameter_types.server_id, + }, + 'remove_members': { + 'type': 'array', + 'items': parameter_types.server_id, + } + }, + 'additionalProperties': False +} + +server_groups_query_param_275 = copy.deepcopy(server_groups_query_param) +server_groups_query_param_275['additionalProperties'] = False diff --git a/nova/api/openstack/compute/schemas/servers.py b/nova/api/openstack/compute/schemas/servers.py index 768288725fc..300411de40a 100644 --- a/nova/api/openstack/compute/schemas/servers.py +++ b/nova/api/openstack/compute/schemas/servers.py @@ -136,7 +136,7 @@ 'build_near_host_ip': parameter_types.ip_address, 'cidr': { 'type': 'string', - 'pattern': '^\/[0-9a-f.:]+$' + 'pattern': '^/[0-9a-f.:]+$' }, }, # NOTE: As this Mail: @@ -146,7 +146,7 @@ 'additionalProperties': True } -base_create = { +create = { 'type': 'object', 'properties': { 'server': { @@ -201,7 +201,7 @@ 'type': 'object', 'properties': { # NOTE(oomichi): allocate_for_instance() of - # neutronv2/api.py gets security_group names + # network/neutron.py gets security_group names # or UUIDs from this parameter. # parameter_types.name allows both format. 
'name': parameter_types.name, @@ -225,33 +225,31 @@ 'additionalProperties': False, } - -base_create_v20 = copy.deepcopy(base_create) -base_create_v20['properties']['server'][ +create_v20 = copy.deepcopy(create) +create_v20['properties']['server'][ 'properties']['name'] = parameter_types.name_with_leading_trailing_spaces -base_create_v20['properties']['server']['properties'][ +create_v20['properties']['server']['properties'][ 'availability_zone'] = parameter_types.name_with_leading_trailing_spaces -base_create_v20['properties']['server']['properties'][ +create_v20['properties']['server']['properties'][ 'key_name'] = parameter_types.name_with_leading_trailing_spaces -base_create_v20['properties']['server']['properties'][ +create_v20['properties']['server']['properties'][ 'security_groups']['items']['properties']['name'] = ( parameter_types.name_with_leading_trailing_spaces) -base_create_v20['properties']['server']['properties'][ - 'user_data'] = { - 'oneOf': [{'type': 'string', 'format': 'base64', 'maxLength': 65535}, - {'type': 'null'}, - ], - } - -base_create_v219 = copy.deepcopy(base_create) -base_create_v219['properties']['server'][ +create_v20['properties']['server']['properties']['user_data'] = { + 'oneOf': [{'type': 'string', 'format': 'base64', 'maxLength': 65535}, + {'type': 'null'}, + ], +} + +create_v219 = copy.deepcopy(create) +create_v219['properties']['server'][ 'properties']['description'] = parameter_types.description -base_create_v232 = copy.deepcopy(base_create_v219) -base_create_v232['properties']['server'][ +create_v232 = copy.deepcopy(create_v219) +create_v232['properties']['server'][ 'properties']['networks']['items'][ 'properties']['tag'] = parameter_types.tag -base_create_v232['properties']['server'][ +create_v232['properties']['server'][ 'properties']['block_device_mapping_v2']['items'][ 'properties']['tag'] = parameter_types.tag @@ -262,15 +260,15 @@ # in version 2.32 only. Since we need a new microversion to add request # body attributes, tags have been re-added in version 2.42. -# NOTE(gmann) Below schema 'base_create_v233' is added (builds on 2.19 schema) +# NOTE(gmann) Below schema 'create_v233' is added (builds on 2.19 schema) # to keep the above mentioned behavior while merging the extension schema code # into server schema file. Below is the ref code where BDM tag was originally # got added for 2.32 microversion *only*. -# Ref- https://github.com/openstack/nova/blob/ +# Ref- https://opendev.org/openstack/nova/src/commit/ # 9882a60e69a5ab8da314a199a56defc05098b743/nova/api/ # openstack/compute/block_device_mapping.py#L71 -base_create_v233 = copy.deepcopy(base_create_v219) -base_create_v233['properties']['server'][ +create_v233 = copy.deepcopy(create_v219) +create_v233['properties']['server'][ 'properties']['networks']['items'][ 'properties']['tag'] = parameter_types.tag @@ -278,9 +276,9 @@ # 1. server.networks is required # 2. server.networks is now either an enum or a list # 3. 
server.networks.uuid is now required to be a uuid -base_create_v237 = copy.deepcopy(base_create_v233) -base_create_v237['properties']['server']['required'].append('networks') -base_create_v237['properties']['server']['properties']['networks'] = { +create_v237 = copy.deepcopy(create_v233) +create_v237['properties']['server']['required'].append('networks') +create_v237['properties']['server']['properties']['networks'] = { 'oneOf': [ {'type': 'array', 'items': { @@ -299,11 +297,10 @@ {'type': 'string', 'enum': ['none', 'auto']}, ]} - # 2.42 builds on 2.37 and re-introduces the tag field to the list of network # objects. -base_create_v242 = copy.deepcopy(base_create_v237) -base_create_v242['properties']['server']['properties']['networks'] = { +create_v242 = copy.deepcopy(create_v237) +create_v242['properties']['server']['properties']['networks'] = { 'oneOf': [ {'type': 'array', 'items': { @@ -322,34 +319,48 @@ }, {'type': 'string', 'enum': ['none', 'auto']}, ]} -base_create_v242['properties']['server'][ +create_v242['properties']['server'][ 'properties']['block_device_mapping_v2']['items'][ 'properties']['tag'] = parameter_types.tag - # 2.52 builds on 2.42 and makes the following changes: # Allowing adding tags to instances when booting -base_create_v252 = copy.deepcopy(base_create_v242) -base_create_v252['properties']['server']['properties']['tags'] = { +create_v252 = copy.deepcopy(create_v242) +create_v252['properties']['server']['properties']['tags'] = { "type": "array", "items": parameter_types.tag, "maxItems": instance.MAX_TAG_COUNT } - # 2.57 builds on 2.52 and removes the personality parameter. -base_create_v257 = copy.deepcopy(base_create_v252) -base_create_v257['properties']['server']['properties'].pop('personality') - +create_v257 = copy.deepcopy(create_v252) +create_v257['properties']['server']['properties'].pop('personality') # 2.63 builds on 2.57 and makes the following changes: # Allowing adding trusted certificates to instances when booting -base_create_v263 = copy.deepcopy(base_create_v257) -base_create_v263['properties']['server']['properties'][ +create_v263 = copy.deepcopy(create_v257) +create_v263['properties']['server']['properties'][ 'trusted_image_certificates'] = parameter_types.trusted_certs - -base_update = { +# Add volume type in block_device_mapping_v2. 
+create_v267 = copy.deepcopy(create_v263) +create_v267['properties']['server']['properties'][ + 'block_device_mapping_v2']['items'][ + 'properties']['volume_type'] = parameter_types.volume_type + +# Add host and hypervisor_hostname in server +create_v274 = copy.deepcopy(create_v267) +create_v274['properties']['server'][ + 'properties']['host'] = parameter_types.fqdn +create_v274['properties']['server'][ + 'properties']['hypervisor_hostname'] = parameter_types.fqdn + +# Add hostname in server +create_v290 = copy.deepcopy(create_v274) +create_v290['properties']['server'][ + 'properties']['hostname'] = parameter_types.hostname + +update = { 'type': 'object', 'properties': { 'server': { @@ -367,16 +378,20 @@ 'additionalProperties': False, } - -base_update_v20 = copy.deepcopy(base_update) -base_update_v20['properties']['server'][ +update_v20 = copy.deepcopy(update) +update_v20['properties']['server'][ 'properties']['name'] = parameter_types.name_with_leading_trailing_spaces -base_update_v219 = copy.deepcopy(base_update) -base_update_v219['properties']['server'][ +update_v219 = copy.deepcopy(update) +update_v219['properties']['server'][ 'properties']['description'] = parameter_types.description -base_rebuild = { + +update_v290 = copy.deepcopy(update_v219) +update_v290['properties']['server'][ + 'properties']['hostname'] = parameter_types.hostname + +rebuild = { 'type': 'object', 'properties': { 'rebuild': { @@ -400,25 +415,24 @@ 'additionalProperties': False, } - -base_rebuild_v20 = copy.deepcopy(base_rebuild) -base_rebuild_v20['properties']['rebuild'][ +rebuild_v20 = copy.deepcopy(rebuild) +rebuild_v20['properties']['rebuild'][ 'properties']['name'] = parameter_types.name_with_leading_trailing_spaces -base_rebuild_v219 = copy.deepcopy(base_rebuild) -base_rebuild_v219['properties']['rebuild'][ +rebuild_v219 = copy.deepcopy(rebuild) +rebuild_v219['properties']['rebuild'][ 'properties']['description'] = parameter_types.description -base_rebuild_v254 = copy.deepcopy(base_rebuild_v219) -base_rebuild_v254['properties']['rebuild'][ +rebuild_v254 = copy.deepcopy(rebuild_v219) +rebuild_v254['properties']['rebuild'][ 'properties']['key_name'] = parameter_types.name_or_none # 2.57 builds on 2.54 and makes the following changes: # 1. Remove the personality parameter. # 2. Add the user_data parameter which is nullable so user_data can be reset. 
-base_rebuild_v257 = copy.deepcopy(base_rebuild_v254) -base_rebuild_v257['properties']['rebuild']['properties'].pop('personality') -base_rebuild_v257['properties']['rebuild']['properties']['user_data'] = ({ +rebuild_v257 = copy.deepcopy(rebuild_v254) +rebuild_v257['properties']['rebuild']['properties'].pop('personality') +rebuild_v257['properties']['rebuild']['properties']['user_data'] = ({ 'oneOf': [ {'type': 'string', 'format': 'base64', 'maxLength': 65535}, {'type': 'null'} @@ -427,10 +441,15 @@ # 2.63 builds on 2.57 and makes the following changes: # Allowing adding trusted certificates to instances when rebuilding -base_rebuild_v263 = copy.deepcopy(base_rebuild_v257) -base_rebuild_v263['properties']['rebuild']['properties'][ +rebuild_v263 = copy.deepcopy(rebuild_v257) +rebuild_v263['properties']['rebuild']['properties'][ 'trusted_image_certificates'] = parameter_types.trusted_certs +rebuild_v290 = copy.deepcopy(rebuild_v263) +rebuild_v290['properties']['rebuild']['properties'][ + 'hostname'] = parameter_types.hostname + + resize = { 'type': 'object', 'properties': { @@ -465,7 +484,6 @@ 'additionalProperties': False } - create_image_v20 = copy.deepcopy(create_image) create_image_v20['properties']['createImage'][ 'properties']['name'] = parameter_types.name_with_leading_trailing_spaces @@ -524,6 +542,9 @@ 'shutdown_terminate', 'user_data', 'vcpus', 'vm_mode' ] +# From microversion 2.73 we start offering locked as a valid sort key. +SERVER_LIST_IGNORE_SORT_KEY_V273 = list(SERVER_LIST_IGNORE_SORT_KEY) +SERVER_LIST_IGNORE_SORT_KEY_V273.remove('locked') VALID_SORT_KEYS = { "type": "string", @@ -538,6 +559,14 @@ SERVER_LIST_IGNORE_SORT_KEY } +# We reuse the existing list and add locked to the list of valid sort keys. +VALID_SORT_KEYS_V273 = { + "type": "string", + "enum": ['locked'] + list( + set(VALID_SORT_KEYS["enum"]) - set(SERVER_LIST_IGNORE_SORT_KEY)) + + SERVER_LIST_IGNORE_SORT_KEY_V273 +} + query_params_v21 = { 'type': 'object', 'properties': { @@ -599,6 +628,8 @@ # For backward-compatible additionalProperties is set to be True here. # And we will either strip the extra params out or raise HTTP 400 # according to the params' value in the later process. + # This has been changed to False in microversion 2.75. From + # microversion 2.75, no additional unknown parameter will be allowed. 'additionalProperties': True, # Prevent internal-attributes that are started with underscore from # being stripped out in schema validation, and raise HTTP 400 in API. @@ -621,3 +652,34 @@ 'not-tags': parameter_types.common_query_regex_param, 'not-tags-any': parameter_types.common_query_regex_param, }) + +query_params_v266 = copy.deepcopy(query_params_v226) +query_params_v266['properties'].update({ + 'changes-before': multi_params({'type': 'string', + 'format': 'date-time'}), +}) + +query_params_v273 = copy.deepcopy(query_params_v266) +query_params_v273['properties'].update({ + 'sort_key': multi_params(VALID_SORT_KEYS_V273), + 'locked': parameter_types.common_query_param, +}) + +# Microversion 2.75 makes the query schema disallow any invalid or unknown +# query parameters (filter or sort keys). +# *****Schema updates for microversion 2.75 start here******* +query_params_v275 = copy.deepcopy(query_params_v273) +# 1. Update sort_keys to allow only valid sort keys: +# NOTE(gmann): Remove the ignored sort keys now because 'additionalProperties' +# is False for the query schema. Starting from microversion 2.75, the API will +# raise 400 for any not-allowed sort keys instead of ignoring them.
+VALID_SORT_KEYS_V275 = copy.deepcopy(VALID_SORT_KEYS_V273) +VALID_SORT_KEYS_V275['enum'] = list( + set(VALID_SORT_KEYS_V273["enum"]) - set( + SERVER_LIST_IGNORE_SORT_KEY_V273)) +query_params_v275['properties'].update({ + 'sort_key': multi_params(VALID_SORT_KEYS_V275), +}) +# 2. Make 'additionalProperties' False. +query_params_v275['additionalProperties'] = False +# *****Schema updates for microversion 2.75 end here******* diff --git a/nova/api/openstack/compute/schemas/services.py b/nova/api/openstack/compute/schemas/services.py index 45cded092f5..2ebd4b9f650 100644 --- a/nova/api/openstack/compute/schemas/services.py +++ b/nova/api/openstack/compute/schemas/services.py @@ -12,12 +12,14 @@ # License for the specific language governing permissions and limitations # under the License. +import copy + from nova.api.validation import parameter_types service_update = { 'type': 'object', 'properties': { - 'host': parameter_types.hostname, + 'host': parameter_types.fqdn, 'binary': { 'type': 'string', 'minLength': 1, 'maxLength': 255, }, @@ -32,7 +34,7 @@ service_update_v211 = { 'type': 'object', 'properties': { - 'host': parameter_types.hostname, + 'host': parameter_types.fqdn, 'binary': { 'type': 'string', 'minLength': 1, 'maxLength': 255, }, @@ -76,3 +78,6 @@ # For backward compatible changes 'additionalProperties': True } + +index_query_schema_275 = copy.deepcopy(index_query_schema) +index_query_schema_275['additionalProperties'] = False diff --git a/nova/api/openstack/compute/schemas/shelve.py b/nova/api/openstack/compute/schemas/shelve.py new file mode 100644 index 00000000000..e8d2f1c2406 --- /dev/null +++ b/nova/api/openstack/compute/schemas/shelve.py @@ -0,0 +1,37 @@ +# Copyright 2019 INSPUR Corporation. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from nova.api.validation import parameter_types + +# NOTE(brinzhang): For older microversions there will be no change, as this +# schema is applied only from microversion 2.77 onward for the unshelve +# server API. Anything working in an older version keeps working as it is. +unshelve_v277 = { + 'type': 'object', + 'properties': { + 'unshelve': { + 'type': ['object', 'null'], + 'properties': { + 'availability_zone': parameter_types.name + }, + # NOTE: The allowed request body is {'unshelve': null} or + # {'unshelve': {'availability_zone': <string>}}; {'unshelve': {}} + # is not allowed as the request body for unshelve. + 'required': ['availability_zone'], + 'additionalProperties': False, + }, + }, + 'required': ['unshelve'], + 'additionalProperties': False, +} diff --git a/nova/api/openstack/compute/schemas/simple_tenant_usage.py b/nova/api/openstack/compute/schemas/simple_tenant_usage.py index c7e2a657233..7ac34416e66 100644 --- a/nova/api/openstack/compute/schemas/simple_tenant_usage.py +++ b/nova/api/openstack/compute/schemas/simple_tenant_usage.py @@ -25,8 +25,8 @@ }, # NOTE(gmann): This is kept True to keep backward compatibility. # As of now Schema validation stripped out the additional parameters and - # does not raise 400.
In the future, we may block the additional parameters - # by bump in Microversion. + # does not raise 400. In microversion 2.75, we have blocked the additional + # parameters. 'additionalProperties': True } @@ -38,8 +38,8 @@ }, # NOTE(gmann): This is kept True to keep backward compatibility. # As of now Schema validation stripped out the additional parameters and - # does not raise 400. In the future, we may block the additional parameters - # by bump in Microversion. + # does not raise 400. In microversion 2.75, we have blocked the additional + # parameters. 'additionalProperties': True } @@ -50,3 +50,9 @@ show_query_v240 = copy.deepcopy(show_query) show_query_v240['properties'].update( parameter_types.pagination_parameters) + +index_query_275 = copy.deepcopy(index_query_v240) +index_query_275['additionalProperties'] = False + +show_query_275 = copy.deepcopy(show_query_v240) +show_query_275['additionalProperties'] = False diff --git a/nova/api/openstack/compute/schemas/tenant_networks.py b/nova/api/openstack/compute/schemas/tenant_networks.py deleted file mode 100644 index f5f4c03ac3a..00000000000 --- a/nova/api/openstack/compute/schemas/tenant_networks.py +++ /dev/null @@ -1,44 +0,0 @@ -# Copyright 2015 NEC Corporation. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from nova.api.validation import parameter_types - -create = { - 'type': 'object', - 'properties': { - 'network': { - 'type': 'object', - 'properties': { - 'label': { - 'type': 'string', 'maxLength': 255 - }, - 'ipam': parameter_types.boolean, - 'cidr': parameter_types.cidr, - 'cidr_v6': parameter_types.cidr, - 'vlan_start': parameter_types.positive_integer_with_empty_str, - 'network_size': - parameter_types.positive_integer_with_empty_str, - 'num_networks': parameter_types.positive_integer_with_empty_str - }, - 'required': ['label'], - 'oneOf': [ - {'required': ['cidr']}, - {'required': ['cidr_v6']} - ], - 'additionalProperties': False, - }, - }, - 'required': ['network'], - 'additionalProperties': False, -} diff --git a/nova/api/openstack/compute/schemas/volumes.py b/nova/api/openstack/compute/schemas/volumes.py index 36bc1704b80..4e68bc78e82 100644 --- a/nova/api/openstack/compute/schemas/volumes.py +++ b/nova/api/openstack/compute/schemas/volumes.py @@ -74,7 +74,7 @@ # NOTE: The validation pattern from match_device() in # nova/block_device.py. 
'pattern': '(^/dev/x{0,1}[a-z]{0,1}d{0,1})([a-z]+)[0-9]*$' - } + }, }, 'required': ['volumeId'], 'additionalProperties': False, @@ -87,10 +87,44 @@ create_volume_attachment_v249['properties']['volumeAttachment'][ 'properties']['tag'] = parameter_types.tag +create_volume_attachment_v279 = copy.deepcopy(create_volume_attachment_v249) +create_volume_attachment_v279['properties']['volumeAttachment'][ + 'properties']['delete_on_termination'] = parameter_types.boolean + update_volume_attachment = copy.deepcopy(create_volume_attachment) del update_volume_attachment['properties']['volumeAttachment'][ 'properties']['device'] +# NOTE(brinzhang): Allow attachment_id, serverId, device, tag, and +# delete_on_termination (i.e., follow the content of the GET response) +# to be specified for RESTfulness, even though we will not allow updating +# all of them. +update_volume_attachment_v285 = { + 'type': 'object', + 'properties': { + 'volumeAttachment': { + 'type': 'object', + 'properties': { + 'volumeId': parameter_types.volume_id, + 'device': { + 'type': ['string', 'null'], + # NOTE: The validation pattern from match_device() in + # nova/block_device.py. + 'pattern': '(^/dev/x{0,1}[a-z]{0,1}d{0,1})([a-z]+)[0-9]*$' + }, + 'tag': parameter_types.tag, + 'delete_on_termination': parameter_types.boolean, + 'serverId': parameter_types.server_id, + 'id': parameter_types.attachment_id + }, + 'required': ['volumeId'], + 'additionalProperties': False, + }, + }, + 'required': ['volumeAttachment'], + 'additionalProperties': False, +} + index_query = { 'type': 'object', 'properties': { @@ -101,9 +135,12 @@ }, # NOTE(gmann): This is kept True to keep backward compatibility. # As of now Schema validation stripped out the additional parameters and - # does not raise 400. In the future, we may block the additional parameters - # by bump in Microversion. + # does not raise 400. In microversion 2.75, we have blocked the additional + # parameters. 
'additionalProperties': True } detail_query = index_query + +index_query_275 = copy.deepcopy(index_query) +index_query_275['additionalProperties'] = False diff --git a/nova/api/openstack/compute/security_group_default_rules.py b/nova/api/openstack/compute/security_group_default_rules.py index d208cd93fd6..fa1b300e171 100644 --- a/nova/api/openstack/compute/security_group_default_rules.py +++ b/nova/api/openstack/compute/security_group_default_rules.py @@ -14,120 +14,24 @@ from webob import exc -from nova.api.openstack.api_version_request \ - import MAX_PROXY_API_SUPPORT_VERSION -from nova.api.openstack.compute import security_groups as sg from nova.api.openstack import wsgi -from nova import exception -from nova.i18n import _ -from nova.network.security_group import openstack_driver -from nova.policies import security_group_default_rules as sgdr_policies -class SecurityGroupDefaultRulesController(sg.SecurityGroupControllerBase, - wsgi.Controller): +class SecurityGroupDefaultRulesController(wsgi.Controller): + """(Removed) Controller for default project security groups.""" - def __init__(self): - self.security_group_api = ( - openstack_driver.get_openstack_security_group_driver()) - - @wsgi.Controller.api_version("2.1", MAX_PROXY_API_SUPPORT_VERSION) - @wsgi.expected_errors((400, 409, 501)) + @wsgi.expected_errors(410) def create(self, req, body): - context = req.environ['nova.context'] - context.can(sgdr_policies.BASE_POLICY_NAME) - - sg_rule = self._from_body(body, 'security_group_default_rule') - - try: - values = self._rule_args_to_dict(to_port=sg_rule.get('to_port'), - from_port=sg_rule.get('from_port'), - ip_protocol=sg_rule.get('ip_protocol'), - cidr=sg_rule.get('cidr')) - except (exception.InvalidCidr, - exception.InvalidInput, - exception.InvalidIpProtocol, - exception.InvalidPortRange) as ex: - raise exc.HTTPBadRequest(explanation=ex.format_message()) - - if values is None: - msg = _('Not enough parameters to build a valid rule.') - raise exc.HTTPBadRequest(explanation=msg) + raise exc.HTTPGone() - if self.security_group_api.default_rule_exists(context, values): - msg = _('This default rule already exists.') - raise exc.HTTPConflict(explanation=msg) - security_group_rule = self.security_group_api.add_default_rules( - context, [values])[0] - fmt_rule = self._format_security_group_default_rule( - security_group_rule) - return {'security_group_default_rule': fmt_rule} - - def _rule_args_to_dict(self, to_port=None, from_port=None, - ip_protocol=None, cidr=None): - cidr = self.security_group_api.parse_cidr(cidr) - return self.security_group_api.new_cidr_ingress_rule( - cidr, ip_protocol, from_port, to_port) - - @wsgi.Controller.api_version("2.1", MAX_PROXY_API_SUPPORT_VERSION) - @wsgi.expected_errors((400, 404, 501)) + @wsgi.expected_errors(410) def show(self, req, id): - context = req.environ['nova.context'] - context.can(sgdr_policies.BASE_POLICY_NAME) - - try: - id = self.security_group_api.validate_id(id) - except exception.Invalid as ex: - raise exc.HTTPBadRequest(explanation=ex.format_message()) + raise exc.HTTPGone() - try: - rule = self.security_group_api.get_default_rule(context, id) - except exception.SecurityGroupDefaultRuleNotFound as ex: - raise exc.HTTPNotFound(explanation=ex.format_message()) - - fmt_rule = self._format_security_group_default_rule(rule) - return {"security_group_default_rule": fmt_rule} - - @wsgi.Controller.api_version("2.1", MAX_PROXY_API_SUPPORT_VERSION) - @wsgi.expected_errors((400, 404, 501)) - @wsgi.response(204) + @wsgi.expected_errors(410) def 
delete(self, req, id): - context = req.environ['nova.context'] - context.can(sgdr_policies.BASE_POLICY_NAME) - - try: - id = self.security_group_api.validate_id(id) - except exception.Invalid as ex: - raise exc.HTTPBadRequest(explanation=ex.format_message()) - - try: - rule = self.security_group_api.get_default_rule(context, id) - self.security_group_api.remove_default_rules(context, [rule['id']]) - except exception.SecurityGroupDefaultRuleNotFound as ex: - raise exc.HTTPNotFound(explanation=ex.format_message()) + raise exc.HTTPGone() - @wsgi.Controller.api_version("2.1", MAX_PROXY_API_SUPPORT_VERSION) - @wsgi.expected_errors((404, 501)) + @wsgi.expected_errors(410) def index(self, req): - context = req.environ['nova.context'] - context.can(sgdr_policies.BASE_POLICY_NAME) - - ret = {'security_group_default_rules': []} - try: - for rule in self.security_group_api.get_all_default_rules(context): - rule_fmt = self._format_security_group_default_rule(rule) - ret['security_group_default_rules'].append(rule_fmt) - except exception.SecurityGroupDefaultRuleNotFound as ex: - raise exc.HTTPNotFound(explanation=ex.format_message()) - - return ret - - def _format_security_group_default_rule(self, rule): - sg_rule = {} - sg_rule['id'] = rule['id'] - sg_rule['ip_protocol'] = rule['protocol'] - sg_rule['from_port'] = rule['from_port'] - sg_rule['to_port'] = rule['to_port'] - sg_rule['ip_range'] = {} - sg_rule['ip_range'] = {'cidr': rule['cidr']} - return sg_rule + raise exc.HTTPGone() diff --git a/nova/api/openstack/compute/security_groups.py b/nova/api/openstack/compute/security_groups.py index 9801f46dcd7..b10308393f4 100644 --- a/nova/api/openstack/compute/security_groups.py +++ b/nova/api/openstack/compute/security_groups.py @@ -16,7 +16,6 @@ """The security groups extension.""" from oslo_log import log as logging -from oslo_serialization import jsonutils from webob import exc from nova.api.openstack.api_version_request \ @@ -26,33 +25,24 @@ schema_security_groups from nova.api.openstack import wsgi from nova.api import validation -from nova import compute +from nova.compute import api as compute from nova import exception from nova.i18n import _ -from nova.network.security_group import openstack_driver +from nova.network import security_group_api from nova.policies import security_groups as sg_policies from nova.virt import netutils LOG = logging.getLogger(__name__) -ATTRIBUTE_NAME = 'security_groups' SG_NOT_FOUND = object() -def _authorize_context(req): - context = req.environ['nova.context'] - context.can(sg_policies.BASE_POLICY_NAME) - return context - - class SecurityGroupControllerBase(object): """Base class for Security Group controllers.""" def __init__(self): - self.security_group_api = ( - openstack_driver.get_openstack_security_group_driver()) - self.compute_api = compute.API( - security_group_api=self.security_group_api) + super(SecurityGroupControllerBase, self).__init__() + self.compute_api = compute.API() def _format_security_group_rule(self, context, rule, group_rule_data=None): """Return a security group rule in desired API response format. 
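The schema modules earlier in this patch all apply one pattern that is easier to see in isolation: each microversion's schema is a copy.deepcopy() of its predecessor with a small mutation, and the 2.75 variants flip 'additionalProperties' to False so unknown query parameters are rejected with HTTP 400 instead of being silently stripped. Below is a minimal, self-contained sketch of that pattern; the names list_query and list_query_v275 are illustrative, not part of the patch.

import copy

list_query = {
    'type': 'object',
    'properties': {
        'limit': {'type': 'string'},
    },
    # Pre-2.75 behaviour: unknown query parameters are tolerated by the
    # schema and stripped out later in request processing.
    'additionalProperties': True,
}

# The 2.75 variant is the same schema with unknown parameters disallowed.
list_query_v275 = copy.deepcopy(list_query)
list_query_v275['additionalProperties'] = False

A controller method then binds each variant to its microversion range with the decorators used throughout this patch, e.g. @validation.query_schema(list_query, '2.0', '2.74') stacked with @validation.query_schema(list_query_v275, '2.75').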
@@ -72,7 +62,7 @@ def _format_security_group_rule(self, context, rule, group_rule_data=None): sg_rule['group'] = group_rule_data elif rule['group_id']: try: - source_group = self.security_group_api.get( + source_group = security_group_api.get( context, id=rule['group_id']) except exception.SecurityGroupNotFound: # NOTE(arosen): There is a possible race condition that can @@ -128,7 +118,7 @@ def _get_group_rule_data_by_rule_group_id(self, context, groups): if (rule_group_id and rule_group_id not in group_rule_data_by_rule_group_id): try: - source_group = self.security_group_api.get( + source_group = security_group_api.get( context, id=rule['group_id']) group_rule_data_by_rule_group_id[rule_group_id] = { 'name': source_group.get('name'), @@ -159,12 +149,13 @@ class SecurityGroupController(SecurityGroupControllerBase, wsgi.Controller): @wsgi.expected_errors((400, 404)) def show(self, req, id): """Return data about the given security group.""" - context = _authorize_context(req) + context = req.environ['nova.context'] + context.can(sg_policies.POLICY_NAME % 'show', + target={'project_id': context.project_id}) try: - id = self.security_group_api.validate_id(id) - security_group = self.security_group_api.get(context, None, id, - map_exception=True) + id = security_group_api.validate_id(id) + security_group = security_group_api.get(context, id) except exception.SecurityGroupNotFound as exp: raise exc.HTTPNotFound(explanation=exp.format_message()) except exception.Invalid as exp: @@ -178,13 +169,14 @@ def show(self, req, id): @wsgi.response(202) def delete(self, req, id): """Delete a security group.""" - context = _authorize_context(req) + context = req.environ['nova.context'] + context.can(sg_policies.POLICY_NAME % 'delete', + target={'project_id': context.project_id}) try: - id = self.security_group_api.validate_id(id) - security_group = self.security_group_api.get(context, None, id, - map_exception=True) - self.security_group_api.destroy(context, security_group) + id = security_group_api.validate_id(id) + security_group = security_group_api.get(context, id) + security_group_api.destroy(context, security_group) except exception.SecurityGroupNotFound as exp: raise exc.HTTPNotFound(explanation=exp.format_message()) except exception.Invalid as exp: @@ -195,15 +187,16 @@ def delete(self, req, id): @wsgi.expected_errors(404) def index(self, req): """Returns a list of security groups.""" - context = _authorize_context(req) + context = req.environ['nova.context'] + context.can(sg_policies.POLICY_NAME % 'get', + target={'project_id': context.project_id}) search_opts = {} search_opts.update(req.GET) project_id = context.project_id - raw_groups = self.security_group_api.list(context, - project=project_id, - search_opts=search_opts) + raw_groups = security_group_api.list( + context, project=project_id, search_opts=search_opts) limited_list = common.limited(raw_groups, req) result = [self._format_security_group(context, group) @@ -217,7 +210,9 @@ def index(self, req): @wsgi.expected_errors((400, 403)) def create(self, req, body): """Creates a new security group.""" - context = _authorize_context(req) + context = req.environ['nova.context'] + context.can(sg_policies.POLICY_NAME % 'create', + target={'project_id': context.project_id}) security_group = self._from_body(body, 'security_group') @@ -225,10 +220,10 @@ def create(self, req, body): group_description = security_group.get('description', None) try: - self.security_group_api.validate_property(group_name, 'name', None) - 
self.security_group_api.validate_property(group_description, - 'description', None) - group_ref = self.security_group_api.create_security_group( + security_group_api.validate_property(group_name, 'name', None) + security_group_api.validate_property(group_description, + 'description', None) + group_ref = security_group_api.create_security_group( context, group_name, group_description) except exception.Invalid as exp: raise exc.HTTPBadRequest(explanation=exp.format_message()) @@ -242,12 +237,13 @@ def create(self, req, body): @wsgi.expected_errors((400, 404)) def update(self, req, id, body): """Update a security group.""" - context = _authorize_context(req) + context = req.environ['nova.context'] + context.can(sg_policies.POLICY_NAME % 'update', + target={'project_id': context.project_id}) try: - id = self.security_group_api.validate_id(id) - security_group = self.security_group_api.get(context, None, id, - map_exception=True) + id = security_group_api.validate_id(id) + security_group = security_group_api.get(context, id) except exception.SecurityGroupNotFound as exp: raise exc.HTTPNotFound(explanation=exp.format_message()) except exception.Invalid as exp: @@ -258,10 +254,10 @@ def update(self, req, id, body): group_description = security_group_data.get('description', None) try: - self.security_group_api.validate_property(group_name, 'name', None) - self.security_group_api.validate_property(group_description, - 'description', None) - group_ref = self.security_group_api.update_security_group( + security_group_api.validate_property(group_name, 'name', None) + security_group_api.validate_property( + group_description, 'description', None) + group_ref = security_group_api.update_security_group( context, security_group, group_name, group_description) except exception.SecurityGroupNotFound as exp: raise exc.HTTPNotFound(explanation=exp.format_message()) @@ -278,22 +274,22 @@ class SecurityGroupRulesController(SecurityGroupControllerBase, @wsgi.Controller.api_version("2.1", MAX_PROXY_API_SUPPORT_VERSION) @wsgi.expected_errors((400, 403, 404)) def create(self, req, body): - context = _authorize_context(req) - + context = req.environ['nova.context'] + context.can(sg_policies.POLICY_NAME % 'rule:create', + target={'project_id': context.project_id}) sg_rule = self._from_body(body, 'security_group_rule') group_id = sg_rule.get('group_id') source_group = {} try: - parent_group_id = self.security_group_api.validate_id( + parent_group_id = security_group_api.validate_id( sg_rule.get('parent_group_id')) - security_group = self.security_group_api.get(context, None, - parent_group_id, - map_exception=True) + security_group = security_group_api.get( + context, parent_group_id) if group_id is not None: - group_id = self.security_group_api.validate_id(group_id) + group_id = security_group_api.validate_id(group_id) - source_group = self.security_group_api.get( + source_group = security_group_api.get( context, id=group_id) new_rule = self._rule_args_to_dict(context, to_port=sg_rule.get('to_port'), @@ -325,7 +321,7 @@ def create(self, req, body): 'tenant_id': source_group.get('project_id')} security_group_rule = ( - self.security_group_api.create_security_group_rule( + security_group_api.create_security_group_rule( context, security_group, new_rule)) except exception.Invalid as exp: raise exc.HTTPBadRequest(explanation=exp.format_message()) @@ -343,28 +339,28 @@ def _rule_args_to_dict(self, context, to_port=None, from_port=None, ip_protocol=None, cidr=None, group_id=None): if group_id is not None: - return 
self.security_group_api.new_group_ingress_rule( - group_id, ip_protocol, from_port, to_port) + return security_group_api.new_group_ingress_rule( + group_id, ip_protocol, from_port, to_port) else: - cidr = self.security_group_api.parse_cidr(cidr) - return self.security_group_api.new_cidr_ingress_rule( - cidr, ip_protocol, from_port, to_port) + cidr = security_group_api.parse_cidr(cidr) + return security_group_api.new_cidr_ingress_rule( + cidr, ip_protocol, from_port, to_port) @wsgi.Controller.api_version("2.1", MAX_PROXY_API_SUPPORT_VERSION) @wsgi.expected_errors((400, 404, 409)) @wsgi.response(202) def delete(self, req, id): - context = _authorize_context(req) + context = req.environ['nova.context'] + context.can(sg_policies.POLICY_NAME % 'rule:delete', + target={'project_id': context.project_id}) try: - id = self.security_group_api.validate_id(id) - rule = self.security_group_api.get_rule(context, id) + id = security_group_api.validate_id(id) + rule = security_group_api.get_rule(context, id) group_id = rule['parent_group_id'] - security_group = self.security_group_api.get(context, None, - group_id, - map_exception=True) - self.security_group_api.remove_rules(context, security_group, - [rule['id']]) + security_group = security_group_api.get(context, group_id) + security_group_api.remove_rules( + context, security_group, [rule['id']]) except exception.SecurityGroupNotFound as exp: raise exc.HTTPNotFound(explanation=exp.format_message()) except exception.NoUniqueMatch as exp: @@ -378,13 +374,12 @@ class ServerSecurityGroupController(SecurityGroupControllerBase): @wsgi.expected_errors(404) def index(self, req, server_id): """Returns a list of security groups for the given instance.""" - context = _authorize_context(req) - - self.security_group_api.ensure_default(context) - + context = req.environ['nova.context'] instance = common.get_instance(self.compute_api, context, server_id) + context.can(sg_policies.POLICY_NAME % 'list', + target={'project_id': instance.project_id}) try: - groups = self.security_group_api.get_instance_security_groups( + groups = security_group_api.get_instance_security_groups( context, instance, True) except (exception.SecurityGroupNotFound, exception.InstanceNotFound) as exp: @@ -407,12 +402,9 @@ def index(self, req, server_id): class SecurityGroupActionController(wsgi.Controller): - def __init__(self, *args, **kwargs): - super(SecurityGroupActionController, self).__init__(*args, **kwargs) - self.security_group_api = ( - openstack_driver.get_openstack_security_group_driver()) - self.compute_api = compute.API( - security_group_api=self.security_group_api) + def __init__(self): + super(SecurityGroupActionController, self).__init__() + self.compute_api = compute.API() def _parse(self, body, action): try: @@ -431,28 +423,25 @@ def _parse(self, body, action): return group_name - def _invoke(self, method, context, id, group_name): - instance = common.get_instance(self.compute_api, context, id) - method(context, instance, group_name) - @wsgi.expected_errors((400, 404, 409)) @wsgi.response(202) @wsgi.action('addSecurityGroup') def _addSecurityGroup(self, req, id, body): context = req.environ['nova.context'] - context.can(sg_policies.BASE_POLICY_NAME) + instance = common.get_instance(self.compute_api, context, id) + context.can(sg_policies.POLICY_NAME % 'add', + target={'project_id': instance.project_id}) group_name = self._parse(body, 'addSecurityGroup') try: - return self._invoke(self.security_group_api.add_to_instance, - context, id, group_name) + return 
security_group_api.add_to_instance(context, instance, + group_name) except (exception.SecurityGroupNotFound, exception.InstanceNotFound) as exp: raise exc.HTTPNotFound(explanation=exp.format_message()) except exception.NoUniqueMatch as exp: raise exc.HTTPConflict(explanation=exp.format_message()) - except (exception.SecurityGroupCannotBeApplied, - exception.SecurityGroupExistsForInstance) as exp: + except exception.SecurityGroupCannotBeApplied as exp: raise exc.HTTPBadRequest(explanation=exp.format_message()) @wsgi.expected_errors((400, 404, 409)) @@ -460,83 +449,17 @@ def _addSecurityGroup(self, req, id, body): @wsgi.action('removeSecurityGroup') def _removeSecurityGroup(self, req, id, body): context = req.environ['nova.context'] - context.can(sg_policies.BASE_POLICY_NAME) + instance = common.get_instance(self.compute_api, context, id) + context.can(sg_policies.POLICY_NAME % 'remove', + target={'project_id': instance.project_id}) group_name = self._parse(body, 'removeSecurityGroup') try: - return self._invoke(self.security_group_api.remove_from_instance, - context, id, group_name) + return security_group_api.remove_from_instance(context, instance, + group_name) except (exception.SecurityGroupNotFound, exception.InstanceNotFound) as exp: raise exc.HTTPNotFound(explanation=exp.format_message()) except exception.NoUniqueMatch as exp: raise exc.HTTPConflict(explanation=exp.format_message()) - except exception.SecurityGroupNotExistsForInstance as exp: - raise exc.HTTPBadRequest(explanation=exp.format_message()) - - -class SecurityGroupsOutputController(wsgi.Controller): - def __init__(self, *args, **kwargs): - super(SecurityGroupsOutputController, self).__init__(*args, **kwargs) - self.compute_api = compute.API() - self.security_group_api = ( - openstack_driver.get_openstack_security_group_driver()) - - def _extend_servers(self, req, servers): - # TODO(arosen) this function should be refactored to reduce duplicate - # code and use get_instance_security_groups instead of get_db_instance. - if not len(servers): - return - key = "security_groups" - context = req.environ['nova.context'] - if not context.can(sg_policies.BASE_POLICY_NAME, fatal=False): - return - - if not openstack_driver.is_neutron_security_groups(): - for server in servers: - instance = req.get_db_instance(server['id']) - groups = instance.get(key) - if groups: - server[ATTRIBUTE_NAME] = [{"name": group.name} - for group in groups] - else: - # If method is a POST we get the security groups intended for an - # instance from the request. The reason for this is if using - # neutron security groups the requested security groups for the - # instance are not in the db and have not been sent to neutron yet. - if req.method != 'POST': - sg_instance_bindings = ( - self.security_group_api - .get_instances_security_groups_bindings(context, - servers)) - for server in servers: - groups = sg_instance_bindings.get(server['id']) - if groups: - server[ATTRIBUTE_NAME] = groups - - # In this section of code len(servers) == 1 as you can only POST - # one server in an API request. 
- else: - # try converting to json - req_obj = jsonutils.loads(req.body) - # Add security group to server, if no security group was in - # request add default since that is the group it is part of - servers[0][ATTRIBUTE_NAME] = req_obj['server'].get( - ATTRIBUTE_NAME, [{'name': 'default'}]) - - def _show(self, req, resp_obj): - if 'server' in resp_obj.obj: - self._extend_servers(req, [resp_obj.obj['server']]) - - @wsgi.extends - def show(self, req, resp_obj, id): - return self._show(req, resp_obj) - - @wsgi.extends - def create(self, req, resp_obj, body): - return self._show(req, resp_obj) - - @wsgi.extends - def detail(self, req, resp_obj): - self._extend_servers(req, list(resp_obj.obj['servers'])) diff --git a/nova/api/openstack/compute/server_diagnostics.py b/nova/api/openstack/compute/server_diagnostics.py index 5e091f74c10..325d81ab063 100644 --- a/nova/api/openstack/compute/server_diagnostics.py +++ b/nova/api/openstack/compute/server_diagnostics.py @@ -19,7 +19,7 @@ from nova.api.openstack import common from nova.api.openstack.compute.views import server_diagnostics from nova.api.openstack import wsgi -from nova import compute +from nova.compute import api as compute from nova import exception from nova.policies import server_diagnostics as sd_policies @@ -27,16 +27,16 @@ class ServerDiagnosticsController(wsgi.Controller): _view_builder_class = server_diagnostics.ViewBuilder - def __init__(self, *args, **kwargs): - super(ServerDiagnosticsController, self).__init__(*args, **kwargs) + def __init__(self): + super(ServerDiagnosticsController, self).__init__() self.compute_api = compute.API() @wsgi.expected_errors((400, 404, 409, 501)) def index(self, req, server_id): context = req.environ["nova.context"] - context.can(sd_policies.BASE_POLICY_NAME) - instance = common.get_instance(self.compute_api, context, server_id) + context.can(sd_policies.BASE_POLICY_NAME, + target={'project_id': instance.project_id}) try: if api_version_request.is_supported(req, min_version='2.48'): diff --git a/nova/api/openstack/compute/server_external_events.py b/nova/api/openstack/compute/server_external_events.py index 81a303f9768..162bd93697a 100644 --- a/nova/api/openstack/compute/server_external_events.py +++ b/nova/api/openstack/compute/server_external_events.py @@ -13,14 +13,12 @@ # under the License. 
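The hunks that follow rework the external-events controller: every event name listed in TAG_REQUIRED must carry a tag, and the create_v276/create_v282 schemas added earlier gate the power-update and accelerator-request-bound event names by microversion. For orientation, a request at microversion 2.82 could look like the sketch below; the uuid values are placeholders, not from this patch.

# Hypothetical request body for POST /os-server-external-events at
# microversion 2.82; 'tag' must be present because the event name is in
# TAG_REQUIRED, and here it would carry the accelerator request (ARQ) uuid.
body = {
    'events': [{
        'name': 'accelerator-request-bound',
        'server_uuid': '00000000-0000-0000-0000-000000000001',  # placeholder
        'tag': '00000000-0000-0000-0000-000000000002',          # placeholder
        'status': 'completed',
    }],
}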
from oslo_log import log as logging -import webob from nova.api.openstack.compute.schemas import server_external_events from nova.api.openstack import wsgi from nova.api import validation -from nova import compute +from nova.compute import api as compute from nova import context as nova_context -from nova.i18n import _ from nova import objects from nova.policies import server_external_events as see_policies @@ -28,15 +26,19 @@ LOG = logging.getLogger(__name__) +TAG_REQUIRED = ('volume-extended', 'power-update', + 'accelerator-request-bound') + + class ServerExternalEventsController(wsgi.Controller): def __init__(self): - self.compute_api = compute.API() super(ServerExternalEventsController, self).__init__() + self.compute_api = compute.API() @staticmethod def _is_event_tag_present_when_required(event): - if event.name == 'volume-extended' and event.tag is None: + if event.name in TAG_REQUIRED and event.tag is None: return False return True @@ -62,14 +64,16 @@ def _get_instances_all_cells(self, context, instance_uuids, return instances - @wsgi.expected_errors((403, 404)) + @wsgi.expected_errors(403) @wsgi.response(200) @validation.schema(server_external_events.create, '2.0', '2.50') - @validation.schema(server_external_events.create_v251, '2.51') + @validation.schema(server_external_events.create_v251, '2.51', '2.75') + @validation.schema(server_external_events.create_v276, '2.76', '2.81') + @validation.schema(server_external_events.create_v282, '2.82') def create(self, req, body): """Creates a new instance event.""" context = req.environ['nova.context'] - context.can(see_policies.POLICY_ROOT % 'create') + context.can(see_policies.POLICY_ROOT % 'create', target={}) response_events = [] accepted_events = [] @@ -142,9 +146,6 @@ def create(self, req, body): if accepted_events: self.compute_api.external_instance_event( context, accepted_instances, accepted_events) - else: - msg = _('No instances found for any event') - raise webob.exc.HTTPNotFound(explanation=msg) # FIXME(cyeoh): This needs some infrastructure support so that # we have a general way to do this diff --git a/nova/api/openstack/compute/server_groups.py b/nova/api/openstack/compute/server_groups.py index 14fdd485f4e..955c75936bf 100644 --- a/nova/api/openstack/compute/server_groups.py +++ b/nova/api/openstack/compute/server_groups.py @@ -16,6 +16,7 @@ """The Server Group API Extension.""" import collections +import itertools from oslo_log import log as logging import webob @@ -42,18 +43,18 @@ GROUP_POLICY_OBJ_MICROVERSION = "2.64" -def _authorize_context(req, action): - context = req.environ['nova.context'] - context.can(sg_policies.POLICY_ROOT % action) - return context +def _get_not_deleted(context, uuids, not_deleted_inst=None): + if not_deleted_inst: + # short-cut if we already pre-built a list of not deleted instances to + # be more efficient + return {u: not_deleted_inst[u] for u in uuids + if u in not_deleted_inst} - -def _get_not_deleted(context, uuids): mappings = objects.InstanceMappingList.get_by_instance_uuids( context, uuids) inst_by_cell = collections.defaultdict(list) cell_mappings = {} - found_inst_uuids = [] + found_inst = {} # Get a master list of cell mappings, and a list of instance # uuids organized by cell @@ -61,7 +62,7 @@ def _get_not_deleted(context, uuids): if not im.cell_mapping: # Not scheduled yet, so just throw it in the final list # and move on - found_inst_uuids.append(im.instance_uuid) + found_inst[im.instance_uuid] = None continue if im.cell_mapping.uuid not in cell_mappings: 
cell_mappings[im.cell_mapping.uuid] = im.cell_mapping @@ -72,14 +73,14 @@ for cell_uuid, cell_mapping in cell_mappings.items(): inst_uuids = inst_by_cell[cell_uuid] LOG.debug('Querying cell %(cell)s for %(num)i instances', - {'cell': cell_mapping.identity, 'num': len(uuids)}) + {'cell': cell_mapping.identity, 'num': len(inst_uuids)}) filters = {'uuid': inst_uuids, 'deleted': False} with nova_context.target_cell(context, cell_mapping) as ctx: - found_inst_uuids.extend([ - inst.uuid for inst in objects.InstanceList.get_by_filters( - ctx, filters=filters)]) + instances = objects.InstanceList.get_by_filters( + ctx, filters=filters) + found_inst.update({inst.uuid: inst.host for inst in instances}) - return found_inst_uuids + return found_inst def _should_enable_custom_max_server_rules(context, rules): @@ -94,7 +95,16 @@ class ServerGroupController(wsgi.Controller): """The Server group API controller for the OpenStack API.""" - def _format_server_group(self, context, group, req): + def _format_server_group(self, context, group, req, + not_deleted_inst=None): + """Format ServerGroup according to API version. + + Displays only not-deleted members. + + :param not_deleted_inst: Pre-built dict of instance-uuid: host for + multiple server-groups that are found to be + not deleted. + """ # the id field has its value as the uuid of the server group # There is no 'uuid' key in server_group seen by clients. # In addition, clients see policies as a ["policy-name"] list; @@ -114,7 +124,8 @@ def _format_server_group(self, context, group, req): members = [] if group.members: # Display the instances that are not deleted. - members = _get_not_deleted(context, group.members) + members = list(_get_not_deleted(context, group.members, + not_deleted_inst)) server_group['members'] = members # Add project id information to the response data for # API version v2.13 @@ -126,40 +137,65 @@ def _format_server_group(self, context, group, req): @wsgi.expected_errors(404) def show(self, req, id): """Return data about the given server group.""" - context = _authorize_context(req, 'show') + context = req.environ['nova.context'] try: sg = objects.InstanceGroup.get_by_uuid(context, id) except nova.exception.InstanceGroupNotFound as e: raise webob.exc.HTTPNotFound(explanation=e.format_message()) + context.can(sg_policies.POLICY_ROOT % 'show', + target={'project_id': sg.project_id}) return {'server_group': self._format_server_group(context, sg, req)} @wsgi.response(204) @wsgi.expected_errors(404) def delete(self, req, id): """Delete a server group.""" - context = _authorize_context(req, 'delete') + context = req.environ['nova.context'] try: sg = objects.InstanceGroup.get_by_uuid(context, id) except nova.exception.InstanceGroupNotFound as e: raise webob.exc.HTTPNotFound(explanation=e.format_message()) + context.can(sg_policies.POLICY_ROOT % 'delete', + target={'project_id': sg.project_id}) try: sg.destroy() except nova.exception.InstanceGroupNotFound as e: raise webob.exc.HTTPNotFound(explanation=e.format_message()) @wsgi.expected_errors(()) - @validation.query_schema(schema.server_groups_query_param) + @validation.query_schema(schema.server_groups_query_param_275, '2.75') + @validation.query_schema(schema.server_groups_query_param, '2.0', '2.74') def index(self, req): """Returns a list of server groups.""" - context = _authorize_context(req, 'index') + context = req.environ['nova.context'] project_id = context.project_id + #
NOTE(gmann): Using context's project_id as target here so + # that when we remove the default target from policy class, + # it does not fail if a user requests an operation on their + # own server group. + context.can(sg_policies.POLICY_ROOT % 'index', + target={'project_id': project_id}) if 'all_projects' in req.GET and context.is_admin: + # TODO(gmann): Remove the is_admin check in the above condition + # so that the below policy can raise error if not allowed. + # In the existing behavior, if non-admin users request all + # projects' server groups they do not get an error; instead they + # get their own server groups. Once we switch to the new policy + # defaults completely then we can remove the above check. + # Until then, let's keep the old behaviour. + context.can(sg_policies.POLICY_ROOT % 'index:all_projects', + target={}) sgs = objects.InstanceGroupList.get_all(context) else: sgs = objects.InstanceGroupList.get_by_project_id( context, project_id) limited_list = common.limited(sgs.objects, req) - result = [self._format_server_group(context, group, req) + + members = list(itertools.chain.from_iterable(sg.members + for sg in limited_list + if sg.members)) + not_deleted = _get_not_deleted(context, members) + result = [self._format_server_group(context, group, req, not_deleted) for group in limited_list] return {'server_groups': result} @@ -170,11 +206,13 @@ def index(self, req): @validation.schema(schema.create_v264, GROUP_POLICY_OBJ_MICROVERSION) def create(self, req, body): """Creates a new server group.""" - context = _authorize_context(req, 'create') - + context = req.environ['nova.context'] + project_id = context.project_id + context.can(sg_policies.POLICY_ROOT % 'create', + target={'project_id': project_id}) try: objects.Quotas.check_deltas(context, {'server_groups': 1}, - context.project_id, context.user_id) + project_id, context.user_id) except nova.exception.OverQuota: msg = _("Quota exceeded, too many server groups.") raise exc.HTTPForbidden(explanation=msg) @@ -200,7 +238,7 @@ def create(self, req, body): sg = objects.InstanceGroup(context, policy=policies[0]) try: sg.name = vals.get('name') - sg.project_id = context.project_id + sg.project_id = project_id sg.user_id = context.user_id sg.create() except ValueError as e: @@ -213,7 +251,7 @@ def create(self, req, body): if CONF.quota.recheck_quota: try: objects.Quotas.check_deltas(context, {'server_groups': 0}, - context.project_id, + project_id, context.user_id) except nova.exception.OverQuota: sg.destroy() @@ -221,3 +259,131 @@ def create(self, req, body): raise exc.HTTPForbidden(explanation=msg) return {'server_group': self._format_server_group(context, sg, req)} + + @wsgi.Controller.api_version("2.64") + @validation.schema(schema.update) + @wsgi.expected_errors((400, 404)) + def update(self, req, id, body): + """Update a server-group's members. + + Striving for idempotency, we accept already removed or already + contained members. + + We always remove first and then check if we can add the requested + members. That way, removing an instance from a host and adding another + one works in one request. + + We do all requested changes or no change.
+ """ + context = req.environ['nova.context'] + try: + sg = objects.InstanceGroup.get_by_uuid(context, id) + except nova.exception.InstanceGroupNotFound as e: + raise webob.exc.HTTPNotFound(explanation=e.format_message()) + + members_to_remove = set(body.get('remove_members', [])) + members_to_add = set(body.get('add_members', [])) + LOG.info('Called update for server-group %s with add_members: %s and ' + 'remove_members %s', + id, ', '.join(members_to_add), ', '.join(members_to_remove)) + + overlap = members_to_remove & members_to_add + if overlap: + msg = ('Parameters "add_members" and "remove_members" are ' + 'overlapping in {}'.format(', '.join(overlap))) + raise exc.HTTPBadRequest(explanation=msg) + + if not members_to_remove and not members_to_add: + LOG.info("No update requested.") + formatted_sg = self._format_server_group(context, sg, req) + return {'server_group': formatted_sg} + + # don't do work if it's not necessary. we might be able to get a fast + # way out if this request is already fulfilled + members_to_remove = members_to_remove & set(sg.members) + members_to_add = members_to_add - set(sg.members) + + if not members_to_remove and not members_to_add: + LOG.info("State already satisfied.") + formatted_sg = self._format_server_group(context, sg, req) + return {'server_group': formatted_sg} + + # retrieve all the instances to add, failing if one doesn't exist, + # because we need to check the hosts against the policy and adding + # non-existent instances doesn't make sense + found_instances_hosts = _get_not_deleted(context, members_to_add) + missing_uuids = members_to_add - set(found_instances_hosts) + if missing_uuids: + msg = ("One or more members in add_members cannot be found: {}" + .format(', '.join(missing_uuids))) + raise exc.HTTPBadRequest(explanation=msg) + + # check if (some of) the VMs are already members of another + # instance_group. We cannot support this as they might contradict. + found_server_groups = \ + objects.InstanceGroupList.get_by_instance_uuids(context, + members_to_add) + other_server_groups = [_x.uuid for _x in found_server_groups + if _x.uuid != id] + if other_server_groups: + msg = ("One ore more members in add_members is already assigned " + "to another server group. Server groups: {}" + .format(', '.join(other_server_groups))) + raise exc.HTTPBadRequest(explanation=msg) + + # check if the policy is still valid with these changes + if sg.policy in ('affinity', 'anti-affinity'): + current_members_hosts = _get_not_deleted(context, sg.members) + current_hosts = set(h for u, h in current_members_hosts.items() + if u not in members_to_remove) + if sg.policy == 'affinity': + outliers = [u for u, h in found_instances_hosts.items() + if h and h not in current_hosts] + elif sg.policy == 'anti-affinity': + outliers = [u for u, h in found_instances_hosts.items() + if h and h in current_hosts] + else: + outliers = None + LOG.warning('server-group update check not implemented for ' + 'policy %s', sg.policy) + if outliers: + LOG.info('Update of server-group %s with policy %s aborted: ' + 'policy violation by %s', + id, sg.policy, ', '.join(outliers)) + msg = ("Adding instance(s) {} would violate policy '{}'." 
+ .format(', '.join(outliers), sg.policy)) + raise exc.HTTPBadRequest(explanation=msg) + + # update the server group and save it + if members_to_remove: + objects.InstanceGroup.remove_members(context, sg.id, + members_to_remove, sg.uuid) + if members_to_add: + try: + objects.InstanceGroup.add_members(context, id, members_to_add) + except Exception: + LOG.exception('Failed to add members.') + if members_to_remove: + LOG.info('Trying to add removed members again after ' + 'error.') + objects.InstanceGroup.add_members(context, id, + members_to_remove) + raise + + LOG.info("Changed server-group %s in DB.", id) + + # refresh InstanceGroup object, because we changed it directly in the + # DB. + sg.refresh() + + # update the request-specs of the updated members + for member_uuid in found_instances_hosts: + request_spec = \ + objects.RequestSpec.get_by_instance_uuid(context, member_uuid) + if member_uuid in members_to_add: + request_spec.instance_group = sg + else: + request_spec.instance_group = None + request_spec.save() + + return {'server_group': self._format_server_group(context, sg, req)} diff --git a/nova/api/openstack/compute/server_metadata.py b/nova/api/openstack/compute/server_metadata.py index b0b0fbf7731..448441a346c 100644 --- a/nova/api/openstack/compute/server_metadata.py +++ b/nova/api/openstack/compute/server_metadata.py @@ -20,7 +20,7 @@ from nova.api.openstack.compute.schemas import server_metadata from nova.api.openstack import wsgi from nova.api import validation -from nova import compute +from nova.compute import api as compute from nova import exception from nova.i18n import _ from nova.policies import server_metadata as sm_policies @@ -30,11 +30,10 @@ class ServerMetadataController(wsgi.Controller): """The server metadata API controller for the OpenStack API.""" def __init__(self): - self.compute_api = compute.API() super(ServerMetadataController, self).__init__() + self.compute_api = compute.API() - def _get_metadata(self, context, server_id): - server = common.get_instance(self.compute_api, context, server_id) + def _get_metadata(self, context, server): try: # NOTE(mikal): get_instance_metadata sometimes returns # InstanceNotFound in unit tests, even though the instance is @@ -52,8 +51,10 @@ def _get_metadata(self, context, server_id): def index(self, req, server_id): """Returns the list of metadata for a given instance.""" context = req.environ['nova.context'] - context.can(sm_policies.POLICY_ROOT % 'index') - return {'metadata': self._get_metadata(context, server_id)} + server = common.get_instance(self.compute_api, context, server_id) + context.can(sm_policies.POLICY_ROOT % 'index', + target={'project_id': server.project_id}) + return {'metadata': self._get_metadata(context, server)} @wsgi.expected_errors((403, 404, 409)) # NOTE(gmann): Returns 200 for backwards compatibility but should be 201 @@ -62,9 +63,11 @@ def index(self, req, server_id): def create(self, req, server_id, body): metadata = body['metadata'] context = req.environ['nova.context'] - context.can(sm_policies.POLICY_ROOT % 'create') + server = common.get_instance(self.compute_api, context, server_id) + context.can(sm_policies.POLICY_ROOT % 'create', + target={'project_id': server.project_id}) new_metadata = self._update_instance_metadata(context, - server_id, + server, metadata, delete=False) @@ -74,14 +77,16 @@ def create(self, req, server_id, body): @validation.schema(server_metadata.update) def update(self, req, server_id, id, body): context = req.environ['nova.context'] - 
context.can(sm_policies.POLICY_ROOT % 'update') + server = common.get_instance(self.compute_api, context, server_id) + context.can(sm_policies.POLICY_ROOT % 'update', + target={'project_id': server.project_id}) meta_item = body['meta'] if id not in meta_item: expl = _('Request body and URI mismatch') raise exc.HTTPBadRequest(explanation=expl) self._update_instance_metadata(context, - server_id, + server, meta_item, delete=False) @@ -91,43 +96,40 @@ def update(self, req, server_id, id, body): @validation.schema(server_metadata.update_all) def update_all(self, req, server_id, body): context = req.environ['nova.context'] - context.can(sm_policies.POLICY_ROOT % 'update_all') + server = common.get_instance(self.compute_api, context, server_id) + context.can(sm_policies.POLICY_ROOT % 'update_all', + target={'project_id': server.project_id}) metadata = body['metadata'] new_metadata = self._update_instance_metadata(context, - server_id, + server, metadata, delete=True) return {'metadata': new_metadata} - def _update_instance_metadata(self, context, server_id, metadata, + def _update_instance_metadata(self, context, server, metadata, delete=False): - server = common.get_instance(self.compute_api, context, server_id) try: return self.compute_api.update_instance_metadata(context, server, metadata, delete) - - except exception.InstanceUnknownCell as e: - raise exc.HTTPNotFound(explanation=e.format_message()) - except exception.QuotaError as error: raise exc.HTTPForbidden(explanation=error.format_message()) - except exception.InstanceIsLocked as e: raise exc.HTTPConflict(explanation=e.format_message()) - except exception.InstanceInvalidState as state_error: common.raise_http_conflict_for_instance_invalid_state(state_error, - 'update metadata', server_id) + 'update metadata', server.uuid) @wsgi.expected_errors(404) def show(self, req, server_id, id): """Return a single metadata item.""" context = req.environ['nova.context'] - context.can(sm_policies.POLICY_ROOT % 'show') - data = self._get_metadata(context, server_id) + server = common.get_instance(self.compute_api, context, server_id) + context.can(sm_policies.POLICY_ROOT % 'show', + target={'project_id': server.project_id}) + data = self._get_metadata(context, server) try: return {'meta': {id: data[id]}} @@ -140,23 +142,19 @@ def show(self, req, server_id, id): def delete(self, req, server_id, id): """Deletes an existing metadata.""" context = req.environ['nova.context'] - context.can(sm_policies.POLICY_ROOT % 'delete') - metadata = self._get_metadata(context, server_id) + server = common.get_instance(self.compute_api, context, server_id) + context.can(sm_policies.POLICY_ROOT % 'delete', + target={'project_id': server.project_id}) + metadata = self._get_metadata(context, server) if id not in metadata: msg = _("Metadata item was not found") raise exc.HTTPNotFound(explanation=msg) - server = common.get_instance(self.compute_api, context, server_id) try: self.compute_api.delete_instance_metadata(context, server, id) - - except exception.InstanceUnknownCell as e: - raise exc.HTTPNotFound(explanation=e.format_message()) - except exception.InstanceIsLocked as e: raise exc.HTTPConflict(explanation=e.format_message()) - except exception.InstanceInvalidState as state_error: common.raise_http_conflict_for_instance_invalid_state(state_error, 'delete metadata', server_id) diff --git a/nova/api/openstack/compute/server_migrations.py b/nova/api/openstack/compute/server_migrations.py index 3e309712304..230d6088fe7 100644 --- 
a/nova/api/openstack/compute/server_migrations.py +++ b/nova/api/openstack/compute/server_migrations.py @@ -20,13 +20,13 @@ from nova.api.openstack.compute.schemas import server_migrations from nova.api.openstack import wsgi from nova.api import validation -from nova import compute +from nova.compute import api as compute from nova import exception from nova.i18n import _ from nova.policies import servers_migrations as sm_policies -def output(migration, include_uuid=False): +def output(migration, include_uuid=False, include_user_project=False): """Returns the desired output of the API from an object. From a Migrations's object this method returns the primitive @@ -52,6 +52,9 @@ def output(migration, include_uuid=False): } if include_uuid: result['uuid'] = migration.uuid + if include_user_project: + result['user_id'] = migration.user_id + result['project_id'] = migration.project_id return result @@ -59,8 +62,8 @@ class ServerMigrationsController(wsgi.Controller): """The server migrations API controller for the OpenStack API.""" def __init__(self): - self.compute_api = compute.API() super(ServerMigrationsController, self).__init__() + self.compute_api = compute.API() @wsgi.Controller.api_version("2.22") @wsgi.response(202) @@ -69,9 +72,10 @@ def __init__(self): @validation.schema(server_migrations.force_complete) def _force_complete(self, req, id, server_id, body): context = req.environ['nova.context'] - context.can(sm_policies.POLICY_ROOT % 'force_complete') - instance = common.get_instance(self.compute_api, context, server_id) + context.can(sm_policies.POLICY_ROOT % 'force_complete', + target={'project_id': instance.project_id}) + try: self.compute_api.live_migrate_force_complete(context, instance, id) except exception.InstanceNotFound as e: @@ -91,29 +95,34 @@ def _force_complete(self, req, id, server_id, body): def index(self, req, server_id): """Return all migrations of an instance in progress.""" context = req.environ['nova.context'] - context.can(sm_policies.POLICY_ROOT % 'index') - # NOTE(Shaohe Feng) just check the instance is available. To keep # consistency with other API, check it before get migrations. - common.get_instance(self.compute_api, context, server_id) + instance = common.get_instance(self.compute_api, context, server_id) + + context.can(sm_policies.POLICY_ROOT % 'index', + target={'project_id': instance.project_id}) migrations = self.compute_api.get_migrations_in_progress_by_instance( context, server_id, 'live-migration') include_uuid = api_version_request.is_supported(req, '2.59') - return {'migrations': [output( - migration, include_uuid) for migration in migrations]} + + include_user_project = api_version_request.is_supported(req, '2.80') + return {'migrations': [ + output(migration, include_uuid, include_user_project) + for migration in migrations]} @wsgi.Controller.api_version("2.23") @wsgi.expected_errors(404) def show(self, req, server_id, id): """Return the migration of an instance in progress by id.""" context = req.environ['nova.context'] - context.can(sm_policies.POLICY_ROOT % 'show') - # NOTE(Shaohe Feng) just check the instance is available. To keep # consistency with other API, check it before get migrations. 
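# --- Editor's aside (not part of the patch): a minimal, self-contained
# sketch of how the output() helper above grows the migration payload with
# the request microversion -- 'uuid' is added at >= 2.59 and
# 'user_id'/'project_id' at >= 2.80. FakeMigration and its field values are
# illustrative stand-ins, not nova's real Migration schema.
from collections import namedtuple

FakeMigration = namedtuple(
    'FakeMigration', ['id', 'uuid', 'status', 'user_id', 'project_id'])

def sketch_output(migration, include_uuid=False, include_user_project=False):
    # base payload, returned for every supported microversion
    result = {'id': migration.id, 'status': migration.status}
    if include_uuid:
        result['uuid'] = migration.uuid
    if include_user_project:
        result['user_id'] = migration.user_id
        result['project_id'] = migration.project_id
    return result

m = FakeMigration(1, 'fake-uuid', 'running', 'fake-user', 'fake-project')
assert set(sketch_output(m)) == {'id', 'status'}          # < 2.59
assert 'uuid' in sketch_output(m, include_uuid=True)      # >= 2.59
assert 'project_id' in sketch_output(m, True, True)       # >= 2.80
# --- end of editor's aside.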
- common.get_instance(self.compute_api, context, server_id) + instance = common.get_instance(self.compute_api, context, server_id) + + context.can(sm_policies.POLICY_ROOT % 'show', + target={'project_id': instance.project_id}) try: migration = self.compute_api.get_migration_by_id_and_instance( @@ -123,7 +132,7 @@ def show(self, req, server_id, id): " server %(uuid)s.") % {"id": id, "uuid": server_id} raise exc.HTTPNotFound(explanation=msg) - if migration.get("migration_type") != "live-migration": + if not migration.is_live_migration: msg = _("Migration %(id)s for server %(uuid)s is not" " live-migration.") % {"id": id, "uuid": server_id} raise exc.HTTPNotFound(explanation=msg) @@ -136,7 +145,10 @@ def show(self, req, server_id, id): raise exc.HTTPNotFound(explanation=msg) include_uuid = api_version_request.is_supported(req, '2.59') - return {'migration': output(migration, include_uuid)} + + include_user_project = api_version_request.is_supported(req, '2.80') + return {'migration': output(migration, include_uuid, + include_user_project)} @wsgi.Controller.api_version("2.24") @wsgi.response(202) @@ -144,11 +156,12 @@ def show(self, req, server_id, id): def delete(self, req, server_id, id): """Abort an in progress migration of an instance.""" context = req.environ['nova.context'] - context.can(sm_policies.POLICY_ROOT % 'delete') + instance = common.get_instance(self.compute_api, context, server_id) + context.can(sm_policies.POLICY_ROOT % 'delete', + target={'project_id': instance.project_id}) support_abort_in_queue = api_version_request.is_supported(req, '2.65') - instance = common.get_instance(self.compute_api, context, server_id) try: self.compute_api.live_migrate_abort( context, instance, id, @@ -160,5 +173,3 @@ def delete(self, req, server_id, id): raise exc.HTTPNotFound(explanation=e.format_message()) except exception.InvalidMigrationState as e: raise exc.HTTPBadRequest(explanation=e.format_message()) - except exception.AbortQueuedLiveMigrationNotYetSupported as e: - raise exc.HTTPConflict(explanation=e.format_message()) diff --git a/nova/api/openstack/compute/server_password.py b/nova/api/openstack/compute/server_password.py index a953ff8c889..c7caf04b602 100644 --- a/nova/api/openstack/compute/server_password.py +++ b/nova/api/openstack/compute/server_password.py @@ -18,20 +18,22 @@ from nova.api.metadata import password from nova.api.openstack import common from nova.api.openstack import wsgi -from nova import compute +from nova.compute import api as compute from nova.policies import server_password as sp_policies class ServerPasswordController(wsgi.Controller): """The Server Password API controller for the OpenStack API.""" def __init__(self): + super(ServerPasswordController, self).__init__() self.compute_api = compute.API() @wsgi.expected_errors(404) def index(self, req, server_id): context = req.environ['nova.context'] - context.can(sp_policies.BASE_POLICY_NAME) instance = common.get_instance(self.compute_api, context, server_id) + context.can(sp_policies.BASE_POLICY_NAME % 'show', + target={'project_id': instance.project_id}) passw = password.extract_password(instance) return {'password': passw or ''} @@ -46,8 +48,9 @@ def clear(self, req, server_id): """ context = req.environ['nova.context'] - context.can(sp_policies.BASE_POLICY_NAME) instance = common.get_instance(self.compute_api, context, server_id) + context.can(sp_policies.BASE_POLICY_NAME % 'clear', + target={'project_id': instance.project_id}) meta = password.convert_password(context, None) 
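# --- Editor's aside (not part of the patch): clear() above wipes the stored
# password by writing convert_password(context, None) into system_metadata.
# A simplified, runnable sketch of that round trip, assuming the chunked
# storage scheme of nova/api/metadata/password.py (password_N system_metadata
# items of up to 255 characters each); the sketch_ names are illustrative,
# not nova's actual API.
CHUNKS, CHUNK_LENGTH = 4, 255

def sketch_convert_password(password):
    # split the password (or '' when clearing) across fixed-size chunks
    password = password or ''
    meta = {}
    for i in range(CHUNKS):
        meta['password_%d' % i] = password[:CHUNK_LENGTH]
        password = password[CHUNK_LENGTH:]
    return meta

def sketch_extract_password(system_metadata):
    # rejoin the chunks; all-empty chunks read back as "no password"
    joined = ''.join(
        system_metadata.get('password_%d' % i, '') for i in range(CHUNKS))
    return joined or None

meta = sketch_convert_password(None)           # what clear() stores
assert sketch_extract_password(meta) is None   # so index() then returns ''
# --- end of editor's aside.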
instance.system_metadata.update(meta) instance.save() diff --git a/nova/api/openstack/compute/server_tags.py b/nova/api/openstack/compute/server_tags.py index d39c0d0ad82..d672c479e9e 100644 --- a/nova/api/openstack/compute/server_tags.py +++ b/nova/api/openstack/compute/server_tags.py @@ -19,7 +19,7 @@ from nova.api.openstack import wsgi from nova.api import validation from nova.api.validation import parameter_types -from nova import compute +from nova.compute import api as compute from nova.compute import vm_states from nova import context as nova_context from nova import exception @@ -45,8 +45,8 @@ class ServerTagsController(wsgi.Controller): _view_builder_class = server_tags.ViewBuilder def __init__(self): - self.compute_api = compute.API() super(ServerTagsController, self).__init__() + self.compute_api = compute.API() def _check_instance_in_valid_state(self, context, server_id, action): instance = common.get_instance(self.compute_api, context, server_id) @@ -65,15 +65,14 @@ def _check_instance_in_valid_state(self, context, server_id, action): @wsgi.expected_errors(404) def show(self, req, server_id, id): context = req.environ["nova.context"] - context.can(st_policies.POLICY_ROOT % 'show') + im = _get_instance_mapping(context, server_id) + context.can(st_policies.POLICY_ROOT % 'show', + target={'project_id': im.project_id}) try: - im = objects.InstanceMapping.get_by_instance_uuid(context, - server_id) with nova_context.target_cell(context, im.cell_mapping) as cctxt: exists = objects.Tag.exists(cctxt, server_id, id) - except (exception.InstanceNotFound, - exception.InstanceMappingNotFound) as e: + except (exception.InstanceNotFound) as e: raise webob.exc.HTTPNotFound(explanation=e.format_message()) if not exists: @@ -85,15 +84,14 @@ def show(self, req, server_id, id): @wsgi.expected_errors(404) def index(self, req, server_id): context = req.environ["nova.context"] - context.can(st_policies.POLICY_ROOT % 'index') + im = _get_instance_mapping(context, server_id) + context.can(st_policies.POLICY_ROOT % 'index', + target={'project_id': im.project_id}) try: - im = objects.InstanceMapping.get_by_instance_uuid(context, - server_id) with nova_context.target_cell(context, im.cell_mapping) as cctxt: tags = objects.TagList.get_by_resource_id(cctxt, server_id) - except (exception.InstanceNotFound, - exception.InstanceMappingNotFound) as e: + except (exception.InstanceNotFound) as e: raise webob.exc.HTTPNotFound(explanation=e.format_message()) return {'tags': _get_tags_names(tags)} @@ -103,8 +101,9 @@ def index(self, req, server_id): @validation.schema(schema.update) def update(self, req, server_id, id, body): context = req.environ["nova.context"] - context.can(st_policies.POLICY_ROOT % 'update') im = _get_instance_mapping(context, server_id) + context.can(st_policies.POLICY_ROOT % 'update', + target={'project_id': im.project_id}) with nova_context.target_cell(context, im.cell_mapping) as cctxt: instance = self._check_instance_in_valid_state( @@ -155,8 +154,9 @@ def update(self, req, server_id, id, body): @validation.schema(schema.update_all) def update_all(self, req, server_id, body): context = req.environ["nova.context"] - context.can(st_policies.POLICY_ROOT % 'update_all') im = _get_instance_mapping(context, server_id) + context.can(st_policies.POLICY_ROOT % 'update_all', + target={'project_id': im.project_id}) with nova_context.target_cell(context, im.cell_mapping) as cctxt: instance = self._check_instance_in_valid_state( @@ -179,8 +179,9 @@ def update_all(self, req, server_id, body): 
@wsgi.expected_errors((404, 409)) def delete(self, req, server_id, id): context = req.environ["nova.context"] - context.can(st_policies.POLICY_ROOT % 'delete') im = _get_instance_mapping(context, server_id) + context.can(st_policies.POLICY_ROOT % 'delete', + target={'project_id': im.project_id}) with nova_context.target_cell(context, im.cell_mapping) as cctxt: instance = self._check_instance_in_valid_state( @@ -203,8 +204,9 @@ def delete(self, req, server_id, id): @wsgi.expected_errors((404, 409)) def delete_all(self, req, server_id): context = req.environ["nova.context"] - context.can(st_policies.POLICY_ROOT % 'delete_all') im = _get_instance_mapping(context, server_id) + context.can(st_policies.POLICY_ROOT % 'delete_all', + target={'project_id': im.project_id}) with nova_context.target_cell(context, im.cell_mapping) as cctxt: instance = self._check_instance_in_valid_state( diff --git a/nova/api/openstack/compute/server_topology.py b/nova/api/openstack/compute/server_topology.py new file mode 100644 index 00000000000..9d4cc4a5d6d --- /dev/null +++ b/nova/api/openstack/compute/server_topology.py @@ -0,0 +1,75 @@ +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from nova.api.openstack import common +from nova.api.openstack import wsgi +from nova.compute import api as compute +from nova.policies import server_topology as st_policies + + +class ServerTopologyController(wsgi.Controller): + + def __init__(self, *args, **kwargs): + super(ServerTopologyController, self).__init__(*args, **kwargs) + self.compute_api = compute.API() + + @wsgi.Controller.api_version("2.78") + @wsgi.expected_errors(404) + def index(self, req, server_id): + context = req.environ["nova.context"] + instance = common.get_instance(self.compute_api, context, server_id, + expected_attrs=['numa_topology', + 'vcpu_model']) + + context.can(st_policies.BASE_POLICY_NAME % 'index', + target={'project_id': instance.project_id}) + + host_policy = (st_policies.BASE_POLICY_NAME % 'host:index') + show_host_info = context.can(host_policy, fatal=False) + + return self._get_numa_topology(context, instance, show_host_info) + + def _get_numa_topology(self, context, instance, show_host_info): + + if instance.numa_topology is None: + return { + 'nodes': [], + 'pagesize_kb': None + } + + topo = {} + cells = [] + pagesize_kb = None + + for cell_ in instance.numa_topology.cells: + cell = {} + cell['vcpu_set'] = cell_.total_cpus + cell['siblings'] = cell_.siblings + cell['memory_mb'] = cell_.memory + + if show_host_info: + cell['host_node'] = cell_.id + if cell_.cpu_pinning is None: + cell['cpu_pinning'] = {} + else: + cell['cpu_pinning'] = cell_.cpu_pinning + + if cell_.pagesize: + pagesize_kb = cell_.pagesize + + cells.append(cell) + + topo['nodes'] = cells + topo['pagesize_kb'] = pagesize_kb + + return topo diff --git a/nova/api/openstack/compute/server_usage.py b/nova/api/openstack/compute/server_usage.py deleted file mode 100644 index 5448f8bc23f..00000000000 --- a/nova/api/openstack/compute/server_usage.py +++ /dev/null @@ -1,53 +0,0 @@ -# 
Copyright 2013 OpenStack Foundation -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from nova.api.openstack import wsgi -from nova.policies import server_usage as su_policies - - -resp_topic = "OS-SRV-USG" - - -class ServerUsageController(wsgi.Controller): - - def _extend_server(self, server, instance): - for k in ['launched_at', 'terminated_at']: - key = "%s:%s" % (resp_topic, k) - # NOTE(danms): Historically, this timestamp has been generated - # merely by grabbing str(datetime) of a TZ-naive object. The - # only way we can keep that with instance objects is to strip - # the tzinfo from the stamp and str() it. - server[key] = (instance[k].replace(tzinfo=None) - if instance[k] else None) - - @wsgi.extends - def show(self, req, resp_obj, id): - context = req.environ['nova.context'] - if context.can(su_policies.BASE_POLICY_NAME, fatal=False): - server = resp_obj.obj['server'] - db_instance = req.get_db_instance(server['id']) - # server['id'] is guaranteed to be in the cache due to - # the core API adding it in its 'show' method. - self._extend_server(server, db_instance) - - @wsgi.extends - def detail(self, req, resp_obj): - context = req.environ['nova.context'] - if context.can(su_policies.BASE_POLICY_NAME, fatal=False): - servers = list(resp_obj.obj['servers']) - for server in servers: - db_instance = req.get_db_instance(server['id']) - # server['id'] is guaranteed to be in the cache due to - # the core API adding it in its 'detail' method. 
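# --- Editor's aside (not part of the patch): the NOTE(danms) comment in the
# file being deleted above documents a historical quirk -- timestamps were
# rendered by str()-ing a TZ-naive datetime. A tiny stdlib-only illustration
# of why the .replace(tzinfo=None) step changes the serialized form:
from datetime import datetime, timezone

launched_at = datetime(2013, 9, 23, 13, 37, 0, tzinfo=timezone.utc)
assert str(launched_at) == '2013-09-23 13:37:00+00:00'                 # aware
assert str(launched_at.replace(tzinfo=None)) == '2013-09-23 13:37:00'  # naive
# --- end of editor's aside.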
- self._extend_server(server, db_instance) diff --git a/nova/api/openstack/compute/servers.py b/nova/api/openstack/compute/servers.py index 73e3a8fb6d8..8fbc93e3763 100644 --- a/nova/api/openstack/compute/servers.py +++ b/nova/api/openstack/compute/servers.py @@ -21,7 +21,6 @@ from oslo_utils import strutils from oslo_utils import timeutils from oslo_utils import uuidutils -import six import webob from webob import exc @@ -33,26 +32,64 @@ from nova.api.openstack import wsgi from nova.api import validation from nova import block_device -from nova import compute +from nova.compute import api as compute from nova.compute import flavors from nova.compute import utils as compute_utils import nova.conf from nova import context as nova_context from nova import exception from nova.i18n import _ -from nova.image import api as image_api +from nova.image import glance from nova import objects -from nova.objects import service as service_obj from nova.policies import servers as server_policies from nova import utils TAG_SEARCH_FILTERS = ('tags', 'tags-any', 'not-tags', 'not-tags-any') -DEVICE_TAGGING_MIN_COMPUTE_VERSION = 14 +PARTIAL_CONSTRUCT_FOR_CELL_DOWN_MIN_VERSION = '2.69' +PAGING_SORTING_PARAMS = ('sort_key', 'sort_dir', 'limit', 'marker') CONF = nova.conf.CONF LOG = logging.getLogger(__name__) +INVALID_FLAVOR_IMAGE_EXCEPTIONS = ( + exception.BadRequirementEmulatorThreadsPolicy, + exception.CPUThreadPolicyConfigurationInvalid, + exception.FlavorImageConflict, + exception.FlavorDiskTooSmall, + exception.FlavorMemoryTooSmall, + exception.ImageCPUPinningForbidden, + exception.ImageCPUThreadPolicyForbidden, + exception.ImageNUMATopologyAsymmetric, + exception.ImageNUMATopologyCPUDuplicates, + exception.ImageNUMATopologyCPUOutOfRange, + exception.ImageNUMATopologyCPUsUnassigned, + exception.ImageNUMATopologyForbidden, + exception.ImageNUMATopologyIncomplete, + exception.ImageNUMATopologyMemoryOutOfRange, + exception.ImageNUMATopologyRebuildConflict, + exception.ImagePMUConflict, + exception.ImageSerialPortNumberExceedFlavorValue, + exception.ImageSerialPortNumberInvalid, + exception.ImageVCPULimitsRangeExceeded, + exception.ImageVCPUTopologyRangeExceeded, + exception.InvalidCPUAllocationPolicy, + exception.InvalidCPUThreadAllocationPolicy, + exception.InvalidEmulatorThreadsPolicy, + exception.InvalidMachineType, + exception.InvalidNUMANodesNumber, + exception.InvalidRequest, + exception.MemoryPageSizeForbidden, + exception.MemoryPageSizeInvalid, + exception.PciInvalidAlias, + exception.PciRequestAliasNotDefined, + exception.RealtimeConfigurationInvalid, + exception.RealtimeMaskNotFoundOrInvalid, + exception.RequiredMixedInstancePolicy, + exception.RequiredMixedOrRealtimeCPUMask, + exception.InvalidMixedInstanceDedicatedMask, +) + class ServersController(wsgi.Controller): """The Server API base controller class for the OpenStack API.""" @@ -65,20 +102,23 @@ def _add_location(robj): if 'server' not in robj.obj: return robj - link = [l for l in robj.obj['server']['links'] if l['rel'] == 'self'] + link = [link for link in robj.obj['server'][ + 'links'] if link['rel'] == 'self'] if link: - robj['Location'] = utils.utf8(link[0]['href']) + robj['Location'] = link[0]['href'] # Convenience return return robj - def __init__(self, **kwargs): - - super(ServersController, self).__init__(**kwargs) + def __init__(self): + super(ServersController, self).__init__() self.compute_api = compute.API() @wsgi.expected_errors((400, 403)) - @validation.query_schema(schema_servers.query_params_v226, '2.26') + 
@validation.query_schema(schema_servers.query_params_v275, '2.75') + @validation.query_schema(schema_servers.query_params_v273, '2.73', '2.74') + @validation.query_schema(schema_servers.query_params_v266, '2.66', '2.72') + @validation.query_schema(schema_servers.query_params_v226, '2.26', '2.65') @validation.query_schema(schema_servers.query_params_v21, '2.1', '2.25') def index(self, req): """Returns a list of server names and ids for a given user.""" @@ -91,7 +131,10 @@ def index(self, req): return servers @wsgi.expected_errors((400, 403)) - @validation.query_schema(schema_servers.query_params_v226, '2.26') + @validation.query_schema(schema_servers.query_params_v275, '2.75') + @validation.query_schema(schema_servers.query_params_v273, '2.73', '2.74') + @validation.query_schema(schema_servers.query_params_v266, '2.66', '2.72') + @validation.query_schema(schema_servers.query_params_v226, '2.26', '2.65') @validation.query_schema(schema_servers.query_params_v21, '2.1', '2.25') def detail(self, req): """Returns a list of server details for a given user.""" @@ -103,6 +146,32 @@ def detail(self, req): raise exc.HTTPBadRequest(explanation=err.format_message()) return servers + @staticmethod + def _is_cell_down_supported(req, search_opts): + cell_down_support = api_version_request.is_supported( + req, min_version=PARTIAL_CONSTRUCT_FOR_CELL_DOWN_MIN_VERSION) + + if cell_down_support: + # NOTE(tssurya): Minimal constructs would be returned from the down + # cells if cell_down_support is True, however if filtering, sorting + # or paging is requested by the user, then cell_down_support should + # be made False and the down cells should be skipped (depending on + # CONF.api.list_records_by_skipping_down_cells) as there is no + # way to return correct results for the down cells in those + # situations due to missing keys/information. + # NOTE(tssurya): Since there is a chance that + # remove_invalid_options function could have removed the paging and + # sorting parameters, we add the additional check for that from the + # request. + pag_sort = any( + ps in req.GET.keys() for ps in PAGING_SORTING_PARAMS) + # NOTE(tssurya): ``nova list --all_tenants`` is the only + # allowed filter exception when handling down cells. 
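# --- Editor's aside (not part of the patch): the check being assembled in
# _is_cell_down_supported() reduces to "microversion is high enough, no
# paging or sorting was requested, and no filter other than all_tenants was
# passed". A set-based restatement of that rule (the method itself compares
# list(search_opts.keys()) against literal lists):
PAGING_SORTING = ('sort_key', 'sort_dir', 'limit', 'marker')

def sketch_cell_down_support(version_ok, query_params, search_opts):
    if not version_ok:
        return False
    # any paging/sorting parameter disables minimal constructs
    if any(p in query_params for p in PAGING_SORTING):
        return False
    # all_tenants is the one filter that is still allowed
    return not (set(search_opts) - {'all_tenants'})

assert sketch_cell_down_support(True, {}, {}) is True
assert sketch_cell_down_support(True, {}, {'all_tenants': 1}) is True
assert sketch_cell_down_support(True, {'limit': '10'}, {}) is False
assert sketch_cell_down_support(True, {}, {'name': 'x'}) is False
# --- end of editor's aside.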
+ filters = list(search_opts.keys()) not in ([u'all_tenants'], []) + if pag_sort or filters: + cell_down_support = False + return cell_down_support + def _get_servers(self, req, is_detail): """Returns a list of servers, based on any search options specified.""" @@ -113,6 +182,8 @@ def _get_servers(self, req, is_detail): remove_invalid_options(context, search_opts, self._get_server_search_options(req)) + cell_down_support = self._is_cell_down_supported(req, search_opts) + for search_opt in search_opts: if (search_opt in schema_servers.JOINED_TABLE_QUERY_PARAMS_SERVERS.keys() or @@ -154,15 +225,32 @@ def _get_servers(self, req, is_detail): msg = _("Invalid filter field: changes-since.") raise exc.HTTPBadRequest(explanation=msg) + if 'changes-before' in search_opts: + try: + search_opts['changes-before'] = timeutils.parse_isotime( + search_opts['changes-before']) + changes_since = search_opts.get('changes-since') + if changes_since and search_opts['changes-before'] < \ + search_opts['changes-since']: + msg = _('The value of changes-since must be' + ' less than or equal to changes-before.') + raise exc.HTTPBadRequest(explanation=msg) + except ValueError: + msg = _("Invalid filter field: changes-before.") + raise exc.HTTPBadRequest(explanation=msg) + # By default, compute's get_all() will return deleted instances. # If an admin hasn't specified a 'deleted' search option, we need # to filter out deleted instances by setting the filter ourselves. - # ... Unless 'changes-since' is specified, because 'changes-since' - # should return recently deleted instances according to the API spec. + # ... Unless 'changes-since' or 'changes-before' is specified, + # because those will return recently deleted instances according to + # the API spec. if 'deleted' not in search_opts: - if 'changes-since' not in search_opts: - # No 'changes-since', so we only want non-deleted servers + if 'changes-since' not in search_opts and \ + 'changes-before' not in search_opts: + # No 'changes-since' or 'changes-before', so we only + # want non-deleted servers search_opts['deleted'] = False else: # Convert deleted filter value to a valid boolean. @@ -184,32 +272,18 @@ def _get_servers(self, req, is_detail): search_opts[tag_filter] = search_opts[ tag_filter].split(',') - # If tenant_id is passed as a search parameter this should - # imply that all_tenants is also enabled unless explicitly - # disabled. Note that the tenant_id parameter is filtered out - # by remove_invalid_options above unless the requestor is an - # admin. - - # TODO(gmann): 'all_tenants' flag should not be required while - # searching with 'tenant_id'. Ref bug# 1185290 - # +microversions to achieve above mentioned behavior by - # uncommenting below code. - - # if 'tenant_id' in search_opts and 'all_tenants' not in search_opts: - # We do not need to add the all_tenants flag if the tenant - # id associated with the token is the tenant id - # specified. This is done so a request that does not need - # the all_tenants flag does not fail because of lack of - # policy permission for compute:get_all_tenants when it - # doesn't actually need it. - # if context.project_id != search_opts.get('tenant_id'): - # search_opts['all_tenants'] = 1 - all_tenants = common.is_all_tenants(search_opts) # use the boolean from here on out so remove the entry from search_opts - # if it's present + # if it's present. 
+ # NOTE(tssurya): In case we support handling down cells + # we need to know further down the stack whether the 'all_tenants' + # filter was passed with the true value or not, so we pass the flag + # further down the stack. search_opts.pop('all_tenants', None) + if 'locked' in search_opts: + search_opts['locked'] = common.is_locked(search_opts) + elevated = None if all_tenants: if is_detail: @@ -219,9 +293,7 @@ def _get_servers(self, req, is_detail): elevated = context.elevated() else: # As explained in lp:#1185290, if `all_tenants` is not passed - # we must ignore the `tenant_id` search option. As explained - # in a above code comment, any change to this behavior would - # require a microversion bump. + # we must ignore the `tenant_id` search option. search_opts.pop('tenant_id', None) if context.project_id: search_opts['project_id'] = context.project_id @@ -230,9 +302,11 @@ def _get_servers(self, req, is_detail): limit, marker = common.get_limit_and_marker(req) sort_keys, sort_dirs = common.get_sort_params(req.params) + blacklist = schema_servers.SERVER_LIST_IGNORE_SORT_KEY + if api_version_request.is_supported(req, min_version='2.73'): + blacklist = schema_servers.SERVER_LIST_IGNORE_SORT_KEY_V273 sort_keys, sort_dirs = remove_invalid_sort_keys( - context, sort_keys, sort_dirs, - schema_servers.SERVER_LIST_IGNORE_SORT_KEY, ('host', 'node')) + context, sort_keys, sort_dirs, blacklist, ('host', 'node')) expected_attrs = [] if is_detail: @@ -242,6 +316,8 @@ def _get_servers(self, req, is_detail): expected_attrs.append("tags") if api_version_request.is_supported(req, '2.63'): expected_attrs.append("trusted_certs") + if api_version_request.is_supported(req, '2.73'): + expected_attrs.append("system_metadata") # merge our expected attrs with what the view builder needs for # showing details @@ -251,8 +327,9 @@ def _get_servers(self, req, is_detail): try: instance_list = self.compute_api.get_all(elevated or context, search_opts=search_opts, limit=limit, marker=marker, - expected_attrs=expected_attrs, - sort_keys=sort_keys, sort_dirs=sort_dirs) + expected_attrs=expected_attrs, sort_keys=sort_keys, + sort_dirs=sort_dirs, cell_down_support=cell_down_support, + all_tenants=all_tenants) except exception.MarkerNotFound: msg = _('marker [%s] not found') % marker raise exc.HTTPBadRequest(explanation=msg) @@ -264,20 +341,28 @@ def _get_servers(self, req, is_detail): if is_detail: instance_list._context = context instance_list.fill_faults() - response = self._view_builder.detail(req, instance_list) + response = self._view_builder.detail( + req, instance_list, cell_down_support=cell_down_support) else: - response = self._view_builder.index(req, instance_list) - req.cache_db_instances(instance_list) + response = self._view_builder.index( + req, instance_list, cell_down_support=cell_down_support) return response - def _get_server(self, context, req, instance_uuid, is_detail=False): + def _get_server(self, context, req, instance_uuid, is_detail=False, + cell_down_support=False, columns_to_join=None): """Utility function for looking up an instance by uuid. :param context: request context for auth - :param req: HTTP request. The instance is cached in this request. + :param req: HTTP request. :param instance_uuid: UUID of the server instance to get :param is_detail: True if you plan on showing the details of the instance in the response, False otherwise. + :param cell_down_support: True if the API (and caller) support + returning a minimal instance + construct if the relevant cell is + down. 
+ :param columns_to_join: optional list of extra fields to join on the + Instance object """ expected_attrs = ['flavor', 'numa_topology'] if is_detail: @@ -287,20 +372,19 @@ def _get_server(self, context, req, instance_uuid, is_detail=False): expected_attrs.append("trusted_certs") expected_attrs = self._view_builder.get_show_expected_attrs( expected_attrs) + if columns_to_join: + expected_attrs.extend(columns_to_join) instance = common.get_instance(self.compute_api, context, instance_uuid, - expected_attrs=expected_attrs) - req.cache_db_instance(instance) + expected_attrs=expected_attrs, + cell_down_support=cell_down_support) return instance @staticmethod def _validate_network_id(net_id, network_uuids): """Validates that a requested network id. - This method performs two checks: - - 1. That the network id is in the proper uuid format. - 2. That the network is not a duplicate when using nova-network. + This method checks that the network id is in the proper UUID format. :param net_id: The network id to validate. :param network_uuids: A running list of requested network IDs that have @@ -308,34 +392,24 @@ def _validate_network_id(net_id, network_uuids): :raises: webob.exc.HTTPBadRequest if validation fails """ if not uuidutils.is_uuid_like(net_id): - # NOTE(mriedem): Neutron would allow a network id with a br- prefix - # back in Folsom so continue to honor that. - # TODO(mriedem): Need to figure out if this is still a valid case. - br_uuid = net_id.split('-', 1)[-1] - if not uuidutils.is_uuid_like(br_uuid): - msg = _("Bad networks format: network uuid is " - "not in proper format (%s)") % net_id - raise exc.HTTPBadRequest(explanation=msg) - - # duplicate networks are allowed only for neutron v2.0 - if net_id in network_uuids and not utils.is_neutron(): - expl = _("Duplicate networks (%s) are not allowed") % net_id - raise exc.HTTPBadRequest(explanation=expl) + msg = _("Bad networks format: network uuid is " + "not in proper format (%s)") % net_id + raise exc.HTTPBadRequest(explanation=msg) - def _get_requested_networks(self, requested_networks, - supports_device_tagging=False): + def _get_requested_networks(self, requested_networks): """Create a list of requested networks from the networks attribute.""" # Starting in the 2.37 microversion, requested_networks is either a # list or a string enum with value 'auto' or 'none'. The auto/none # values are verified via jsonschema so we don't check them again here. - if isinstance(requested_networks, six.string_types): + if isinstance(requested_networks, str): return objects.NetworkRequestList( objects=[objects.NetworkRequest( network_id=requested_networks)]) networks = [] network_uuids = [] + port_uuids = [] for network in requested_networks: request = objects.NetworkRequest() try: @@ -344,25 +418,31 @@ def _get_requested_networks(self, requested_networks, # it will use one of the available IP address from the network request.address = network.get('fixed_ip', None) request.port_id = network.get('port', None) - request.tag = network.get('tag', None) - if request.tag and not supports_device_tagging: - msg = _('Network interface tags are not yet supported.') - raise exc.HTTPBadRequest(explanation=msg) if request.port_id: - request.network_id = None - if not utils.is_neutron(): - # port parameter is only for neutron v2.0 - msg = _("Unknown argument: port") + if request.port_id in port_uuids: + msg = _( + "Port ID '%(port)s' was specified twice: you " + "cannot attach a port multiple times." 
+ ) % { + "port": request.port_id, + } raise exc.HTTPBadRequest(explanation=msg) + + if request.address is not None: + msg = _( + "Specified Fixed IP '%(addr)s' cannot be used " + "with port '%(port)s': the two cannot be " + "specified together." + ) % { + "addr": request.address, + "port": request.port_id, + } raise exc.HTTPBadRequest(explanation=msg) + + request.network_id = None + port_uuids.append(request.port_id) else: request.network_id = network['uuid'] self._validate_network_id( @@ -383,22 +463,222 @@ def show(self, req, id): """Returns server details by server id.""" context = req.environ['nova.context'] - context.can(server_policies.SERVERS % 'show') - instance = self._get_server(context, req, id, is_detail=True) - return self._view_builder.show(req, instance) + cell_down_support = api_version_request.is_supported( + req, min_version=PARTIAL_CONSTRUCT_FOR_CELL_DOWN_MIN_VERSION) + show_server_groups = api_version_request.is_supported( + req, min_version='2.71') + + instance = self._get_server( + context, req, id, is_detail=True, + cell_down_support=cell_down_support) + context.can(server_policies.SERVERS % 'show', + target={'project_id': instance.project_id}) + + return self._view_builder.show( + req, instance, cell_down_support=cell_down_support, + show_server_groups=show_server_groups) + + @staticmethod + def _process_bdms_for_create( + context, target, server_dict, create_kwargs): + """Processes block_device_mapping(_v2) req parameters for server create + + :param context: The nova auth request context + :param target: The target dict for ``context.can`` policy checks + :param server_dict: The POST /servers request body "server" entry + :param create_kwargs: dict that gets populated by this method and + passed to nova.compute.api.API.create() + :raises: webob.exc.HTTPBadRequest if the request parameters are invalid + :raises: nova.exception.Forbidden if a policy check fails + """ + block_device_mapping_legacy = server_dict.get('block_device_mapping', + []) + block_device_mapping_v2 = server_dict.get('block_device_mapping_v2', + []) + + if block_device_mapping_legacy and block_device_mapping_v2: + expl = _('Using different block_device_mapping syntaxes ' + 'is not allowed in the same request.') + raise exc.HTTPBadRequest(explanation=expl) + + if block_device_mapping_legacy: + for bdm in block_device_mapping_legacy: + if 'delete_on_termination' in bdm: + bdm['delete_on_termination'] = strutils.bool_from_string( + bdm['delete_on_termination']) + create_kwargs[ + 'block_device_mapping'] = block_device_mapping_legacy + # Sets the legacy_bdm flag if we got a legacy block device mapping. + create_kwargs['legacy_bdm'] = True + elif block_device_mapping_v2: + # Have to check whether --image is given, see bug 1433609 + image_href = server_dict.get('imageRef') + image_uuid_specified = image_href is not None + try: + block_device_mapping = [ + block_device.BlockDeviceDict.from_api(bdm_dict, + image_uuid_specified) + for bdm_dict in block_device_mapping_v2] + except exception.InvalidBDMFormat as e: + raise exc.HTTPBadRequest(explanation=e.format_message()) + create_kwargs['block_device_mapping'] = block_device_mapping + # Unset the legacy_bdm flag if we got a block device mapping. 
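# --- Editor's aside (not part of the patch): a condensed sketch of the
# request-body rule _process_bdms_for_create() enforces above -- the legacy
# 'block_device_mapping' and the newer 'block_device_mapping_v2' syntaxes
# must not be mixed in one request, and legacy_bdm records which one was
# used. ValueError stands in for the webob HTTPBadRequest the real code
# raises:
def sketch_pick_bdm_syntax(server_dict):
    legacy = server_dict.get('block_device_mapping', [])
    v2 = server_dict.get('block_device_mapping_v2', [])
    if legacy and v2:
        raise ValueError('Using different block_device_mapping syntaxes '
                         'is not allowed in the same request.')
    if legacy:
        return legacy, True     # create_kwargs['legacy_bdm'] = True
    if v2:
        return v2, False        # create_kwargs['legacy_bdm'] = False
    return [], None             # no mapping requested at all

assert sketch_pick_bdm_syntax(
    {'block_device_mapping': [{'volume_id': 'v'}]})[1] is True
assert sketch_pick_bdm_syntax(
    {'block_device_mapping_v2': [{'uuid': 'u'}]})[1] is False
# --- end of editor's aside.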
+ create_kwargs['legacy_bdm'] = False + + block_device_mapping = create_kwargs.get("block_device_mapping") + if block_device_mapping: + context.can(server_policies.SERVERS % 'create:attach_volume', + target) + + def _process_networks_for_create( + self, context, target, server_dict, create_kwargs): + """Processes networks request parameter for server create + + :param context: The nova auth request context + :param target: The target dict for ``context.can`` policy checks + :param server_dict: The POST /servers request body "server" entry + :param create_kwargs: dict that gets populated by this method and + passed to nova.compute.api.API.create() + :raises: webob.exc.HTTPBadRequest if the request parameters are invalid + :raises: nova.exception.Forbidden if a policy check fails + """ + requested_networks = server_dict.get('networks', None) + + if requested_networks is not None: + requested_networks = self._get_requested_networks( + requested_networks) + + # Skip policy check for 'create:attach_network' if there is no + # network allocation request. + if requested_networks and len(requested_networks) and \ + not requested_networks.no_allocate: + context.can(server_policies.SERVERS % 'create:attach_network', + target) + + create_kwargs['requested_networks'] = requested_networks + + @staticmethod + def _validate_host_availability_zone(context, availability_zone, host): + """Ensure the host belongs in the availability zone. + + This is slightly tricky and it's probably worth recapping how host + aggregates and availability zones are related before reading. Hosts can + belong to zero or more host aggregates, but they will always belong to + exactly one availability zone. If the user has set the availability + zone key on one of the host aggregates that the host is a member of, + then the host will belong to this availability zone. If the user has + not set the availability zone key on any of the host aggregates that + the host is a member of or the host is not a member of any host + aggregates, then the host will belong to the default availability zone. + Setting the availability zone key on more than one of the host + aggregates that the host is a member of is an error and will be + rejected by the API. + + Given the above, our host-availability zone check needs to vary + behavior based on whether we're requesting the default availability + zone or not. If we are not, then we simply ask "does this host belong + to a host aggregate and, if so, do any of the host aggregates have the + requested availability zone metadata set". By comparison, if we *are* + requesting the default availability zone then we want to ask the + inverse, or "does this host not belong to a host aggregate or, if it + does, is the availability zone information unset (or, naughty naughty, + set to the default) for each of the host aggregates". In both cases, if + the answer is no then we warn about the mismatch and then use the + actual availability zone of the host to avoid mismatches. 
+ + :param context: The nova auth request context + :param availability_zone: The name of the requested availability zone + :param host: The name of the requested host + :returns: The availability zone that should actually be used for the + request + """ + aggregates = objects.AggregateList.get_by_host(context, host=host) + if not aggregates: + # a host is assigned to the default availability zone if it is not + # a member of any host aggregates + if availability_zone == CONF.default_availability_zone: + return availability_zone + + LOG.warning( + "Requested availability zone '%s' but forced host '%s' " + "does not belong to any availability zones; ignoring " + "requested availability zone to avoid bug #1934770", + availability_zone, host, + ) + return None + + # only one host aggregate will have the availability_zone field set so + # use the first non-null value + host_availability_zone = next( + (a.availability_zone for a in aggregates if a.availability_zone), + None, + ) + + if availability_zone == host_availability_zone: + # if there's an exact match, use what the user requested + return availability_zone + + if ( + availability_zone == CONF.default_availability_zone and + host_availability_zone is None + ): + # special case the default availability zone since this won't (or + # rather shouldn't) be explicitly stored on any host aggregate + return availability_zone + + # no match, so use the host's availability zone information, if any + LOG.warning( + "Requested availability zone '%s' but forced host '%s' " + "does not belong to this availability zone; overwriting " + "requested availability zone to avoid bug #1934770", + availability_zone, host, + ) + return None + + @staticmethod + def _process_hosts_for_create( + context, target, server_dict, create_kwargs, host, node): + """Processes hosts request parameter for server create + + :param context: The nova auth request context + :param target: The target dict for ``context.can`` policy checks + :param server_dict: The POST /servers request body "server" entry + :param create_kwargs: dict that gets populated by this method and + passed to nova.compute.api.API.create() + :param host: Forced host of availability_zone + :param node: Forced node of availability_zone + :raises: webob.exc.HTTPBadRequest if the request parameters are invalid + :raises: nova.exception.Forbidden if a policy check fails + """ + requested_host = server_dict.get('host') + requested_hypervisor_hostname = server_dict.get('hypervisor_hostname') + if requested_host or requested_hypervisor_hostname: + # If the policy check fails, this will raise a Forbidden exception. 
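# --- Editor's aside (not part of the patch): a condensed decision table for
# _validate_host_availability_zone() above. host_az collapses the aggregate
# lookup into "the AZ implied by the host's aggregates, or None when unset";
# default_az stands in for CONF.default_availability_zone. A None result
# means "mismatch: warn and drop the requested AZ, falling back to the
# host's own" (bug #1934770):
def sketch_validate_az(requested_az, host_az, default_az='nova'):
    if host_az is None and requested_az == default_az:
        return requested_az   # hosts without AZ metadata match the default
    if requested_az == host_az:
        return requested_az   # exact aggregate match
    return None               # mismatch; ignore the requested AZ

assert sketch_validate_az('nova', None) == 'nova'
assert sketch_validate_az('az1', 'az1') == 'az1'
assert sketch_validate_az('az1', 'az2') is None
assert sketch_validate_az('az1', None) is None
# --- end of editor's aside.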
+ context.can(server_policies.REQUESTED_DESTINATION, target=target) + if host or node: + msg = _("One mechanism with host and/or " + "hypervisor_hostname and another mechanism " + "with zone:host:node are mutually exclusive.") + raise exc.HTTPBadRequest(explanation=msg) + create_kwargs['requested_host'] = requested_host + create_kwargs['requested_hypervisor_hostname'] = ( + requested_hypervisor_hostname) @wsgi.response(202) @wsgi.expected_errors((400, 403, 409)) - @validation.schema(schema_servers.base_create_v20, '2.0', '2.0') - @validation.schema(schema_servers.base_create, '2.1', '2.18') - @validation.schema(schema_servers.base_create_v219, '2.19', '2.31') - @validation.schema(schema_servers.base_create_v232, '2.32', '2.32') - @validation.schema(schema_servers.base_create_v233, '2.33', '2.36') - @validation.schema(schema_servers.base_create_v237, '2.37', '2.41') - @validation.schema(schema_servers.base_create_v242, '2.42', '2.51') - @validation.schema(schema_servers.base_create_v252, '2.52', '2.56') - @validation.schema(schema_servers.base_create_v257, '2.57', '2.62') - @validation.schema(schema_servers.base_create_v263, '2.63') + @validation.schema(schema_servers.create_v20, '2.0', '2.0') + @validation.schema(schema_servers.create, '2.1', '2.18') + @validation.schema(schema_servers.create_v219, '2.19', '2.31') + @validation.schema(schema_servers.create_v232, '2.32', '2.32') + @validation.schema(schema_servers.create_v233, '2.33', '2.36') + @validation.schema(schema_servers.create_v237, '2.37', '2.41') + @validation.schema(schema_servers.create_v242, '2.42', '2.51') + @validation.schema(schema_servers.create_v252, '2.52', '2.56') + @validation.schema(schema_servers.create_v257, '2.57', '2.62') + @validation.schema(schema_servers.create_v263, '2.63', '2.66') + @validation.schema(schema_servers.create_v267, '2.67', '2.73') + @validation.schema(schema_servers.create_v274, '2.74', '2.89') + @validation.schema(schema_servers.create_v290, '2.90') def create(self, req, body): """Creates a new server for a given user.""" context = req.environ['nova.context'] @@ -408,6 +688,9 @@ def create(self, req, body): description = name if api_version_request.is_supported(req, min_version='2.19'): description = server_dict.get('description') + hostname = None + if api_version_request.is_supported(req, min_version='2.90'): + hostname = server_dict.get('hostname') # Arguments to be passed to instance create function create_kwargs = {} @@ -439,13 +722,11 @@ def create(self, req, body): # 'max_count' to be 'min_count'. min_count = int(server_dict.get('min_count', 1)) max_count = int(server_dict.get('max_count', min_count)) - return_id = server_dict.get('return_reservation_id', False) if min_count > max_count: msg = _('min_count must be <= max_count') raise exc.HTTPBadRequest(explanation=msg) create_kwargs['min_count'] = min_count create_kwargs['max_count'] = max_count - create_kwargs['return_reservation_id'] = return_id availability_zone = server_dict.pop("availability_zone", None) @@ -469,111 +750,53 @@ def create(self, req, body): context.can(server_policies.SERVERS % 'create:trusted_certs', target=target) - # TODO(Shao He, Feng) move this policy check to os-availability-zone - # extension after refactor it. 
parse_az = self.compute_api.parse_availability_zone try: availability_zone, host, node = parse_az(context, availability_zone) except exception.InvalidInput as err: - raise exc.HTTPBadRequest(explanation=six.text_type(err)) + raise exc.HTTPBadRequest(explanation=str(err)) if host or node: - context.can(server_policies.SERVERS % 'create:forced_host', {}) - - # NOTE(danms): Don't require an answer from all cells here, as - # we assume that if a cell isn't reporting we won't schedule into - # it anyway. A bit of a gamble, but a reasonable one. - min_compute_version = service_obj.get_minimum_version_all_cells( - nova_context.get_admin_context(), ['nova-compute']) - supports_device_tagging = (min_compute_version >= - DEVICE_TAGGING_MIN_COMPUTE_VERSION) - - block_device_mapping_legacy = server_dict.get('block_device_mapping', - []) - block_device_mapping_v2 = server_dict.get('block_device_mapping_v2', - []) - - if block_device_mapping_legacy and block_device_mapping_v2: - expl = _('Using different block_device_mapping syntaxes ' - 'is not allowed in the same request.') - raise exc.HTTPBadRequest(explanation=expl) + context.can(server_policies.SERVERS % 'create:forced_host', + target=target) + availability_zone = self._validate_host_availability_zone( + context, availability_zone, host) - if block_device_mapping_legacy: - for bdm in block_device_mapping_legacy: - if 'delete_on_termination' in bdm: - bdm['delete_on_termination'] = strutils.bool_from_string( - bdm['delete_on_termination']) - create_kwargs[ - 'block_device_mapping'] = block_device_mapping_legacy - # Sets the legacy_bdm flag if we got a legacy block device mapping. - create_kwargs['legacy_bdm'] = True - elif block_device_mapping_v2: - image_href = server_dict.get('imageRef') - image_uuid_specified = image_href is not None - try: - block_device_mapping = [ - block_device.BlockDeviceDict.from_api(bdm_dict, - image_uuid_specified) - for bdm_dict in block_device_mapping_v2] - except exception.InvalidBDMFormat as e: - raise exc.HTTPBadRequest(explanation=e.format_message()) - create_kwargs['block_device_mapping'] = block_device_mapping - # Unset the legacy_bdm flag if we got a block device mapping. - create_kwargs['legacy_bdm'] = False + if api_version_request.is_supported(req, min_version='2.74'): + self._process_hosts_for_create(context, target, server_dict, + create_kwargs, host, node) - block_device_mapping = create_kwargs.get("block_device_mapping") - if block_device_mapping: - context.can(server_policies.SERVERS % 'create:attach_volume', - target) - for bdm in block_device_mapping: - if bdm.get('tag', None) and not supports_device_tagging: - msg = _('Block device tags are not yet supported.') - raise exc.HTTPBadRequest(explanation=msg) + self._process_bdms_for_create( + context, target, server_dict, create_kwargs) image_uuid = self._image_from_req_data(server_dict, create_kwargs) - # NOTE(cyeoh): Although upper layer can set the value of - # return_reservation_id in order to request that a reservation - # id be returned to the client instead of the newly created - # instance information we do not want to pass this parameter - # to the compute create call which always returns both. 
We use - # this flag after the instance create call to determine what - # to return to the client - return_reservation_id = create_kwargs.pop('return_reservation_id', - False) - - requested_networks = server_dict.get('networks', None) - - if requested_networks is not None: - requested_networks = self._get_requested_networks( - requested_networks, supports_device_tagging) - - # Skip policy check for 'create:attach_network' if there is no - # network allocation request. - if requested_networks and len(requested_networks) and \ - not requested_networks.no_allocate: - context.can(server_policies.SERVERS % 'create:attach_network', - target) + self._process_networks_for_create( + context, target, server_dict, create_kwargs) flavor_id = self._flavor_id_from_req_data(body) try: - inst_type = flavors.get_flavor_by_flavor_id( - flavor_id, ctxt=context, read_deleted="no") + flavor = flavors.get_flavor_by_flavor_id( + flavor_id, ctxt=context, read_deleted="no") supports_multiattach = common.supports_multiattach_volume(req) - (instances, resv_id) = self.compute_api.create(context, - inst_type, - image_uuid, - display_name=name, - display_description=description, - availability_zone=availability_zone, - forced_host=host, forced_node=node, - metadata=server_dict.get('metadata', {}), - admin_password=password, - requested_networks=requested_networks, - check_server_group_quota=True, - supports_multiattach=supports_multiattach, - **create_kwargs) + supports_port_resource_request = \ + common.supports_port_resource_request(req) + instances, resv_id = self.compute_api.create( + context, + flavor, + image_uuid, + display_name=name, + display_description=description, + hostname=hostname, + availability_zone=availability_zone, + forced_host=host, forced_node=node, + metadata=server_dict.get('metadata', {}), + admin_password=password, + check_server_group_quota=True, + supports_multiattach=supports_multiattach, + supports_port_resource_request=supports_port_resource_request, + **create_kwargs) except (exception.QuotaError, exception.PortLimitExceeded) as error: raise exc.HTTPForbidden( @@ -597,19 +820,19 @@ def create(self, req, body): except UnicodeDecodeError as error: msg = "UnicodeError: %s" % error raise exc.HTTPBadRequest(explanation=msg) - except (exception.CPUThreadPolicyConfigurationInvalid, - exception.ImageNotActive, + except (exception.ImageNotActive, exception.ImageBadRequest, exception.ImageNotAuthorized, + exception.ImageUnacceptable, exception.FixedIpNotFoundForAddress, exception.FlavorNotFound, - exception.FlavorDiskTooSmall, - exception.FlavorMemoryTooSmall, exception.InvalidMetadata, - exception.InvalidRequest, exception.InvalidVolume, + exception.VolumeNotFound, + exception.MismatchVolumeAZException, exception.MultiplePortsNotApplicable, exception.InvalidFixedIpAndMaxCountRequest, + exception.AmbiguousHostnameForMultipleInstances, exception.InstanceUserDataMalformed, exception.PortNotFound, exception.FixedIpAlreadyInUse, @@ -627,41 +850,39 @@ def create(self, req, body): exception.InvalidBDMEphemeralSize, exception.InvalidBDMFormat, exception.InvalidBDMSwapSize, + exception.InvalidBDMDiskBus, + exception.VolumeTypeNotFound, exception.AutoDiskConfigDisabledByImage, - exception.ImageCPUPinningForbidden, - exception.ImageCPUThreadPolicyForbidden, - exception.ImageNUMATopologyIncomplete, - exception.ImageNUMATopologyForbidden, - exception.ImageNUMATopologyAsymmetric, - exception.ImageNUMATopologyCPUOutOfRange, - exception.ImageNUMATopologyCPUDuplicates, - 
exception.ImageNUMATopologyCPUsUnassigned, - exception.ImageNUMATopologyMemoryOutOfRange, - exception.InvalidNUMANodesNumber, exception.InstanceGroupNotFound, - exception.MemoryPageSizeInvalid, - exception.MemoryPageSizeForbidden, - exception.PciRequestAliasNotDefined, - exception.RealtimeConfigurationInvalid, - exception.RealtimeMaskNotFoundOrInvalid, exception.SnapshotNotFound, exception.UnableToAutoAllocateNetwork, exception.MultiattachNotSupportedOldMicroversion, - exception.CertificateValidationFailed) as error: + exception.CertificateValidationFailed, + exception.CreateWithPortResourceRequestOldVersion, + exception.DeviceProfileError, + exception.ComputeHostNotFound, + exception.ForbiddenPortsWithAccelerator, + exception.ExtendedResourceRequestOldCompute, + ) as error: + raise exc.HTTPBadRequest(explanation=error.format_message()) + except INVALID_FLAVOR_IMAGE_EXCEPTIONS as error: raise exc.HTTPBadRequest(explanation=error.format_message()) except (exception.PortInUse, exception.InstanceExists, exception.NetworkAmbiguous, exception.NoUniqueMatch, - exception.MultiattachSupportNotYetAvailable, - exception.CertificateValidationNotYetAvailable) as error: + exception.MixedInstanceNotSupportByComputeService) as error: raise exc.HTTPConflict(explanation=error.format_message()) + except exception.InvalidQuotaMethodUsage as error: + if ' instances_' in error.message: + msg = "Invalid baremetal flavor for this region." + raise exc.HTTPBadRequest(explanation=msg) + raise # If the caller wanted a reservation_id, return it - if return_reservation_id: + if server_dict.get('return_reservation_id', False): return wsgi.ResponseObject({'reservation_id': resv_id}) - req.cache_db_instances(instances) server = self._view_builder.create(req, instances[0]) if CONF.api.enable_instance_password: @@ -688,9 +909,10 @@ def _delete(self, context, req, instance_uuid): self.compute_api.delete(context, instance) @wsgi.expected_errors(404) - @validation.schema(schema_servers.base_update_v20, '2.0', '2.0') - @validation.schema(schema_servers.base_update, '2.1', '2.18') - @validation.schema(schema_servers.base_update_v219, '2.19') + @validation.schema(schema_servers.update_v20, '2.0', '2.0') + @validation.schema(schema_servers.update, '2.1', '2.18') + @validation.schema(schema_servers.update_v219, '2.19', '2.89') + @validation.schema(schema_servers.update_v290, '2.90') def update(self, req, id, body): """Update server then pass on to version-specific controller.""" @@ -700,6 +922,8 @@ def update(self, req, id, body): ctxt.can(server_policies.SERVERS % 'update', target={'user_id': instance.user_id, 'project_id': instance.project_id}) + show_server_groups = api_version_request.is_supported( + req, min_version='2.71') server = body['server'] @@ -711,13 +935,46 @@ def update(self, req, id, body): # This is allowed to be None (remove description) update_dict['display_description'] = server['description'] + if 'hostname' in server: + update_dict['hostname'] = server['hostname'] + helpers.translate_attributes(helpers.UPDATE, server, update_dict) try: - instance = self.compute_api.update_instance(ctxt, instance, - update_dict) - return self._view_builder.show(req, instance, - extend_address=False) + instance = self.compute_api.update_instance( + ctxt, instance, update_dict) + + # NOTE(gmann): Starting from microversion 2.75, PUT and Rebuild + # API response will show all attributes like GET /servers API. 
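# --- Editor's aside (not part of the patch): the flag fan-out that follows
# can be summarized as "at microversion >= 2.75 every extra attribute is
# shown; the two policy-gated ones are left as None so the view builder
# re-checks policy itself". A compact sketch of that mapping:
def sketch_show_flags(microversion_ge_275):
    always_shown = dict.fromkeys(
        ['extend_address', 'show_AZ', 'show_config_drive', 'show_keypair',
         'show_srv_usg', 'show_sec_grp', 'show_extended_status',
         'show_extended_volumes'], microversion_ge_275)
    policy_gated = dict.fromkeys(
        ['show_extended_attr', 'show_host_status'],
        None if microversion_ge_275 else False)
    return {**always_shown, **policy_gated}

assert sketch_show_flags(True)['show_host_status'] is None   # policy decides
assert sketch_show_flags(False)['show_keypair'] is False     # pre-2.75 PUT
# --- end of editor's aside.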
+ show_all_attributes = api_version_request.is_supported( + req, min_version='2.75') + extend_address = show_all_attributes + show_AZ = show_all_attributes + show_config_drive = show_all_attributes + show_keypair = show_all_attributes + show_srv_usg = show_all_attributes + show_sec_grp = show_all_attributes + show_extended_status = show_all_attributes + show_extended_volumes = show_all_attributes + # NOTE(gmann): Below attributes need to be added in response + # if respective policy allows. So setting these as None + # to perform the policy check in view builder. + show_extended_attr = None if show_all_attributes else False + show_host_status = None if show_all_attributes else False + + return self._view_builder.show( + req, instance, + extend_address=extend_address, + show_AZ=show_AZ, + show_config_drive=show_config_drive, + show_extended_attr=show_extended_attr, + show_host_status=show_host_status, + show_keypair=show_keypair, + show_srv_usg=show_srv_usg, + show_sec_grp=show_sec_grp, + show_extended_status=show_extended_status, + show_extended_volumes=show_extended_volumes, + show_server_groups=show_server_groups) except exception.InstanceNotFound: msg = _("Instance could not be found") raise exc.HTTPNotFound(explanation=msg) @@ -730,16 +987,18 @@ def update(self, req, id, body): @wsgi.action('confirmResize') def _action_confirm_resize(self, req, id, body): context = req.environ['nova.context'] - context.can(server_policies.SERVERS % 'confirm_resize') instance = self._get_server(context, req, id) + context.can(server_policies.SERVERS % 'confirm_resize', + target={'project_id': instance.project_id}) try: self.compute_api.confirm_resize(context, instance) - except exception.InstanceUnknownCell as e: - raise exc.HTTPNotFound(explanation=e.format_message()) except exception.MigrationNotFound: msg = _("Instance has not been resized.") raise exc.HTTPBadRequest(explanation=msg) - except exception.InstanceIsLocked as e: + except ( + exception.InstanceIsLocked, + exception.ServiceUnavailable, + ) as e: raise exc.HTTPConflict(explanation=e.format_message()) except exception.InstanceInvalidState as state_error: common.raise_http_conflict_for_instance_invalid_state(state_error, @@ -750,12 +1009,11 @@ def _action_confirm_resize(self, req, id, body): @wsgi.action('revertResize') def _action_revert_resize(self, req, id, body): context = req.environ['nova.context'] - context.can(server_policies.SERVERS % 'revert_resize') instance = self._get_server(context, req, id) + context.can(server_policies.SERVERS % 'revert_resize', + target={'project_id': instance.project_id}) try: self.compute_api.revert_resize(context, instance) - except exception.InstanceUnknownCell as e: - raise exc.HTTPNotFound(explanation=e.format_message()) except exception.MigrationNotFound: msg = _("Instance has not been resized.") raise exc.HTTPBadRequest(explanation=msg) @@ -776,8 +1034,9 @@ def _action_reboot(self, req, id, body): reboot_type = body['reboot']['type'].upper() context = req.environ['nova.context'] - context.can(server_policies.SERVERS % 'reboot') instance = self._get_server(context, req, id) + context.can(server_policies.SERVERS % 'reboot', + target={'project_id': instance.project_id}) try: self.compute_api.reboot(context, instance, reboot_type) @@ -787,22 +1046,27 @@ def _action_reboot(self, req, id, body): common.raise_http_conflict_for_instance_invalid_state(state_error, 'reboot', id) - def _resize(self, req, instance_id, flavor_id, **kwargs): + def _resize(self, req, instance_id, flavor_id, auto_disk_config=None):
"""Begin the resize process with given instance/flavor.""" context = req.environ["nova.context"] - instance = self._get_server(context, req, instance_id) + instance = self._get_server(context, req, instance_id, + columns_to_join=['services']) context.can(server_policies.SERVERS % 'resize', target={'user_id': instance.user_id, 'project_id': instance.project_id}) try: - self.compute_api.resize(context, instance, flavor_id, **kwargs) - except exception.InstanceUnknownCell as e: - raise exc.HTTPNotFound(explanation=e.format_message()) + self.compute_api.resize(context, instance, flavor_id, + auto_disk_config=auto_disk_config) except exception.QuotaError as error: raise exc.HTTPForbidden( explanation=error.format_message()) - except exception.InstanceIsLocked as e: + except ( + exception.InstanceIsLocked, + exception.InstanceNotReady, + exception.MixedInstanceNotSupportByComputeService, + exception.ServiceUnavailable, + ) as e: raise exc.HTTPConflict(explanation=e.format_message()) except exception.InstanceInvalidState as state_error: common.raise_http_conflict_for_instance_invalid_state(state_error, @@ -815,12 +1079,15 @@ def _resize(self, req, instance_id, flavor_id, **kwargs): msg = _("Image that the instance was started " "with could not be found.") raise exc.HTTPBadRequest(explanation=msg) - except (exception.AutoDiskConfigDisabledByImage, - exception.CannotResizeDisk, - exception.CannotResizeToSameFlavor, - exception.FlavorNotFound, - exception.NoValidHost, - exception.PciRequestAliasNotDefined) as e: + except ( + exception.AutoDiskConfigDisabledByImage, + exception.CannotResizeDisk, + exception.CannotResizeToSameFlavor, + exception.FlavorNotFound, + exception.ExtendedResourceRequestOldCompute, + ) as e: + raise exc.HTTPBadRequest(explanation=e.format_message()) + except INVALID_FLAVOR_IMAGE_EXCEPTIONS as e: raise exc.HTTPBadRequest(explanation=e.format_message()) except exception.Invalid: msg = _("Invalid instance image.") @@ -835,9 +1102,8 @@ def delete(self, req, id): except exception.InstanceNotFound: msg = _("Instance could not be found") raise exc.HTTPNotFound(explanation=msg) - except exception.InstanceUnknownCell as e: - raise exc.HTTPNotFound(explanation=e.format_message()) - except exception.InstanceIsLocked as e: + except (exception.InstanceIsLocked, + exception.AllocationDeleteFailed) as e: raise exc.HTTPConflict(explanation=e.format_message()) except exception.InstanceInvalidState as state_error: common.raise_http_conflict_for_instance_invalid_state(state_error, @@ -881,12 +1147,13 @@ def _action_resize(self, req, id, body): @wsgi.response(202) @wsgi.expected_errors((400, 403, 404, 409)) @wsgi.action('rebuild') - @validation.schema(schema_servers.base_rebuild_v20, '2.0', '2.0') - @validation.schema(schema_servers.base_rebuild, '2.1', '2.18') - @validation.schema(schema_servers.base_rebuild_v219, '2.19', '2.53') - @validation.schema(schema_servers.base_rebuild_v254, '2.54', '2.56') - @validation.schema(schema_servers.base_rebuild_v257, '2.57', '2.62') - @validation.schema(schema_servers.base_rebuild_v263, '2.63') + @validation.schema(schema_servers.rebuild_v20, '2.0', '2.0') + @validation.schema(schema_servers.rebuild, '2.1', '2.18') + @validation.schema(schema_servers.rebuild_v219, '2.19', '2.53') + @validation.schema(schema_servers.rebuild_v254, '2.54', '2.56') + @validation.schema(schema_servers.rebuild_v257, '2.57', '2.62') + @validation.schema(schema_servers.rebuild_v263, '2.63', '2.89') + @validation.schema(schema_servers.rebuild_v290, '2.90') def _action_rebuild(self, 
req, id, body): """Rebuild an instance with the given attributes.""" rebuild_dict = body['rebuild'] @@ -910,8 +1177,10 @@ def _action_rebuild(self, req, id, body): helpers.translate_attributes(helpers.REBUILD, rebuild_dict, kwargs) - if (api_version_request.is_supported(req, min_version='2.54') - and 'key_name' in rebuild_dict): + if ( + api_version_request.is_supported(req, min_version='2.54') and + 'key_name' in rebuild_dict + ): kwargs['key_name'] = rebuild_dict.get('key_name') # If user_data is not specified, we don't include it in kwargs because @@ -923,17 +1192,25 @@ def _action_rebuild(self, req, id, body): # Skip policy check for 'rebuild:trusted_certs' if no trusted # certificate IDs were provided. - if ((api_version_request.is_supported(req, min_version='2.63')) - # Note that this is different from server create since with - # rebuild a user can unset/reset the trusted certs by - # specifying trusted_image_certificates=None, similar to - # key_name. - and ('trusted_image_certificates' in rebuild_dict)): + if ( + api_version_request.is_supported(req, min_version='2.63') and + # Note that this is different from server create since with + # rebuild a user can unset/reset the trusted certs by + # specifying trusted_image_certificates=None, similar to + # key_name. + 'trusted_image_certificates' in rebuild_dict + ): kwargs['trusted_certs'] = rebuild_dict.get( 'trusted_image_certificates') context.can(server_policies.SERVERS % 'rebuild:trusted_certs', target=target) + if ( + api_version_request.is_supported(req, min_version='2.90') and + 'hostname' in rebuild_dict + ): + kwargs['hostname'] = rebuild_dict['hostname'] + for request_attribute, instance_attribute in attr_map.items(): try: if request_attribute == 'name': @@ -951,8 +1228,7 @@ def _action_rebuild(self, req, id, body): image_href, password, **kwargs) - except (exception.InstanceIsLocked, - exception.CertificateValidationNotYetAvailable) as e: + except exception.InstanceIsLocked as e: raise exc.HTTPConflict(explanation=e.format_message()) except exception.InstanceInvalidState as state_error: common.raise_http_conflict_for_instance_invalid_state(state_error, @@ -960,8 +1236,6 @@ def _action_rebuild(self, req, id, body): except exception.InstanceNotFound: msg = _("Instance could not be found") raise exc.HTTPNotFound(explanation=msg) - except exception.InstanceUnknownCell as e: - raise exc.HTTPNotFound(explanation=e.format_message()) except exception.ImageNotFound: msg = _("Cannot find image for rebuild") raise exc.HTTPBadRequest(explanation=msg) @@ -970,28 +1244,67 @@ def _action_rebuild(self, req, id, body): raise exc.HTTPBadRequest(explanation=msg) except exception.QuotaError as error: raise exc.HTTPForbidden(explanation=error.format_message()) - except (exception.ImageNotActive, + except (exception.AutoDiskConfigDisabledByImage, + exception.CertificateValidationFailed, + exception.ImageNotActive, exception.ImageUnacceptable, - exception.FlavorDiskTooSmall, - exception.FlavorMemoryTooSmall, exception.InvalidMetadata, - exception.AutoDiskConfigDisabledByImage, - exception.CertificateValidationFailed) as error: + exception.InvalidArchitectureName, + exception.InvalidVolume, + ) as error: + raise exc.HTTPBadRequest(explanation=error.format_message()) + except INVALID_FLAVOR_IMAGE_EXCEPTIONS as error: raise exc.HTTPBadRequest(explanation=error.format_message()) instance = self._get_server(context, req, id, is_detail=True) - view = self._view_builder.show(req, instance, extend_address=False) + # NOTE(liuyulong): set the new key_name 
for the API response. + # from microversion 2.54 onwards. + show_keypair = api_version_request.is_supported( + req, min_version='2.54') + show_server_groups = api_version_request.is_supported( + req, min_version='2.71') + + # NOTE(gmann): Starting from microversion 2.75, PUT and Rebuild + # API response will show all attributes like GET /servers API. + show_all_attributes = api_version_request.is_supported( + req, min_version='2.75') + extend_address = show_all_attributes + show_AZ = show_all_attributes + show_config_drive = show_all_attributes + show_srv_usg = show_all_attributes + show_sec_grp = show_all_attributes + show_extended_status = show_all_attributes + show_extended_volumes = show_all_attributes + # NOTE(gmann): Below attributes need to be added in response + # if respective policy allows. So setting these as None + # to perform the policy check in view builder. + show_extended_attr = None if show_all_attributes else False + show_host_status = None if show_all_attributes else False + + view = self._view_builder.show( + req, instance, + extend_address=extend_address, + show_AZ=show_AZ, + show_config_drive=show_config_drive, + show_extended_attr=show_extended_attr, + show_host_status=show_host_status, + show_keypair=show_keypair, + show_srv_usg=show_srv_usg, + show_sec_grp=show_sec_grp, + show_extended_status=show_extended_status, + show_extended_volumes=show_extended_volumes, + show_server_groups=show_server_groups, + # NOTE(gmann): user_data has been added in response (by code at + # the end of this API method) since microversion 2.57 so tell + # view builder not to include it. + show_user_data=False) # Add on the admin_password attribute since the view doesn't do it # unless instance passwords are disabled if CONF.api.enable_instance_password: view['server']['adminPass'] = password - if api_version_request.is_supported(req, min_version='2.54'): - # NOTE(liuyulong): set the new key_name for the API response.
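The hunk above fans a single microversion check out into a dozen show_* view flags, reserving None for attributes whose visibility still depends on a policy check in the view builder. A minimal runnable sketch of that pattern, with illustrative names and version tuples (not nova's API):

    # Sketch of the microversion-gated view-flag fan-out used above;
    # build_view_flags and the tuple versions are illustrative stand-ins.
    def build_view_flags(version):
        show_all = version >= (2, 75)
        return {
            'extend_address': show_all,
            'show_AZ': show_all,
            'show_config_drive': show_all,
            # None defers the decision to a policy check in the view
            # builder; False omits the attribute outright.
            'show_extended_attr': None if show_all else False,
            'show_host_status': None if show_all else False,
        }


    flags = build_view_flags((2, 90))
    assert flags['show_AZ'] is True and flags['show_extended_attr'] is None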
- view['server']['key_name'] = instance.key_name - if include_user_data: view['server']['user_data'] = instance.user_data @@ -1006,7 +1319,10 @@ def _action_rebuild(self, req, id, body): def _action_create_image(self, req, id, body): """Snapshot a server instance.""" context = req.environ['nova.context'] - context.can(server_policies.SERVERS % 'create_image') + instance = self._get_server(context, req, id) + target = {'project_id': instance.project_id} + context.can(server_policies.SERVERS % 'create_image', + target=target) entity = body["createImage"] image_name = common.normalize_name(entity["name"]) @@ -1018,8 +1334,6 @@ def _action_create_image(self, req, id, body): api_version_request.MAX_IMAGE_META_PROXY_API_VERSION): common.check_img_metadata_properties_quota(context, metadata) - instance = self._get_server(context, req, id) - bdms = objects.BlockDeviceMappingList.get_by_instance_uuid( context, instance.uuid) @@ -1027,7 +1341,7 @@ def _action_create_image(self, req, id, body): if compute_utils.is_volume_backed_instance(context, instance, bdms): context.can(server_policies.SERVERS % - 'create_image:allow_volume_backed') + 'create_image:allow_volume_backed', target=target) image = self.compute_api.snapshot_volume_backed( context, instance, @@ -1039,8 +1353,6 @@ def _action_create_image(self, req, id, body): instance, image_name, extra_properties=metadata) - except exception.InstanceUnknownCell as e: - raise exc.HTTPNotFound(explanation=e.format_message()) except exception.InstanceInvalidState as state_error: common.raise_http_conflict_for_instance_invalid_state(state_error, 'createImage', id) @@ -1056,7 +1368,7 @@ def _action_create_image(self, req, id, body): # build location of newly-created image entity image_id = str(image['id']) - image_ref = image_api.API().generate_image_url(image_id, context) + image_ref = glance.API().generate_image_url(image_id, context) resp = webob.Response(status_int=202) resp.headers['Location'] = image_ref @@ -1072,23 +1384,36 @@ def _get_server_admin_password(self, server): def _get_server_search_options(self, req): """Return server search options allowed by non-admin.""" + # NOTE(mriedem): all_tenants is admin-only by default but because of + # tight-coupling between this method, the remove_invalid_options method + # and how _get_servers uses them, we include all_tenants here but it + # will be removed later for non-admins. Fixing this would be nice but + # probably not trivial. opt_list = ('reservation_id', 'name', 'status', 'image', 'flavor', 'ip', 'changes-since', 'all_tenants') if api_version_request.is_supported(req, min_version='2.5'): opt_list += ('ip6',) if api_version_request.is_supported(req, min_version='2.26'): opt_list += TAG_SEARCH_FILTERS + if api_version_request.is_supported(req, min_version='2.66'): + opt_list += ('changes-before',) + if api_version_request.is_supported(req, min_version='2.73'): + opt_list += ('locked',) + if api_version_request.is_supported(req, min_version='2.83'): + opt_list += ('availability_zone', 'config_drive', 'key_name', + 'created_at', 'launched_at', 'terminated_at', + 'power_state', 'task_state', 'vm_state', 'progress', + 'user_id',) + if api_version_request.is_supported(req, min_version='2.90'): + opt_list += ('hostname',) return opt_list def _get_instance(self, context, instance_uuid): try: attrs = ['system_metadata', 'metadata'] - if not CONF.cells.enable: - # NOTE(danms): We can't target a cell database if we're - # in cellsv1 otherwise we'll short-circuit the replication. 
- mapping = objects.InstanceMapping.get_by_instance_uuid( - context, instance_uuid) - nova_context.set_target_cell(context, mapping.cell_mapping) + mapping = objects.InstanceMapping.get_by_instance_uuid( + context, instance_uuid) + nova_context.set_target_cell(context, mapping.cell_mapping) return objects.Instance.get_by_uuid( context, instance_uuid, expected_attrs=attrs) except (exception.InstanceNotFound, @@ -1102,13 +1427,13 @@ def _start_server(self, req, id, body): """Start an instance.""" context = req.environ['nova.context'] instance = self._get_instance(context, id) - context.can(server_policies.SERVERS % 'start', instance) + context.can(server_policies.SERVERS % 'start', + target={'user_id': instance.user_id, + 'project_id': instance.project_id}) try: self.compute_api.start(context, instance) except (exception.InstanceNotReady, exception.InstanceIsLocked) as e: raise webob.exc.HTTPConflict(explanation=e.format_message()) - except exception.InstanceUnknownCell as e: - raise exc.HTTPNotFound(explanation=e.format_message()) except exception.InstanceInvalidState as state_error: common.raise_http_conflict_for_instance_invalid_state(state_error, 'start', id) @@ -1127,8 +1452,6 @@ def _stop_server(self, req, id, body): self.compute_api.stop(context, instance) except (exception.InstanceNotReady, exception.InstanceIsLocked) as e: raise webob.exc.HTTPConflict(explanation=e.format_message()) - except exception.InstanceUnknownCell as e: - raise exc.HTTPNotFound(explanation=e.format_message()) except exception.InstanceInvalidState as state_error: common.raise_http_conflict_for_instance_invalid_state(state_error, 'stop', id) @@ -1155,10 +1478,12 @@ def _action_trigger_crash_dump(self, req, id, body): def remove_invalid_options(context, search_options, allowed_search_options): - """Remove search options that are not valid for non-admin API/context.""" - if context.is_admin: + """Remove search options that are not permitted unless policy allows.""" + + if context.can(server_policies.SERVERS % 'allow_all_filters', + fatal=False): # Only remove parameters for sorting and pagination - for key in ('sort_key', 'sort_dir', 'limit', 'marker'): + for key in PAGING_SORTING_PARAMS: search_options.pop(key, None) return # Otherwise, strip out all unknown options diff --git a/nova/api/openstack/compute/services.py b/nova/api/openstack/compute/services.py index 818b02e7a7c..6deb84a7f1a 100644 --- a/nova/api/openstack/compute/services.py +++ b/nova/api/openstack/compute/services.py @@ -12,6 +12,8 @@ # License for the specific language governing permissions and limitations # under the License. 
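The remove_invalid_options change above replaces the hard context.is_admin check with the 'allow_all_filters' policy. A minimal sketch of that stripping logic, with the policy check reduced to a plain boolean and simplified in-place semantics (names illustrative):

    # Sketch of the filter-stripping behaviour of remove_invalid_options
    # above; the policy check is reduced to a boolean.
    PAGING_SORTING_PARAMS = ('sort_key', 'sort_dir', 'limit', 'marker')


    def strip_options(search_options, allowed, can_use_all_filters):
        if can_use_all_filters:
            # Privileged callers keep every filter; paging/sorting keys
            # are still removed because they are consumed elsewhere.
            for key in PAGING_SORTING_PARAMS:
                search_options.pop(key, None)
            return
        # Everyone else is reduced to the allowed whitelist.
        for key in list(search_options):
            if key not in allowed:
                search_options.pop(key)


    opts = {'name': 'vm1', 'host': 'c1', 'limit': 10}
    strip_options(opts, allowed=('name',), can_use_all_filters=False)
    assert opts == {'name': 'vm1'}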
+from keystoneauth1 import exceptions as ks_exc +from oslo_log import log as logging from oslo_utils import strutils from oslo_utils import uuidutils import webob.exc @@ -21,7 +23,7 @@ from nova.api.openstack import wsgi from nova.api import validation from nova import availability_zones -from nova import compute +from nova.compute import api as compute from nova import exception from nova.i18n import _ from nova import objects @@ -31,18 +33,28 @@ from nova import utils UUID_FOR_ID_MIN_VERSION = '2.53' +PARTIAL_CONSTRUCT_FOR_CELL_DOWN_MIN_VERSION = '2.69' + +LOG = logging.getLogger(__name__) class ServiceController(wsgi.Controller): def __init__(self): + super(ServiceController, self).__init__() self.host_api = compute.HostAPI() - self.aggregate_api = compute.api.AggregateAPI() + self.aggregate_api = compute.AggregateAPI() self.servicegroup_api = servicegroup.API() self.actions = {"enable": self._enable, "disable": self._disable, "disable-log-reason": self._disable_log_reason} - self.placementclient = report.SchedulerReportClient() + self._placementclient = None # Lazy-load on first access. + + @property + def placementclient(self): + if self._placementclient is None: + self._placementclient = report.SchedulerReportClient() + return self._placementclient def _get_services(self, req): # The API services are filtered out since they are not RPC services @@ -51,13 +63,15 @@ def _get_services(self, req): api_services = ('nova-osapi_compute', 'nova-metadata') context = req.environ['nova.context'] - context.can(services_policies.BASE_POLICY_NAME) + + cell_down_support = api_version_request.is_supported( + req, min_version=PARTIAL_CONSTRUCT_FOR_CELL_DOWN_MIN_VERSION) _services = [ - s - for s in self.host_api.service_get_all(context, set_zones=True, - all_cells=True) - if s['binary'] not in api_services + s + for s in self.host_api.service_get_all(context, set_zones=True, + all_cells=True, cell_down_support=cell_down_support) + if s['binary'] not in api_services ] host = '' @@ -73,7 +87,16 @@ def _get_services(self, req): return _services - def _get_service_detail(self, svc, additional_fields, req): + def _get_service_detail(self, svc, additional_fields, req, + cell_down_support=False): + # NOTE(tssurya): The below logic returns a minimal service construct + # consisting of only the host, binary and status fields for the compute + # services in the down cell.
+ if (cell_down_support and 'uuid' not in svc): + return {'binary': svc.binary, + 'host': svc.host, + 'status': "UNKNOWN"} + alive = self.servicegroup_api.service_is_up(svc) state = (alive and "up") or "down" active = 'enabled' @@ -110,8 +133,10 @@ def _get_service_detail(self, svc, additional_fields, req): def _get_services_list(self, req, additional_fields=()): _services = self._get_services(req) - return [self._get_service_detail(svc, additional_fields, req) - for svc in _services] + cell_down_support = api_version_request.is_supported(req, + min_version=PARTIAL_CONSTRUCT_FOR_CELL_DOWN_MIN_VERSION) + return [self._get_service_detail(svc, additional_fields, req, + cell_down_support=cell_down_support) for svc in _services] def _enable(self, body, context): """Enable scheduling for a service.""" @@ -164,6 +189,9 @@ def _forced_down(self, body, context): host = body['host'] binary = body['binary'] + if binary == 'nova-compute' and forced_down is False: + self._check_for_evacuations(context, host) + ret_value = {'service': {'host': host, 'binary': binary, 'forced_down': forced_down}} @@ -172,8 +200,18 @@ def _forced_down(self, body, context): def _update(self, context, host, binary, payload): """Do the actual PUT/update""" + # If the user tried to perform an action + # (disable/enable/force down) on a non-nova-compute + # service, provide a more useful error message. + if binary != 'nova-compute': + msg = (_( + 'Updating a %(binary)s service is not supported. Only ' + 'nova-compute services can be updated.') % {'binary': binary}) + raise webob.exc.HTTPBadRequest(explanation=msg) + try: - self.host_api.service_update(context, host, binary, payload) + self.host_api.service_update_by_host_and_binary( + context, host, binary, payload) except (exception.HostBinaryNotFound, exception.HostMappingNotFound) as exc: raise webob.exc.HTTPNotFound(explanation=exc.format_message()) @@ -181,7 +219,6 @@ def _update(self, context, host, binary, payload): def _perform_action(self, req, id, body, actions): """Calculate action dictionary dependent on provided fields""" context = req.environ['nova.context'] - context.can(services_policies.BASE_POLICY_NAME) try: action = actions[id] @@ -191,12 +228,31 @@ def _perform_action(self, req, id, body, actions): return action(body, context) + def _check_for_evacuations(self, context, hostname): + # NOTE(lyarwood): When forcing a compute service back up ensure that + # there are no evacuation migration records against this host as the + # source that are marked as done, suggesting that the compute service + # hasn't restarted and moved such records to a completed state. + filters = { + 'source_compute': hostname, + 'status': 'done', + 'migration_type': objects.fields.MigrationType.EVACUATION, + } + if any(objects.MigrationList.get_by_filters(context, filters)): + msg = _("Unable to force up host %(host)s as `done` evacuation " + "migration records remain associated with the host. 
" + "Ensure the compute service has been restarted, " + "allowing these records to move to `completed` before " + "retrying this request.") % {'host': hostname} + # TODO(lyarwood): Move to 409 HTTPConflict under a new microversion + raise webob.exc.HTTPBadRequest(explanation=msg) + @wsgi.response(204) @wsgi.expected_errors((400, 404, 409)) def delete(self, req, id): """Deletes the specified service.""" context = req.environ['nova.context'] - context.can(services_policies.BASE_POLICY_NAME) + context.can(services_policies.BASE_POLICY_NAME % 'delete', target={}) if api_version_request.is_supported( req, min_version=UUID_FOR_ID_MIN_VERSION): @@ -219,16 +275,40 @@ def delete(self, req, id): # related compute_nodes record) delete since it will impact # resource accounting in Placement and orphan the compute node # resource provider. - # TODO(mriedem): Use a COUNT SQL query-based function instead - # of InstanceList.get_uuids_by_host for performance. - instance_uuids = objects.InstanceList.get_uuids_by_host( - context, service['host']) - if instance_uuids: + num_instances = objects.InstanceList.get_count_by_hosts( + context, [service['host']]) + if num_instances: raise webob.exc.HTTPConflict( explanation=_('Unable to delete compute service that ' 'is hosting instances. Migrate or ' 'delete the instances first.')) + # Similarly, check to see if the are any in-progress migrations + # involving this host because if there are we need to block the + # service delete since we could orphan resource providers and + # break the ability to do things like confirm/revert instances + # in VERIFY_RESIZE status. + compute_nodes = [] + try: + compute_nodes = objects.ComputeNodeList.get_all_by_host( + context, service.host) + self._assert_no_in_progress_migrations( + context, id, compute_nodes) + except exception.ComputeHostNotFound: + # NOTE(artom) Consider the following situation: + # - Using the Ironic virt driver + # - Replacing (so removing and re-adding) all baremetal + # nodes associated with a single nova-compute service + # The update resources periodic will have destroyed the + # compute node records because they're no longer being + # reported by the virt driver. If we then attempt to + # manually delete the compute service record, + # get_all_host() above will raise, as there are no longer + # any compute node records for the host. Catch it here and + # continue to allow compute service deletion. + LOG.info('Deleting compute service with no associated ' + 'compute nodes.') + aggrs = self.aggregate_api.get_aggregates_by_host(context, service.host) for ag in aggrs: @@ -236,9 +316,18 @@ def delete(self, req, id): ag.id, service.host) # remove the corresponding resource provider record from - # placement for this compute node - self.placementclient.delete_resource_provider( - context, service.compute_node, cascade=True) + # placement for the compute nodes managed by this service; + # remember that an ironic compute service can manage multiple + # nodes + for compute_node in compute_nodes: + try: + self.placementclient.delete_resource_provider( + context, compute_node, cascade=True) + except ks_exc.ClientException as e: + LOG.error( + "Failed to delete compute node resource provider " + "for compute node %s: %s", + compute_node.uuid, str(e)) # remove the host_mapping of this host. 
try: hm = objects.HostMapping.get_by_host(context, service.host) @@ -257,12 +346,45 @@ def delete(self, req, id): explanation = _("Service id %s refers to multiple services.") % id raise webob.exc.HTTPBadRequest(explanation=explanation) - @validation.query_schema(services.index_query_schema) + @staticmethod + def _assert_no_in_progress_migrations(context, service_id, compute_nodes): + """Ensures there are no in-progress migrations on the given nodes. + + :param context: nova auth RequestContext + :param service_id: id of the Service being deleted + :param compute_nodes: ComputeNodeList of nodes on a compute service + :raises: HTTPConflict if there are any in-progress migrations on the + nodes + """ + for cn in compute_nodes: + migrations = ( + objects.MigrationList.get_in_progress_by_host_and_node( + context, cn.host, cn.hypervisor_hostname)) + if migrations: + # Log the migrations for the operator and then raise + # a 409 error. + LOG.info('Unable to delete compute service with id %s ' + 'for host %s. There are %i in-progress ' + 'migrations involving the host. Migrations ' + '(uuid:status): %s', + service_id, cn.host, len(migrations), + ','.join(['%s:%s' % (mig.uuid, mig.status) + for mig in migrations])) + raise webob.exc.HTTPConflict( + explanation=_( + 'Unable to delete compute service that has ' + 'in-progress migrations. Complete the ' + 'migrations or delete the instances first.')) + + @validation.query_schema(services.index_query_schema_275, '2.75') + @validation.query_schema(services.index_query_schema, '2.0', '2.74') @wsgi.expected_errors(()) def index(self, req): """Return a list of all running services. Filter by host & service name """ + context = req.environ['nova.context'] + context.can(services_policies.BASE_POLICY_NAME % 'list', target={}) if api_version_request.is_supported(req, min_version='2.11'): _services = self._get_services_list(req, ['forced_down']) else: @@ -282,6 +404,8 @@ def update(self, req, id, body): service ID passed on the path, just the action, for example PUT /os-services/disable. """ + context = req.environ['nova.context'] + context.can(services_policies.BASE_POLICY_NAME % 'update', target={}) if api_version_request.is_supported(req, min_version='2.11'): actions = self.actions.copy() actions["force-down"] = self._forced_down @@ -293,7 +417,7 @@ def update(self, req, id, body): @wsgi.Controller.api_version(UUID_FOR_ID_MIN_VERSION) # noqa F811 @wsgi.expected_errors((400, 404)) @validation.schema(services.service_update_v2_53, UUID_FOR_ID_MIN_VERSION) - def update(self, req, id, body): + def update(self, req, id, body): # noqa """Perform service update Starting with microversion 2.53, the service uuid is passed in on the @@ -308,7 +432,7 @@ def update(self, req, id, body): # Validate the request context against the policy. context = req.environ['nova.context'] - context.can(services_policies.BASE_POLICY_NAME) + context.can(services_policies.BASE_POLICY_NAME % 'update', target={}) # Get the service by uuid. try: @@ -324,7 +448,7 @@ def update(self, req, id, body): # technically disable a nova-scheduler service, although that doesn't # really do anything within Nova and is just confusing. Now trying to # do that will fail as a nova-scheduler service won't have a host - # mapping so you'll get a 404. In this new microversion, we close that + # mapping so you'll get a 400. 
In this new microversion, we close that + # old gap and make sure you can only enable/disable and set forced_down + # on nova-compute services since those are the only ones that make + # sense to update for those operations. @@ -360,6 +484,8 @@ def update(self, req, id, body): if 'forced_down' in body: service.forced_down = strutils.bool_from_string( body['forced_down'], strict=True) + if service.forced_down is False: + self._check_for_evacuations(context, service.host) # Check to see if anything was actually updated since the schema does # not define any required fields. @@ -369,7 +495,7 @@ def update(self, req, id, body): raise webob.exc.HTTPBadRequest(explanation=msg) # Now save our updates to the service record in the database. - service.save() + self.host_api.service_update(context, service) # Return the full service record details. additional_fields = ['forced_down'] diff --git a/nova/api/openstack/compute/shelve.py b/nova/api/openstack/compute/shelve.py index 312e51452a7..7e1601a2a82 100644 --- a/nova/api/openstack/compute/shelve.py +++ b/nova/api/openstack/compute/shelve.py @@ -14,22 +14,28 @@ """The shelved mode extension.""" +from oslo_log import log as logging from webob import exc +from nova.api.openstack import api_version_request from nova.api.openstack import common +from nova.api.openstack.compute.schemas import shelve as shelve_schemas from nova.api.openstack import wsgi -from nova import compute +from nova.api import validation +from nova.compute import api as compute from nova import exception from nova.policies import shelve as shelve_policies +LOG = logging.getLogger(__name__) + class ShelveController(wsgi.Controller): - def __init__(self, *args, **kwargs): - super(ShelveController, self).__init__(*args, **kwargs) + def __init__(self): + super(ShelveController, self).__init__() self.compute_api = compute.API() @wsgi.response(202) - @wsgi.expected_errors((404, 409)) + @wsgi.expected_errors((404, 403, 409, 400)) @wsgi.action('shelve') def _shelve(self, req, id, body): """Move an instance into shelved mode.""" @@ -41,16 +47,19 @@ def _shelve(self, req, id, body): 'project_id': instance.project_id}) try: self.compute_api.shelve(context, instance) - except exception.InstanceUnknownCell as e: - raise exc.HTTPNotFound(explanation=e.format_message()) - except exception.InstanceIsLocked as e: + except ( + exception.InstanceIsLocked, + exception.UnexpectedTaskStateError, + ) as e: raise exc.HTTPConflict(explanation=e.format_message()) except exception.InstanceInvalidState as state_error: common.raise_http_conflict_for_instance_invalid_state(state_error, 'shelve', id) + except exception.ForbiddenPortsWithAccelerator as e: + raise exc.HTTPBadRequest(explanation=e.format_message()) @wsgi.response(202) - @wsgi.expected_errors((404, 409)) + @wsgi.expected_errors((400, 404, 409)) @wsgi.action('shelveOffload') def _shelve_offload(self, req, id, body): """Force removal of a shelved instance from the compute node.""" @@ -60,8 +69,6 @@ def _shelve_offload(self, req, id, body): instance = common.get_instance(self.compute_api, context, id) try: self.compute_api.shelve_offload(context, instance) - except exception.InstanceUnknownCell as e: - raise exc.HTTPNotFound(explanation=e.format_message()) except exception.InstanceIsLocked as e: raise exc.HTTPConflict(explanation=e.format_message()) except exception.InstanceInvalidState as state_error: @@ -69,21 +76,41 @@ def _shelve_offload(self, req, id, body): 'shelveOffload', id) + except exception.ForbiddenPortsWithAccelerator as e: + raise
exc.HTTPBadRequest(explanation=e.format_message()) + @wsgi.response(202) - @wsgi.expected_errors((404, 409)) + @wsgi.expected_errors((400, 404, 409)) @wsgi.action('unshelve') + # In microversion 2.77 we support specifying 'availability_zone' to + # unshelve a server. But before 2.77 there is no request body + # schema validation (because of body=null). + @validation.schema(shelve_schemas.unshelve_v277, min_version='2.77') def _unshelve(self, req, id, body): """Restore an instance from shelved mode.""" context = req.environ["nova.context"] - context.can(shelve_policies.POLICY_ROOT % 'unshelve') instance = common.get_instance(self.compute_api, context, id) + context.can(shelve_policies.POLICY_ROOT % 'unshelve', + target={'project_id': instance.project_id}) + + new_az = None + unshelve_dict = body['unshelve'] + support_az = api_version_request.is_supported(req, '2.77') + if support_az and unshelve_dict: + new_az = unshelve_dict['availability_zone'] + try: - self.compute_api.unshelve(context, instance) - except exception.InstanceUnknownCell as e: - raise exc.HTTPNotFound(explanation=e.format_message()) - except exception.InstanceIsLocked as e: + self.compute_api.unshelve(context, instance, new_az=new_az) + except (exception.InstanceIsLocked, + exception.UnshelveInstanceInvalidState, + exception.MismatchVolumeAZException) as e: raise exc.HTTPConflict(explanation=e.format_message()) except exception.InstanceInvalidState as state_error: common.raise_http_conflict_for_instance_invalid_state(state_error, 'unshelve', id) + except ( + exception.InvalidRequest, + exception.ExtendedResourceRequestOldCompute, + ) as e: + raise exc.HTTPBadRequest(explanation=e.format_message()) diff --git a/nova/api/openstack/compute/simple_tenant_usage.py b/nova/api/openstack/compute/simple_tenant_usage.py index 7aa4dff1a66..251323c91e6 100644 --- a/nova/api/openstack/compute/simple_tenant_usage.py +++ b/nova/api/openstack/compute/simple_tenant_usage.py @@ -13,12 +13,12 @@ # License for the specific language governing permissions and limitations # under the License. 
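In the shelve.py hunk above, microversion 2.77 adds an optional availability_zone to the unshelve request body, which was previously null. A self-contained sketch of the version-gated parsing, assuming the two body shapes shown and illustrative version tuples:

    # Sketch of the version-gated unshelve body handling above. Before
    # 2.77 the body is {'unshelve': None}; from 2.77 it may carry an
    # availability zone.
    def parse_unshelve_body(body, version):
        new_az = None
        unshelve_dict = body['unshelve']
        # Before 2.77 no schema validation runs (body=null), so only
        # look inside the dict when the microversion allows it.
        if version >= (2, 77) and unshelve_dict:
            new_az = unshelve_dict['availability_zone']
        return new_az


    assert parse_unshelve_body({'unshelve': None}, (2, 77)) is None
    assert parse_unshelve_body(
        {'unshelve': {'availability_zone': 'az1'}}, (2, 77)) == 'az1'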
+import collections import datetime +from urllib import parse as urlparse import iso8601 from oslo_utils import timeutils -import six -import six.moves.urllib.parse as urlparse from webob import exc from nova.api.openstack import common @@ -40,7 +40,7 @@ def parse_strtime(dstr, fmt): try: return timeutils.parse_strtime(dstr, fmt) except (TypeError, ValueError) as e: - raise exception.InvalidStrTime(reason=six.text_type(e)) + raise exception.InvalidStrTime(reason=str(e)) class SimpleTenantUsageController(wsgi.Controller): @@ -142,7 +142,7 @@ def _tenant_usages_for_period(self, context, period_start, period_stop, instances = self._get_instances_all_cells(context, period_start, period_stop, tenant_id, limit, marker) - rval = {} + rval = collections.OrderedDict() flavors = {} all_server_usages = [] @@ -262,7 +262,8 @@ def _get_datetime_range(self, req): return (period_start, period_stop, detailed) @wsgi.Controller.api_version("2.40") - @validation.query_schema(schema.index_query_v240) + @validation.query_schema(schema.index_query_275, '2.75') + @validation.query_schema(schema.index_query_v240, '2.40', '2.74') @wsgi.expected_errors(400) def index(self, req): """Retrieve tenant_usage for all tenants.""" @@ -271,12 +272,13 @@ def index(self, req): @wsgi.Controller.api_version("2.1", "2.39") # noqa @validation.query_schema(schema.index_query) @wsgi.expected_errors(400) - def index(self, req): + def index(self, req): # noqa """Retrieve tenant_usage for all tenants.""" return self._index(req) @wsgi.Controller.api_version("2.40") - @validation.query_schema(schema.show_query_v240) + @validation.query_schema(schema.show_query_275, '2.75') + @validation.query_schema(schema.show_query_v240, '2.40', '2.74') @wsgi.expected_errors(400) def show(self, req, id): """Retrieve tenant_usage for a specified tenant.""" @@ -285,7 +287,7 @@ def show(self, req, id): @wsgi.Controller.api_version("2.1", "2.39") # noqa @validation.query_schema(schema.show_query) @wsgi.expected_errors(400) - def show(self, req, id): + def show(self, req, id): # noqa """Retrieve tenant_usage for a specified tenant.""" return self._show(req, id) diff --git a/nova/api/openstack/compute/suspend_server.py b/nova/api/openstack/compute/suspend_server.py index 61f89d35dd9..8495e1eb921 100644 --- a/nova/api/openstack/compute/suspend_server.py +++ b/nova/api/openstack/compute/suspend_server.py @@ -16,18 +16,18 @@ from nova.api.openstack import common from nova.api.openstack import wsgi -from nova import compute +from nova.compute import api as compute from nova import exception from nova.policies import suspend_server as ss_policies class SuspendServerController(wsgi.Controller): - def __init__(self, *args, **kwargs): - super(SuspendServerController, self).__init__(*args, **kwargs) + def __init__(self): + super(SuspendServerController, self).__init__() self.compute_api = compute.API() @wsgi.response(202) - @wsgi.expected_errors((404, 409)) + @wsgi.expected_errors((403, 404, 409, 400)) @wsgi.action('suspend') def _suspend(self, req, id, body): """Permit admins to suspend the server.""" @@ -38,13 +38,13 @@ def _suspend(self, req, id, body): target={'user_id': server.user_id, 'project_id': server.project_id}) self.compute_api.suspend(context, server) - except exception.InstanceUnknownCell as e: - raise exc.HTTPNotFound(explanation=e.format_message()) except exception.InstanceIsLocked as e: raise exc.HTTPConflict(explanation=e.format_message()) except exception.InstanceInvalidState as state_error: 
common.raise_http_conflict_for_instance_invalid_state(state_error, 'suspend', id) + except exception.ForbiddenPortsWithAccelerator as e: + raise exc.HTTPBadRequest(explanation=e.format_message()) @wsgi.response(202) @wsgi.expected_errors((404, 409)) @@ -52,12 +52,11 @@ def _suspend(self, req, id, body): def _resume(self, req, id, body): """Permit admins to resume the server from suspend.""" context = req.environ['nova.context'] - context.can(ss_policies.POLICY_ROOT % 'resume') server = common.get_instance(self.compute_api, context, id) + context.can(ss_policies.POLICY_ROOT % 'resume', + target={'project_id': server.project_id}) try: self.compute_api.resume(context, server) - except exception.InstanceUnknownCell as e: - raise exc.HTTPNotFound(explanation=e.format_message()) except exception.InstanceIsLocked as e: raise exc.HTTPConflict(explanation=e.format_message()) except exception.InstanceInvalidState as state_error: diff --git a/nova/api/openstack/compute/tenant_networks.py b/nova/api/openstack/compute/tenant_networks.py index f85b2ceed9e..69d77723a9d 100644 --- a/nova/api/openstack/compute/tenant_networks.py +++ b/nova/api/openstack/compute/tenant_networks.py @@ -13,24 +13,17 @@ # License for the specific language governing permissions and limitations # under the License. - -import netaddr -import netaddr.core as netexc from oslo_log import log as logging -import six from webob import exc from nova.api.openstack.api_version_request \ import MAX_PROXY_API_SUPPORT_VERSION -from nova.api.openstack.compute.schemas import tenant_networks as schema from nova.api.openstack import wsgi -from nova.api import validation import nova.conf from nova import context as nova_context from nova import exception from nova.i18n import _ -import nova.network -from nova import objects +from nova.network import neutron from nova.policies import tenant_networks as tn_policies from nova import quota @@ -42,17 +35,22 @@ def network_dict(network): - # NOTE(danms): Here, network should be an object, which could have come - # from neutron and thus be missing most of the attributes. Providing a - # default to get() avoids trying to lazy-load missing attributes. 
- return {"id": network.get("uuid", None) or network.get("id", None), - "cidr": str(network.get("cidr", None)), - "label": network.get("label", None)} + # convert from a neutron response to something resembling what we used to + # produce with nova-network + return { + 'id': network.get('id'), + # yes, this is bananas, but this is what the API returned historically + # when using neutron instead of nova-network, so we keep on returning + # that + 'cidr': str(None), + 'label': network.get('name'), + } class TenantNetworkController(wsgi.Controller): - def __init__(self, network_api=None): - self.network_api = nova.network.API() + def __init__(self): + super(TenantNetworkController, self).__init__() + self.network_api = neutron.API() self._default_networks = [] def _refresh_default_networks(self): @@ -67,16 +65,14 @@ def _get_default_networks(self): project_id = CONF.api.neutron_default_tenant_id ctx = nova_context.RequestContext(user_id=None, project_id=project_id) - networks = {} - for n in self.network_api.get_all(ctx): - networks[n['id']] = n['label'] - return [{'id': k, 'label': v} for k, v in networks.items()] + return self.network_api.get_all(ctx) @wsgi.Controller.api_version("2.1", MAX_PROXY_API_SUPPORT_VERSION) @wsgi.expected_errors(()) def index(self, req): context = req.environ['nova.context'] - context.can(tn_policies.BASE_POLICY_NAME) + context.can(tn_policies.POLICY_NAME % 'list', + target={'project_id': context.project_id}) networks = list(self.network_api.get_all(context)) if not self._default_networks: self._refresh_default_networks() @@ -87,7 +83,8 @@ def index(self, req): @wsgi.expected_errors(404) def show(self, req, id): context = req.environ['nova.context'] - context.can(tn_policies.BASE_POLICY_NAME) + context.can(tn_policies.POLICY_NAME % 'show', + target={'project_id': context.project_id}) try: network = self.network_api.get(context, id) except exception.NetworkNotFound: @@ -95,99 +92,10 @@ def show(self, req, id): raise exc.HTTPNotFound(explanation=msg) return {'network': network_dict(network)} - @wsgi.Controller.api_version("2.1", MAX_PROXY_API_SUPPORT_VERSION) - @wsgi.expected_errors((403, 404, 409)) - @wsgi.response(202) + @wsgi.expected_errors(410) def delete(self, req, id): - context = req.environ['nova.context'] - context.can(tn_policies.BASE_POLICY_NAME) + raise exc.HTTPGone() - try: - self.network_api.disassociate(context, id) - self.network_api.delete(context, id) - except exception.PolicyNotAuthorized as e: - raise exc.HTTPForbidden(explanation=six.text_type(e)) - except exception.NetworkInUse as e: - raise exc.HTTPConflict(explanation=e.format_message()) - except exception.NetworkNotFound: - msg = _("Network not found") - raise exc.HTTPNotFound(explanation=msg) - - @wsgi.Controller.api_version("2.1", MAX_PROXY_API_SUPPORT_VERSION) - @wsgi.expected_errors((400, 403, 409, 503)) - @validation.schema(schema.create) + @wsgi.expected_errors(410) def create(self, req, body): - context = req.environ["nova.context"] - context.can(tn_policies.BASE_POLICY_NAME) - - network = body["network"] - keys = ["cidr", "cidr_v6", "ipam", "vlan_start", "network_size", - "num_networks"] - kwargs = {k: network.get(k) for k in keys} - - label = network["label"] - - if kwargs["cidr"]: - try: - net = netaddr.IPNetwork(kwargs["cidr"]) - if net.size < 4: - msg = _("Requested network does not contain " - "enough (2+) usable hosts") - raise exc.HTTPBadRequest(explanation=msg) - except netexc.AddrConversionError: - msg = _("Address could not be converted.") - raise 
exc.HTTPBadRequest(explanation=msg) - - try: - if CONF.enable_network_quota: - objects.Quotas.check_deltas(context, {'networks': 1}, - context.project_id) - except exception.OverQuota: - msg = _("Quota exceeded, too many networks.") - raise exc.HTTPForbidden(explanation=msg) - - kwargs['project_id'] = context.project_id - - try: - networks = self.network_api.create(context, - label=label, **kwargs) - except exception.PolicyNotAuthorized as e: - raise exc.HTTPForbidden(explanation=six.text_type(e)) - except exception.CidrConflict as e: - raise exc.HTTPConflict(explanation=e.format_message()) - except Exception: - msg = _("Create networks failed") - LOG.exception(msg, extra=network) - raise exc.HTTPServiceUnavailable(explanation=msg) - - # NOTE(melwitt): We recheck the quota after creating the object to - # prevent users from allocating more resources than their allowed quota - # in the event of a race. This is configurable because it can be - # expensive if strict quota limits are not required in a deployment. - if CONF.quota.recheck_quota and CONF.enable_network_quota: - try: - objects.Quotas.check_deltas(context, {'networks': 0}, - context.project_id) - except exception.OverQuota: - self.network_api.delete(context, - network_dict(networks[0])['id']) - msg = _("Quota exceeded, too many networks.") - raise exc.HTTPForbidden(explanation=msg) - - return {"network": network_dict(networks[0])} - - -def _network_count(context, project_id): - # NOTE(melwitt): This assumes a single cell. - ctx = nova_context.RequestContext(user_id=None, project_id=project_id) - ctx = ctx.elevated() - networks = nova.network.api.API().get_all(ctx) - return {'project': {'networks': len(networks)}} - - -def _register_network_quota(): - if CONF.enable_network_quota: - QUOTAS.register_resource(quota.CountableResource('networks', - _network_count, - 'quota_networks')) -_register_network_quota() + raise exc.HTTPGone() diff --git a/nova/api/openstack/compute/used_limits.py b/nova/api/openstack/compute/used_limits.py deleted file mode 100644 index 5b4f36ad59e..00000000000 --- a/nova/api/openstack/compute/used_limits.py +++ /dev/null @@ -1,69 +0,0 @@ -# Copyright 2012 OpenStack Foundation -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
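The removed tenant-networks create() above relied on nova's check-then-recheck quota pattern (see the NOTE(melwitt) block): check before creating, create, then re-count and undo if a concurrent request won the race. A generic sketch of that pattern over an assumed in-memory store (the store and exception are stand-ins, not nova objects):

    # Generic sketch of the create-then-recheck quota pattern from the
    # removed block above.
    class OverQuota(Exception):
        pass


    def create_with_recheck(store, project, limit, recheck=True):
        if len(store[project]) + 1 > limit:
            raise OverQuota('too many networks')   # pre-create check
        store[project].append(object())            # create the resource
        if recheck and len(store[project]) > limit:
            store[project].pop()                   # undo on race loss
            raise OverQuota('too many networks')


    store = {'p1': []}
    create_with_recheck(store, 'p1', limit=1)
    assert len(store['p1']) == 1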
- - -from nova.api.openstack import api_version_request -from nova.api.openstack.api_version_request \ - import MIN_WITHOUT_PROXY_API_SUPPORT_VERSION -from nova.api.openstack import wsgi -from nova.policies import used_limits as ul_policies -from nova import quota - - -QUOTAS = quota.QUOTAS - - -class UsedLimitsController(wsgi.Controller): - - @wsgi.extends - @wsgi.expected_errors(()) - def index(self, req, resp_obj): - context = req.environ['nova.context'] - project_id = self._project_id(context, req) - quotas = QUOTAS.get_project_quotas(context, project_id, usages=True) - if api_version_request.is_supported( - req, min_version=MIN_WITHOUT_PROXY_API_SUPPORT_VERSION): - quota_map = { - 'totalRAMUsed': 'ram', - 'totalCoresUsed': 'cores', - 'totalInstancesUsed': 'instances', - 'totalServerGroupsUsed': 'server_groups', - } - else: - quota_map = { - 'totalRAMUsed': 'ram', - 'totalCoresUsed': 'cores', - 'totalInstancesUsed': 'instances', - 'totalFloatingIpsUsed': 'floating_ips', - 'totalSecurityGroupsUsed': 'security_groups', - 'totalServerGroupsUsed': 'server_groups', - } - - used_limits = {} - for display_name, key in quota_map.items(): - if key in quotas: - used_limits[display_name] = quotas[key]['in_use'] - - resp_obj.obj['limits']['absolute'].update(used_limits) - - def _project_id(self, context, req): - if 'tenant_id' in req.GET: - tenant_id = req.GET.get('tenant_id') - target = { - 'project_id': tenant_id, - 'user_id': context.user_id - } - context.can(ul_policies.BASE_POLICY_NAME, target) - return tenant_id - return context.project_id diff --git a/nova/api/openstack/compute/versions.py b/nova/api/openstack/compute/versions.py index 480b2af1f0b..be45eb01017 100644 --- a/nova/api/openstack/compute/versions.py +++ b/nova/api/openstack/compute/versions.py @@ -100,3 +100,18 @@ def get_action_args(self, request_environment): args['action'] = 'multi' return args + + +class VersionsV2(wsgi.Resource): + + def __init__(self): + super(VersionsV2, self).__init__(None) + + def index(self, req, body=None): + builder = views_versions.get_view_builder(req) + ver = 'v2.0' if req.is_legacy_v2() else 'v2.1' + return builder.build_version(VERSIONS[ver]) + + def get_action_args(self, request_environment): + """Parse dictionary created by routes library.""" + return {'action': 'index'} diff --git a/nova/api/openstack/compute/views/addresses.py b/nova/api/openstack/compute/views/addresses.py index 1c4986f6dbd..20a5b0ff0a5 100644 --- a/nova/api/openstack/compute/views/addresses.py +++ b/nova/api/openstack/compute/views/addresses.py @@ -24,7 +24,7 @@ class ViewBuilder(common.ViewBuilder): _collection_name = "addresses" - def basic(self, ip, extend_address=False): + def basic(self, request, ip, extend_address=False): """Return a dictionary describing an IP address.""" address = { "version": ip["version"], @@ -37,15 +37,17 @@ def basic(self, ip, extend_address=False): }) return address - def show(self, network, label, extend_address=False): + def show(self, request, network, label, extend_address=False): """Returns a dictionary describing a network.""" all_ips = itertools.chain(network["ips"], network["floating_ips"]) - return {label: [self.basic(ip, extend_address) for ip in all_ips]} + return { + label: [self.basic(request, ip, extend_address) for ip in all_ips], + } - def index(self, networks, extend_address=False): + def index(self, request, networks, extend_address=False): """Return a dictionary describing a list of networks.""" addresses = collections.OrderedDict() for label, network in networks.items(): - 
network_dict = self.show(network, label, extend_address) + network_dict = self.show(request, network, label, extend_address) addresses[label] = network_dict[label] - return dict(addresses=addresses) + return {'addresses': addresses} diff --git a/nova/api/openstack/compute/views/flavors.py b/nova/api/openstack/compute/views/flavors.py index a7b2a045ddc..1df4f093ba0 100644 --- a/nova/api/openstack/compute/views/flavors.py +++ b/nova/api/openstack/compute/views/flavors.py @@ -15,8 +15,6 @@ from nova.api.openstack import api_version_request from nova.api.openstack import common -from nova.policies import flavor_access as fa_policies -from nova.policies import flavor_rxtx as fr_policies FLAVOR_DESCRIPTION_MICROVERSION = '2.55' FLAVOR_EXTRA_SPECS_MICROVERSION = '2.61' @@ -27,12 +25,10 @@ class ViewBuilder(common.ViewBuilder): _collection_name = "flavors" def basic(self, request, flavor, include_description=False, - update_is_public=None, update_rxtx_factor=None, include_extra_specs=False): - # include_extra_specs & update_is_public & update_rxtx_factor are - # placeholder param which are not used in this method as basic() method - # is used by index() (GET /flavors) which does not return those keys in - # response. + # include_extra_specs is placeholder param which is not used in + # this method as basic() method is used by index() (GET /flavors) + # which does not return those keys in response. flavor_dict = { "flavor": { "id": flavor["flavorid"], @@ -49,7 +45,6 @@ def basic(self, request, flavor, include_description=False, return flavor_dict def show(self, request, flavor, include_description=False, - update_is_public=None, update_rxtx_factor=None, include_extra_specs=False): flavor_dict = { "flavor": { @@ -61,6 +56,8 @@ def show(self, request, flavor, include_description=False, "OS-FLV-EXT-DATA:ephemeral": flavor["ephemeral_gb"], "OS-FLV-DISABLED:disabled": flavor["disabled"], "vcpus": flavor["vcpus"], + "os-flavor-access:is_public": flavor['is_public'], + "rxtx_factor": flavor['rxtx_factor'] or "", "links": self._get_links(request, flavor["flavorid"], self._collection_name), @@ -73,25 +70,8 @@ def show(self, request, flavor, include_description=False, if include_extra_specs: flavor_dict['flavor']['extra_specs'] = flavor.extra_specs - # TODO(gmann): 'update_is_public' & 'update_rxtx_factor' are policies - # checks. Once os-flavor-access & os-flavor-rxtx policies are - # removed, 'os-flavor-access:is_public' and 'rxtx_factor' need to be - # added in response without any check. - - # Evaluate the policies when using show method directly. 
- context = request.environ['nova.context'] - if update_is_public is None: - update_is_public = context.can(fa_policies.BASE_POLICY_NAME, - fatal=False) - if update_rxtx_factor is None: - update_rxtx_factor = context.can(fr_policies.BASE_POLICY_NAME, - fatal=False) - if update_is_public: - flavor_dict['flavor'].update({ - "os-flavor-access:is_public": flavor['is_public']}) - if update_rxtx_factor: - flavor_dict['flavor'].update( - {"rxtx_factor": flavor['rxtx_factor'] or ""}) + if api_version_request.is_supported(request, '2.75'): + flavor_dict['flavor']['swap'] = flavor["swap"] or 0 return flavor_dict @@ -108,20 +88,12 @@ def detail(self, request, flavors, include_extra_specs=False): coll_name = self._collection_name + '/detail' include_description = api_version_request.is_supported( request, FLAVOR_DESCRIPTION_MICROVERSION) - context = request.environ['nova.context'] - update_is_public = context.can(fa_policies.BASE_POLICY_NAME, - fatal=False) - update_rxtx_factor = context.can(fr_policies.BASE_POLICY_NAME, - fatal=False) return self._list_view(self.show, request, flavors, coll_name, include_description=include_description, - update_is_public=update_is_public, - update_rxtx_factor=update_rxtx_factor, include_extra_specs=include_extra_specs) def _list_view(self, func, request, flavors, coll_name, - include_description=False, update_is_public=None, - update_rxtx_factor=None, include_extra_specs=False): + include_description=False, include_extra_specs=False): """Provide a view for a list of flavors. :param func: Function used to format the flavor data @@ -131,17 +103,12 @@ def _list_view(self, func, request, flavors, coll_name, for a pagination query :param include_description: If the flavor.description should be included in the response dict. - :param update_is_public: If the flavor.is_public field should be - included in the response dict. - :param update_rxtx_factor: If the flavor.rxtx_factor field should be - included in the response dict. :param include_extra_specs: If the flavor.extra_specs should be included in the response dict. 
:returns: Flavor reply data in dictionary format """ flavor_list = [func(request, flavor, include_description, - update_is_public, update_rxtx_factor, include_extra_specs)["flavor"] for flavor in flavors] flavors_links = self._get_collection_links(request, diff --git a/nova/api/openstack/compute/views/hypervisors.py b/nova/api/openstack/compute/views/hypervisors.py index 2284245993f..370e383166b 100644 --- a/nova/api/openstack/compute/views/hypervisors.py +++ b/nova/api/openstack/compute/views/hypervisors.py @@ -17,7 +17,7 @@ class ViewBuilder(common.ViewBuilder): - _collection_name = "hypervisors" + _collection_name = "os-hypervisors" def get_links(self, request, hypervisors, detail=False): coll_name = (self._collection_name + '/detail' if detail else diff --git a/nova/api/openstack/compute/views/images.py b/nova/api/openstack/compute/views/images.py index 5cf7cab95bb..1eb8ed5fb79 100644 --- a/nova/api/openstack/compute/views/images.py +++ b/nova/api/openstack/compute/views/images.py @@ -49,6 +49,7 @@ def show(self, request, image): "updated": self._format_date(image.get("updated_at")), "status": self._get_status(image), "progress": self._get_progress(image), + "OS-EXT-IMG-SIZE:size": image.get("size"), "links": self._get_links(request, image["id"], self._collection_name), diff --git a/nova/api/openstack/compute/views/keypairs.py b/nova/api/openstack/compute/views/keypairs.py index 020c7a0ac86..202be5ac315 100644 --- a/nova/api/openstack/compute/views/keypairs.py +++ b/nova/api/openstack/compute/views/keypairs.py @@ -18,8 +18,59 @@ class ViewBuilder(common.ViewBuilder): - _collection_name = "keypairs" + _collection_name = 'os-keypairs' + # TODO(takashin): After v2 and v2.1 is no longer supported, + # 'type' can always be included in the response. + _index_params = ('name', 'public_key', 'fingerprint') + _create_params = _index_params + ('user_id',) + _show_params = _create_params + ('created_at', 'deleted', 'deleted_at', + 'id', 'updated_at') + _index_params_v2_2 = _index_params + ('type',) + _show_params_v2_2 = _show_params + ('type',) def get_links(self, request, keypairs): return self._get_collection_links(request, keypairs, self._collection_name, 'name') + + # TODO(oomichi): It is necessary to filter a response of keypair with + # _build_keypair() when v2.1+microversions for implementing consistent + # behaviors in this keypair resource. + @staticmethod + def _build_keypair(keypair, attrs): + body = {} + for attr in attrs: + body[attr] = keypair[attr] + return body + + def create(self, keypair, private_key=False, key_type=False): + params = [] + if private_key: + params.append('private_key') + # TODO(takashin): After v2 and v2.1 is no longer supported, + # 'type' can always be included in the response. 
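_build_keypair above projects a keypair onto a per-microversion attribute whitelist before it is returned. The same projection in isolation, with abridged field tuples (illustrative, not the full class attributes):

    # Sketch of the whitelist projection done by _build_keypair above.
    INDEX_PARAMS = ('name', 'public_key', 'fingerprint')
    INDEX_PARAMS_V2_2 = INDEX_PARAMS + ('type',)


    def build_keypair(keypair, attrs):
        # Copy only the whitelisted attributes into the response body.
        return {attr: keypair[attr] for attr in attrs}


    kp = {'name': 'kp1', 'public_key': 'ssh-rsa AAAA...',
          'fingerprint': 'aa:bb', 'type': 'ssh'}
    assert build_keypair(kp, INDEX_PARAMS_V2_2)['type'] == 'ssh'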
+ if key_type: + params.append('type') + params.extend(self._create_params) + + return {'keypair': self._build_keypair(keypair, params)} + + def index(self, req, key_pairs, key_type=False, links=False): + keypairs_list = [ + {'keypair': self._build_keypair( + key_pair, + self._index_params_v2_2 if key_type else self._index_params)} + for key_pair in key_pairs] + keypairs_dict = {'keypairs': keypairs_list} + + if links: + keypairs_links = self.get_links(req, key_pairs) + + if keypairs_links: + keypairs_dict['keypairs_links'] = keypairs_links + + return keypairs_dict + + def show(self, keypair, key_type=False): + return {'keypair': self._build_keypair( + keypair, self._show_params_v2_2 if key_type + else self._show_params)} diff --git a/nova/api/openstack/compute/views/limits.py b/nova/api/openstack/compute/views/limits.py index db935a56e95..38ea722594b 100644 --- a/nova/api/openstack/compute/views/limits.py +++ b/nova/api/openstack/compute/views/limits.py @@ -35,22 +35,29 @@ def __init__(self): "server_group_members": ["maxServerGroupMembers"] } - def build(self, absolute_limits, filtered_limits=None, + def build(self, request, quotas, filtered_limits=None, max_image_meta=True): + filtered_limits = filtered_limits or [] absolute_limits = self._build_absolute_limits( - absolute_limits, filtered_limits, + quotas, filtered_limits, max_image_meta=max_image_meta) + per_flavor_limits = self._build_per_flavor_limits(quotas) + + used_limits = self._build_used_limits( + request, quotas, filtered_limits) + absolute_limits.update(used_limits) output = { "limits": { "rate": [], "absolute": absolute_limits, + "absolutePerFlavor": per_flavor_limits, }, } return output - def _build_absolute_limits(self, absolute_limits, filtered_limits=None, + def _build_absolute_limits(self, quotas, filtered_limits=None, max_image_meta=True): """Builder for absolute limits @@ -60,7 +67,7 @@ def _build_absolute_limits(self, absolute_limits, filtered_limits=None, filtered_limits is an optional list of limits to exclude from the result set. """ - filtered_limits = filtered_limits or [] + absolute_limits = {k: v['limit'] for k, v in quotas.items()} limits = {} for name, value in absolute_limits.items(): if (name in self.limit_names and @@ -70,3 +77,31 @@ def _build_absolute_limits(self, absolute_limits, filtered_limits=None, continue limits[limit_name] = value return limits + + def _build_used_limits(self, request, quotas, filtered_limits): + quota_map = { + 'totalRAMUsed': 'ram', + 'totalCoresUsed': 'cores', + 'totalInstancesUsed': 'instances', + 'totalFloatingIpsUsed': 'floating_ips', + 'totalSecurityGroupsUsed': 'security_groups', + 'totalServerGroupsUsed': 'server_groups', + } + used_limits = {} + for display_name, key in quota_map.items(): + if (key in quotas and key not in filtered_limits): + used_limits[display_name] = quotas[key]['in_use'] + + return used_limits + + def _build_per_flavor_limits(self, quotas): + limits = {} + for name, value in quotas.items(): + if name.startswith('instances_'): + flavorname = name[10:] + limits[flavorname] = { + 'maxTotalInstances': value['limit'], + 'totalInstancesUsed': value['in_use'], + } + + return limits diff --git a/nova/api/openstack/compute/views/servers.py b/nova/api/openstack/compute/views/servers.py index e7d339e12d3..c14eee53258 100644 --- a/nova/api/openstack/compute/views/servers.py +++ b/nova/api/openstack/compute/views/servers.py @@ -15,16 +15,25 @@ # under the License. 
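Before moving into the servers view changes below, a note on the limits view above: the per-flavor quota handling keys off the 'instances_<flavorname>' naming convention of the quota dict. A minimal standalone sketch of that grouping, assuming quota values shaped like {'limit': int, 'in_use': int}; the function and constant names here are illustrative, not part of the patch:

INSTANCES_PREFIX = 'instances_'


def build_per_flavor_limits(quotas):
    # Group 'instances_<flavorname>' quota entries into a per-flavor
    # mapping of limit and usage, mirroring _build_per_flavor_limits.
    limits = {}
    for name, value in quotas.items():
        if name.startswith(INSTANCES_PREFIX):
            flavorname = name[len(INSTANCES_PREFIX):]
            limits[flavorname] = {
                'maxTotalInstances': value['limit'],
                'totalInstancesUsed': value['in_use'],
            }
    return limits

Given quotas = {'instances': {'limit': 10, 'in_use': 4}, 'instances_m1.small': {'limit': 5, 'in_use': 2}}, this returns {'m1.small': {'maxTotalInstances': 5, 'totalInstancesUsed': 2}}; the plain 'instances' key is left for the absolute limits.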
from oslo_log import log as logging +from oslo_serialization import jsonutils from nova.api.openstack import api_version_request from nova.api.openstack import common from nova.api.openstack.compute.views import addresses as views_addresses from nova.api.openstack.compute.views import flavors as views_flavors from nova.api.openstack.compute.views import images as views_images +from nova import availability_zones as avail_zone +from nova.compute import api as compute +from nova.compute import vm_states from nova import context as nova_context from nova import exception +from nova.network import security_group_api from nova import objects +from nova.objects import fields +from nova.objects import virtual_interface +from nova.policies import extended_server_attributes as esa_policies from nova.policies import flavor_extra_specs as fes_policies +from nova.policies import servers as servers_policies from nova import utils @@ -60,10 +69,12 @@ def __init__(self): self._address_builder = views_addresses.ViewBuilder() self._image_builder = views_images.ViewBuilder() self._flavor_builder = views_flavors.ViewBuilder() + self.compute_api = compute.API() def create(self, request, instance): """View that should be returned when an instance is created.""" - return { + + server = { "server": { "id": instance["uuid"], "links": self._get_links(request, @@ -76,9 +87,31 @@ def create(self, request, instance): 'AUTO' if instance.get('auto_disk_config') else 'MANUAL'), }, } + self._add_security_grps(request, [server["server"]], [instance], + create_request=True) + + return server - def basic(self, request, instance, show_extra_specs=False): + def basic(self, request, instance, show_extra_specs=False, + show_extended_attr=None, show_host_status=None, + show_sec_grp=None, bdms=None, cell_down_support=False, + show_user_data=False): """Generic, non-detailed view of an instance.""" + if cell_down_support and 'display_name' not in instance: + # NOTE(tssurya): If the microversion is >= 2.69, this boolean will + # be true in which case we check if there are instances from down + # cells (by checking if their objects have missing keys like + # `display_name`) and return partial constructs based on the + # information available from the nova_api database. + return { + "server": { + "id": instance.uuid, + "status": "UNKNOWN", + "links": self._get_links(request, + instance.uuid, + self._collection_name), + }, + } return { "server": { "id": instance["uuid"], @@ -109,12 +142,91 @@ def get_show_expected_attrs(self, expected_attrs=None): # results. return sorted(list(set(self._show_expected_attrs + expected_attrs))) + def _show_from_down_cell(self, request, instance, show_extra_specs, + show_server_groups): + """Function that constructs the partial response for the instance.""" + ret = { + "server": { + "id": instance.uuid, + "status": "UNKNOWN", + "tenant_id": instance.project_id, + "created": utils.isotime(instance.created_at), + "links": self._get_links( + request, instance.uuid, self._collection_name), + }, + } + if 'flavor' in instance: + # If the key 'flavor' is present for an instance from a down cell + # it means that the request is ``GET /servers/{server_id}`` and + # thus we include the information from the request_spec of the + # instance like its flavor, image, avz, and user_id in addition to + # the basic information from its instance_mapping. 
+ # If 'flavor' key is not present for an instance from a down cell + # it means the request is ``GET /servers/detail`` and we + # do not expose the flavor in the response when listing servers + # with details for performance reasons of fetching it from the + # request specs table for the whole list of instances. + ret["server"]["image"] = self._get_image(request, instance) + ret["server"]["flavor"] = self._get_flavor(request, instance, + show_extra_specs) + # in case availability zone was not requested by the user during + # boot time, return UNKNOWN. + avz = instance.availability_zone or "UNKNOWN" + ret["server"]["OS-EXT-AZ:availability_zone"] = avz + ret["server"]["OS-EXT-STS:power_state"] = instance.power_state + # in case it's an old request spec which doesn't have the user_id + # data migrated, return UNKNOWN. + ret["server"]["user_id"] = instance.user_id or "UNKNOWN" + if show_server_groups: + context = request.environ['nova.context'] + ret['server']['server_groups'] = self._get_server_groups( + context, instance) + return ret + + @staticmethod + def _get_host_status_unknown_only(context, instance=None): + """We will use the unknown_only variable to tell us what host status we + can show, if any: + * unknown_only = False means we can show any host status. + * unknown_only = True means that we can only show host + status: UNKNOWN. If the host status is anything other than + UNKNOWN, we will not include the host_status field in the + response. + * unknown_only = None means we cannot show host status at all and + we will not include the host_status field in the response. + """ + unknown_only = None + # Check show:host_status policy first because if it passes, we know we + # can show any host status and need not check the more restrictive + # show:host_status:unknown-only policy. + # Keep target as None (policy will then default the target to + # context.project_id); this is the case for the 'detail' API, whose + # policy defaults to system and project reader. + target = None + if instance is not None: + target = {'project_id': instance.project_id} + if context.can( + servers_policies.SERVERS % 'show:host_status', + fatal=False, target=target): + unknown_only = False + # If we are not allowed to show any/all host status, check if we can at + # least show only the host status: UNKNOWN. + elif context.can( + servers_policies.SERVERS % + 'show:host_status:unknown-only', + fatal=False, + target=target): + unknown_only = True + return unknown_only + def show(self, request, instance, extend_address=True, - show_extra_specs=None): + show_extra_specs=None, show_AZ=True, show_config_drive=True, + show_extended_attr=None, show_host_status=None, + show_keypair=True, show_srv_usg=True, show_sec_grp=True, + show_extended_status=True, show_extended_volumes=True, + bdms=None, cell_down_support=False, show_server_groups=False, + show_user_data=True): """Detailed view of a single instance.""" - ip_v4 = instance.get('access_ip_v4') - ip_v6 = instance.get('access_ip_v6') - if show_extra_specs is None: # detail will pre-calculate this for us. If we're doing show, # then figure it out here.
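The tri-state result of _get_host_status_unknown_only above then gates what show() and detail() put in the response. A condensed sketch of that gating, with the plain string 'UNKNOWN' standing in for fields.HostStatus.UNKNOWN; the helper name is illustrative, not part of the patch:

def host_status_for_response(unknown_only, host_status):
    # None: the caller may not see host status at all; omit the field.
    if unknown_only is None:
        return None
    # True: only UNKNOWN may be exposed; omit anything else.
    if unknown_only and host_status != 'UNKNOWN':
        return None
    # False: any status may be exposed.
    return host_status

So host_status_for_response(False, 'UP') yields 'UP', host_status_for_response(True, 'UP') yields None (the field is omitted), and host_status_for_response(True, 'UNKNOWN') yields 'UNKNOWN'.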
@@ -124,6 +236,17 @@ def show(self, request, instance, extend_address=True, show_extra_specs = context.can( fes_policies.POLICY_ROOT % 'index', fatal=False) + if cell_down_support and 'display_name' not in instance: + # NOTE(tssurya): If the microversion is >= 2.69, this boolean will + # be true in which case we check if there are instances from down + # cells (by checking if their objects have missing keys like + # `display_name`) and return partial constructs based on the + # information available from the nova_api database. + return self._show_from_down_cell( + request, instance, show_extra_specs, show_server_groups) + ip_v4 = instance.get('access_ip_v4') + ip_v6 = instance.get('access_ip_v6') + server = { "server": { "id": instance["uuid"], @@ -160,10 +283,118 @@ def show(self, request, instance, extend_address=True, if server["server"]["status"] in self._progress_statuses: server["server"]["progress"] = instance.get("progress", 0) + context = request.environ['nova.context'] + if show_AZ: + az = avail_zone.get_instance_availability_zone(context, instance) + # NOTE(mriedem): The OS-EXT-AZ prefix should not be used for new + # attributes after v2.1. They are only in v2.1 for backward compat + # with v2.0. + server["server"]["OS-EXT-AZ:availability_zone"] = az or '' + + if show_config_drive: + server["server"]["config_drive"] = instance["config_drive"] + + if show_keypair: + server["server"]["key_name"] = instance["key_name"] + + if show_srv_usg: + for k in ['launched_at', 'terminated_at']: + key = "OS-SRV-USG:" + k + # NOTE(danms): Historically, this timestamp has been generated + # merely by grabbing str(datetime) of a TZ-naive object. The + # only way we can keep that with instance objects is to strip + # the tzinfo from the stamp and str() it. + server["server"][key] = (instance[k].replace(tzinfo=None) + if instance[k] else None) + if show_sec_grp: + self._add_security_grps(request, [server["server"]], [instance]) + + if show_extended_attr is None: + show_extended_attr = context.can( + esa_policies.BASE_POLICY_NAME, fatal=False, + target={'project_id': instance.project_id}) + + if show_extended_attr: + properties = ['host', 'name', 'node'] + if api_version_request.is_supported(request, min_version='2.3'): + # NOTE(mriedem): These will use the OS-EXT-SRV-ATTR prefix + # below and that's OK for microversion 2.3 which is being + # compatible with v2.0 for the ec2 API split out from Nova. + # After this, however, new microversions should not be using + # the OS-EXT-SRV-ATTR prefix. + properties += ['reservation_id', 'launch_index', + 'hostname', 'kernel_id', 'ramdisk_id', + 'root_device_name'] + # NOTE(gmann): Since microversion 2.75, the PUT and Rebuild + # responses include all the server attributes, including these + # extended attributes. But microversion 2.57 already adds + # 'user_data' to the Rebuild response in the API method, so we + # skip adding the user data attribute in the rebuild case. + # 'show_user_data' is false only in case of rebuild. + if show_user_data: + properties += ['user_data'] + for attr in properties: + if attr == 'name': + key = "OS-EXT-SRV-ATTR:instance_%s" % attr + elif attr == 'node': + key = "OS-EXT-SRV-ATTR:hypervisor_hostname" + else: + # NOTE(mriedem): Nothing after microversion 2.3 should use + # the OS-EXT-SRV-ATTR prefix for the attribute key name. + key = "OS-EXT-SRV-ATTR:%s" % attr + server["server"][key] = getattr(instance, attr) + + if show_extended_status: + # NOTE(gmann): Removed 'locked_by' from extended status + # to make it the same as V2.
If needed it can be added with a + microversion. + for state in ['task_state', 'vm_state', 'power_state']: + # NOTE(mriedem): The OS-EXT-STS prefix should not be used for + # new attributes after v2.1. They are only in v2.1 for backward + # compat with v2.0. + key = "%s:%s" % ('OS-EXT-STS', state) + server["server"][key] = instance[state] + + if show_extended_volumes: + # NOTE(mriedem): The os-extended-volumes prefix should not be used + # for new attributes after v2.1. They are only in v2.1 for backward + # compat with v2.0. + add_delete_on_termination = api_version_request.is_supported( + request, min_version='2.3') + if bdms is None: + bdms = objects.BlockDeviceMappingList.bdms_by_instance_uuid( + context, [instance["uuid"]]) + self._add_volumes_attachments(server["server"], + bdms, + add_delete_on_termination) + + if api_version_request.is_supported(request, min_version='2.16'): + if show_host_status is None: + unknown_only = self._get_host_status_unknown_only( + context, instance) + # If we're not allowed by policy to show host status at all, + # don't bother requesting instance host status from the compute + # API. + if unknown_only is not None: + host_status = self.compute_api.get_instance_host_status( + instance) + # If we are allowed to show host status of some kind, set + # the host status field only if: + # * unknown_only = False, meaning we can show any status + # OR + # * if unknown_only = True and host_status == UNKNOWN + if (not unknown_only or + host_status == fields.HostStatus.UNKNOWN): + server["server"]['host_status'] = host_status + if api_version_request.is_supported(request, min_version="2.9"): server["server"]["locked"] = (True if instance["locked_by"] else False) + if api_version_request.is_supported(request, min_version="2.73"): + server["server"]["locked_reason"] = (instance.system_metadata.get( + "locked_reason")) + if api_version_request.is_supported(request, min_version="2.19"): server["server"]["description"] = instance.get( "display_description") @@ -177,31 +408,74 @@ def show(self, request, instance, extend_address=True, trusted_certs = instance.trusted_certs.ids server["server"]["trusted_image_certificates"] = trusted_certs + # TODO(stephenfin): Remove this check once we remove the + # OS-EXT-SRV-ATTR:hostname policy checks from the policy in Y or later + if api_version_request.is_supported(request, min_version='2.90'): + # API 2.90 made this field visible to non-admins, but we only show + # it if it's not already added + if not show_extended_attr: + server["server"]["OS-EXT-SRV-ATTR:hostname"] = \ + instance.hostname + + if show_server_groups: + server['server']['server_groups'] = self._get_server_groups( + context, + instance) return server - def index(self, request, instances): + def index(self, request, instances, cell_down_support=False): """Show a list of servers without many details.""" coll_name = self._collection_name return self._list_view(self.basic, request, instances, coll_name, - False) + False, cell_down_support=cell_down_support) - def detail(self, request, instances): + def detail(self, request, instances, cell_down_support=False): """Detailed view of a list of instance.""" coll_name = self._collection_name + '/detail' + context = request.environ['nova.context'] if api_version_request.is_supported(request, min_version='2.47'): # Determine if we should show extra_specs in the inlined flavor # once before we iterate the list of instances - context = request.environ['nova.context'] show_extra_specs = context.can(fes_policies.POLICY_ROOT %
'index', fatal=False) else: show_extra_specs = False + show_extended_attr = context.can( + esa_policies.BASE_POLICY_NAME, fatal=False) + + instance_uuids = [inst['uuid'] for inst in instances] + bdms = self._get_instance_bdms_in_multiple_cells(context, + instance_uuids) + + # NOTE(gmann): pass show_sec_grp=False in _list_view() because + # security groups for the detail method will be added by a separate + # call to self._add_security_grps, passing all the servers + # together. That helps to avoid multiple neutron calls for each server. + servers_dict = self._list_view(self.show, request, instances, + coll_name, show_extra_specs, + show_extended_attr=show_extended_attr, + # We process host_status in aggregate. + show_host_status=False, + show_sec_grp=False, + bdms=bdms, + cell_down_support=cell_down_support) + + if api_version_request.is_supported(request, min_version='2.16'): + unknown_only = self._get_host_status_unknown_only(context) + # If we're not allowed by policy to show host status at all, don't + # bother requesting instance host status from the compute API. + if unknown_only is not None: + self._add_host_status(list(servers_dict["servers"]), instances, + unknown_only=unknown_only) + + self._add_security_grps(request, list(servers_dict["servers"]), + instances) + return servers_dict - return self._list_view(self.show, request, instances, coll_name, - show_extra_specs) - - def _list_view(self, func, request, servers, coll_name, show_extra_specs): + def _list_view(self, func, request, servers, coll_name, show_extra_specs, + show_extended_attr=None, show_host_status=None, + show_sec_grp=False, bdms=None, cell_down_support=False): """Provide a view for a list of servers. :param func: Function used to format the server data @@ -209,11 +483,29 @@ def _list_view(self, func, request, servers, coll_name, show_extra_specs): :param servers: List of servers in dictionary format :param coll_name: Name of collection, used to generate the next link for a pagination query + :param show_extended_attr: If the server extended attributes should be + included in the response dict. + :param show_host_status: If the host status should be included in + the response dict. + :param show_sec_grp: If the security group should be included in + the response dict. + :param bdms: Instance BDM info from multiple cells. + :param cell_down_support: True if the API (and caller) support + returning a minimal instance + construct if the relevant cell is + down. :returns: Server data in dictionary format """ server_list = [func(request, server, - show_extra_specs=show_extra_specs)["server"] - for server in servers] + show_extra_specs=show_extra_specs, + show_extended_attr=show_extended_attr, + show_host_status=show_host_status, + show_sec_grp=show_sec_grp, bdms=bdms, + cell_down_support=cell_down_support)["server"] + for server in servers + # Filter out the fake marker instance created by the + # fill_virtual_interface_list online data migration. + if server.uuid != virtual_interface.FAKE_UUID] servers_links = self._get_collection_links(request, servers, coll_name) @@ -243,10 +535,16 @@ def _get_host_id(instance): return utils.generate_hostid(host, project) def _get_addresses(self, request, instance, extend_address=False): + # Hide server addresses while the server is building.
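+        # (An empty dict is returned deliberately: for a server still in
+        # the BUILD state the API shows "addresses": {} until network info
+        # is populated, so clients should read the empty dict as "not yet
+        # allocated" rather than "no networks".)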
+ if instance.vm_state == vm_states.BUILDING: + return {} + context = request.environ["nova.context"] networks = common.get_networks_for_instance(context, instance) - return self._address_builder.index(networks, - extend_address)["addresses"] + + return self._address_builder.index( + request, networks, extend_address, + )["addresses"] def _get_image(self, request, instance): image_ref = instance["image_ref"] @@ -265,34 +563,32 @@ def _get_image(self, request, instance): else: return "" - def _get_flavor_dict(self, request, instance_type, show_extra_specs): + def _get_flavor_dict(self, request, flavor, show_extra_specs): flavordict = { - "vcpus": instance_type.vcpus, - "ram": instance_type.memory_mb, - "disk": instance_type.root_gb, - "ephemeral": instance_type.ephemeral_gb, - "swap": instance_type.swap, - "original_name": instance_type.name + "vcpus": flavor.vcpus, + "ram": flavor.memory_mb, + "disk": flavor.root_gb, + "ephemeral": flavor.ephemeral_gb, + "swap": flavor.swap, + "original_name": flavor.name } if show_extra_specs: - flavordict['extra_specs'] = instance_type.extra_specs + flavordict['extra_specs'] = flavor.extra_specs return flavordict def _get_flavor(self, request, instance, show_extra_specs): - instance_type = instance.get_flavor() - if not instance_type: - LOG.warning("Instance has had its instance_type removed " + flavor = instance.get_flavor() + if not flavor: + LOG.warning("Instance has had its flavor removed " "from the DB", instance=instance) return {} if api_version_request.is_supported(request, min_version="2.47"): - return self._get_flavor_dict(request, instance_type, - show_extra_specs) + return self._get_flavor_dict(request, flavor, show_extra_specs) - flavor_id = instance_type["flavorid"] - flavor_bookmark = self._flavor_builder._get_bookmark_link(request, - flavor_id, - "flavors") + flavor_id = flavor["flavorid"] + flavor_bookmark = self._flavor_builder._get_bookmark_link( + request, flavor_id, "flavors") return { "id": str(flavor_id), "links": [{ @@ -341,3 +637,120 @@ def _get_fault(self, request, instance): fault_dict['details'] = fault["details"] return fault_dict + + def _add_host_status(self, servers, instances, unknown_only=False): + """Adds the ``host_status`` field to the list of servers + + This method takes care to filter instances from down cells since they + do not have a host set and as such we cannot determine the host status. + + :param servers: list of detailed server dicts for the API response + body; this list is modified by reference by updating the server + dicts within the list + :param instances: list of Instance objects + :param unknown_only: whether to show only UNKNOWN host status + """ + # Filter out instances from down cells which do not have a host field. + instances = [instance for instance in instances if 'host' in instance] + # Get the dict, keyed by instance.uuid, of host status values. + host_statuses = self.compute_api.get_instances_host_statuses(instances) + for server in servers: + # Filter out anything that is not in the resulting dict because + # we had to filter the list of instances above for down cells. + if server['id'] in host_statuses: + host_status = host_statuses[server['id']] + if unknown_only and host_status != fields.HostStatus.UNKNOWN: + # Filter servers that are not allowed by policy to see + # host_status values other than UNKNOWN. 
+ continue + server['host_status'] = host_status + + def _add_security_grps(self, req, servers, instances, + create_request=False): + if not len(servers): + return + + # If the request is a POST create server request, we get the security + # groups intended for an instance from the request. This is necessary + # because the requested security groups for the instance have not yet + # been sent to neutron. + # Starting from microversion 2.75, security groups are returned in + # the PUT and POST Rebuild responses as well. + if not create_request: + context = req.environ['nova.context'] + sg_instance_bindings = ( + security_group_api.get_instances_security_groups_bindings( + context, servers)) + for server in servers: + groups = sg_instance_bindings.get(server['id']) + if groups: + server['security_groups'] = groups + + # This section is for the POST create server request, where there + # is only a single server in the list. + else: + # try converting to json + req_obj = jsonutils.loads(req.body) + # Add the security groups to the server; if none were in the + # request, add the default group since that is the group the + # server is part of + servers[0]['security_groups'] = req_obj['server'].get( + 'security_groups', [{'name': 'default'}]) + + @staticmethod + def _get_instance_bdms_in_multiple_cells(ctxt, instance_uuids): + inst_maps = objects.InstanceMappingList.get_by_instance_uuids( + ctxt, instance_uuids) + + cell_mappings = {} + for inst_map in inst_maps: + if (inst_map.cell_mapping is not None and + inst_map.cell_mapping.uuid not in cell_mappings): + cell_mappings.update( + {inst_map.cell_mapping.uuid: inst_map.cell_mapping}) + + bdms = {} + results = nova_context.scatter_gather_cells( + ctxt, cell_mappings.values(), + nova_context.CELL_TIMEOUT, + objects.BlockDeviceMappingList.bdms_by_instance_uuid, + instance_uuids) + for cell_uuid, result in results.items(): + if isinstance(result, Exception): + LOG.warning('Failed to get block device mappings for cell %s', + cell_uuid) + elif result is nova_context.did_not_respond_sentinel: + LOG.warning('Timeout getting block device mappings for cell ' + '%s', cell_uuid) + else: + bdms.update(result) + return bdms + + def _add_volumes_attachments(self, server, bdms, + add_delete_on_termination): + # server['id'] is guaranteed to be in the cache due to + # the core API adding it in the 'detail' or 'show' method. + # If that instance has since been deleted, it won't be in the + # 'bdms' dictionary though, so use 'get' to avoid KeyErrors. + instance_bdms = bdms.get(server['id'], []) + volumes_attached = [] + for bdm in instance_bdms: + if bdm.get('volume_id'): + volume_attached = {'id': bdm['volume_id']} + if add_delete_on_termination: + volume_attached['delete_on_termination'] = ( + bdm['delete_on_termination']) + volumes_attached.append(volume_attached) + # NOTE(mriedem): The os-extended-volumes prefix should not be used for + # new attributes after v2.1. They are only in v2.1 for backward compat + # with v2.0.
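+        # For example, with a single attached volume and
+        # add_delete_on_termination=True, the server dict gains:
+        #   "os-extended-volumes:volumes_attached": [
+        #       {"id": "<volume_id>", "delete_on_termination": False}
+        #   ]
+        # (illustrative values; the volume id is a placeholder)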
+ key = "os-extended-volumes:volumes_attached" + server[key] = volumes_attached + + @staticmethod + def _get_server_groups(context, instance): + try: + sg = objects.InstanceGroup.get_by_instance_uuid(context, + instance.uuid) + return [sg.uuid] + except exception.InstanceGroupNotFound: + return [] diff --git a/nova/api/openstack/compute/volumes.py b/nova/api/openstack/compute/volumes.py index a97ae4e3718..003f96deab8 100644 --- a/nova/api/openstack/compute/volumes.py +++ b/nova/api/openstack/compute/volumes.py @@ -25,7 +25,7 @@ from nova.api.openstack.compute.schemas import volumes as volumes_schema from nova.api.openstack import wsgi from nova.api import validation -from nova import compute +from nova.compute import api as compute from nova.compute import vm_states from nova import exception from nova.i18n import _ @@ -34,8 +34,6 @@ from nova.policies import volumes_attachments as va_policies from nova.volume import cinder -ALIAS = "os-volumes" - def _translate_volume_detail_view(context, vol): """Maps keys for volumes details view.""" @@ -70,9 +68,18 @@ def _translate_volume_summary_view(context, vol): # } # } attachment = list(vol['attachments'].items())[0] - d['attachments'] = [_translate_attachment_detail_view(vol['id'], - attachment[0], - attachment[1].get('mountpoint'))] + d['attachments'] = [ + { + 'id': vol['id'], + 'volumeId': vol['id'], + 'serverId': attachment[0], + } + ] + + mountpoint = attachment[1].get('mountpoint') + if mountpoint: + d['attachments'][0]['device'] = mountpoint + else: d['attachments'] = [{}] @@ -98,15 +105,16 @@ class VolumeController(wsgi.Controller): """The Volumes API controller for the OpenStack API.""" def __init__(self): - self.volume_api = cinder.API() super(VolumeController, self).__init__() + self.volume_api = cinder.API() @wsgi.Controller.api_version("2.1", MAX_PROXY_API_SUPPORT_VERSION) @wsgi.expected_errors(404) def show(self, req, id): """Return data about the given volume.""" context = req.environ['nova.context'] - context.can(vol_policies.BASE_POLICY_NAME) + context.can(vol_policies.POLICY_NAME % 'show', + target={'project_id': context.project_id}) try: vol = self.volume_api.get(context, id) @@ -121,7 +129,8 @@ def show(self, req, id): def delete(self, req, id): """Delete a volume.""" context = req.environ['nova.context'] - context.can(vol_policies.BASE_POLICY_NAME) + context.can(vol_policies.POLICY_NAME % 'delete', + target={'project_id': context.project_id}) try: self.volume_api.delete(context, id) @@ -135,6 +144,9 @@ def delete(self, req, id): @wsgi.expected_errors(()) def index(self, req): """Returns a summary list of volumes.""" + context = req.environ['nova.context'] + context.can(vol_policies.POLICY_NAME % 'list', + target={'project_id': context.project_id}) return self._items(req, entity_maker=_translate_volume_summary_view) @wsgi.Controller.api_version("2.1", MAX_PROXY_API_SUPPORT_VERSION) @@ -142,12 +154,14 @@ def index(self, req): @wsgi.expected_errors(()) def detail(self, req): """Returns a detailed list of volumes.""" + context = req.environ['nova.context'] + context.can(vol_policies.POLICY_NAME % 'detail', + target={'project_id': context.project_id}) return self._items(req, entity_maker=_translate_volume_detail_view) def _items(self, req, entity_maker): """Returns a list of volumes, transformed through entity_maker.""" context = req.environ['nova.context'] - context.can(vol_policies.BASE_POLICY_NAME) volumes = self.volume_api.get_all(context) limited_list = common.limited(volumes, req) @@ -160,7 +174,8 @@ def _items(self, req, 
entity_maker): def create(self, req, body): """Creates a new volume.""" context = req.environ['nova.context'] - context.can(vol_policies.BASE_POLICY_NAME) + context.can(vol_policies.POLICY_NAME % 'create', + target={'project_id': context.project_id}) vol = body['volume'] @@ -209,29 +224,46 @@ def create(self, req, body): return wsgi.ResponseObject(result, headers=dict(location=location)) -def _translate_attachment_detail_view(volume_id, instance_uuid, mountpoint): - """Maps keys for attachment details view.""" +def _translate_attachment_detail_view( + bdm, + show_tag=False, + show_delete_on_termination=False, + show_attachment_id_bdm_uuid=False, +): + """Maps keys for attachment details view. + + :param bdm: BlockDeviceMapping object for an attached volume + :param show_tag: True if the "tag" field should be in the response, False + to exclude the "tag" field from the response + :param show_delete_on_termination: True if the "delete_on_termination" + field should be in the response, False to exclude the + "delete_on_termination" field from the response + :param show_attachment_id_bdm_uuid: True if the "attachment_id" and + "bdm_uuid" fields should be in the response. Also controls when the + "id" field is included. + """ - d = _translate_attachment_summary_view(volume_id, - instance_uuid, - mountpoint) + d = {} - # No additional data / lookups at the moment - return d + if not show_attachment_id_bdm_uuid: + d['id'] = bdm.volume_id + d['volumeId'] = bdm.volume_id -def _translate_attachment_summary_view(volume_id, instance_uuid, mountpoint): - """Maps keys for attachment summary view.""" - d = {} + d['serverId'] = bdm.instance_uuid + + if bdm.device_name: + d['device'] = bdm.device_name - # NOTE(justinsb): We use the volume id as the id of the attachment object - d['id'] = volume_id + if show_tag: + d['tag'] = bdm.tag - d['volumeId'] = volume_id + if show_delete_on_termination: + d['delete_on_termination'] = bdm.delete_on_termination - d['serverId'] = instance_uuid - if mountpoint: - d['device'] = mountpoint + if show_attachment_id_bdm_uuid: + d['attachment_id'] = bdm.attachment_id + d['bdm_uuid'] = bdm.uuid return d @@ -263,24 +295,33 @@ def __init__(self): super(VolumeAttachmentController, self).__init__() @wsgi.expected_errors(404) - @validation.query_schema(volumes_schema.index_query) + @validation.query_schema(volumes_schema.index_query_275, '2.75') + @validation.query_schema(volumes_schema.index_query, '2.0', '2.74') def index(self, req, server_id): """Returns the list of volume attachments for a given instance.""" context = req.environ['nova.context'] - context.can(va_policies.POLICY_ROOT % 'index') - instance = common.get_instance(self.compute_api, context, server_id) + instance = common.get_instance(self.compute_api, context, server_id) + context.can(va_policies.POLICY_ROOT % 'index', + target={'project_id': instance.project_id}) bdms = objects.BlockDeviceMappingList.get_by_instance_uuid( context, instance.uuid) limited_list = common.limited(bdms, req) results = [] + show_tag = api_version_request.is_supported(req, '2.70') + show_delete_on_termination = api_version_request.is_supported( + req, '2.79') + show_attachment_id_bdm_uuid = api_version_request.is_supported( + req, '2.89') for bdm in limited_list: if bdm.volume_id: - va = _translate_attachment_summary_view(bdm.volume_id, - bdm.instance_uuid, - bdm.device_name) + va = _translate_attachment_detail_view( + bdm, + show_tag=show_tag, + show_delete_on_termination=show_delete_on_termination, + show_attachment_id_bdm_uuid=show_attachment_id_bdm_uuid, + ) results.append(va) return {'volumeAttachments':
results} @@ -289,10 +330,11 @@ def index(self, req, server_id): def show(self, req, server_id, id): """Return data about the given volume attachment.""" context = req.environ['nova.context'] - context.can(va_policies.POLICY_ROOT % 'show') + instance = common.get_instance(self.compute_api, context, server_id) + context.can(va_policies.POLICY_ROOT % 'show', + target={'project_id': instance.project_id}) volume_id = id - instance = common.get_instance(self.compute_api, context, server_id) try: bdm = objects.BlockDeviceMapping.get_by_volume_and_instance( @@ -303,26 +345,38 @@ def show(self, req, server_id, id): {'instance': server_id, 'volume': volume_id}) raise exc.HTTPNotFound(explanation=msg) - assigned_mountpoint = bdm.device_name - return {'volumeAttachment': _translate_attachment_detail_view( - volume_id, - instance.uuid, - assigned_mountpoint)} + show_tag = api_version_request.is_supported(req, '2.70') + show_delete_on_termination = api_version_request.is_supported( + req, '2.79') + show_attachment_id_bdm_uuid = api_version_request.is_supported( + req, '2.89') + return { + 'volumeAttachment': _translate_attachment_detail_view( + bdm, + show_tag=show_tag, + show_delete_on_termination=show_delete_on_termination, + show_attachment_id_bdm_uuid=show_attachment_id_bdm_uuid, + ) + } # TODO(mriedem): This API should return a 202 instead of a 200 response. - @wsgi.expected_errors((400, 404, 409)) + @wsgi.expected_errors((400, 403, 404, 409)) @validation.schema(volumes_schema.create_volume_attachment, '2.0', '2.48') - @validation.schema(volumes_schema.create_volume_attachment_v249, '2.49') + @validation.schema(volumes_schema.create_volume_attachment_v249, '2.49', + '2.78') + @validation.schema(volumes_schema.create_volume_attachment_v279, '2.79') def create(self, req, server_id, body): """Attach a volume to an instance.""" context = req.environ['nova.context'] - context.can(va_policies.POLICY_ROOT % 'create') + instance = common.get_instance(self.compute_api, context, server_id) + context.can(va_policies.POLICY_ROOT % 'create', + target={'project_id': instance.project_id}) volume_id = body['volumeAttachment']['volumeId'] device = body['volumeAttachment'].get('device') tag = body['volumeAttachment'].get('tag') - - instance = common.get_instance(self.compute_api, context, server_id) + delete_on_termination = body['volumeAttachment'].get( + 'delete_on_termination', False) if instance.vm_state in (vm_states.SHELVED, vm_states.SHELVED_OFFLOADED): @@ -333,14 +387,12 @@ def create(self, req, server_id, body): supports_multiattach = common.supports_multiattach_volume(req) device = self.compute_api.attach_volume( context, instance, volume_id, device, tag=tag, - supports_multiattach=supports_multiattach) - except (exception.InstanceUnknownCell, - exception.VolumeNotFound) as e: + supports_multiattach=supports_multiattach, + delete_on_termination=delete_on_termination) + except exception.VolumeNotFound as e: raise exc.HTTPNotFound(explanation=e.format_message()) except (exception.InstanceIsLocked, - exception.DevicePathInUse, - exception.MultiattachNotSupportedByVirtDriver, - exception.MultiattachSupportNotYetAvailable) as e: + exception.DevicePathInUse) as e: raise exc.HTTPConflict(explanation=e.format_message()) except exception.InstanceInvalidState as state_error: common.raise_http_conflict_for_instance_invalid_state(state_error, @@ -352,22 +404,27 @@ def create(self, req, server_id, body): exception.MultiattachNotSupportedOldMicroversion, exception.MultiattachToShelvedNotSupported) as e: raise 
exc.HTTPBadRequest(explanation=e.format_message()) + except exception.TooManyDiskDevices as e: + raise exc.HTTPForbidden(explanation=e.format_message()) # The attach is async + # NOTE(mriedem): It would be nice to use + # _translate_attachment_summary_view here but that does not include + # the 'device' key if device is None or the empty string which would + # be a backward incompatible change. attachment = {} attachment['id'] = volume_id attachment['serverId'] = server_id attachment['volumeId'] = volume_id attachment['device'] = device + if api_version_request.is_supported(req, '2.70'): + attachment['tag'] = tag + if api_version_request.is_supported(req, '2.79'): + attachment['delete_on_termination'] = delete_on_termination return {'volumeAttachment': attachment} - @wsgi.response(202) - @wsgi.expected_errors((400, 404, 409)) - @validation.schema(volumes_schema.update_volume_attachment) - def update(self, req, server_id, id, body): + def _update_volume_swap(self, req, instance, id, body): context = req.environ['nova.context'] - context.can(va_policies.POLICY_ROOT % 'update') - old_volume_id = id try: old_volume = self.volume_api.get(context, old_volume_id) @@ -387,32 +444,96 @@ def update(self, req, server_id, id, body): # NotFound response if that is not existent. raise exc.HTTPBadRequest(explanation=e.format_message()) - instance = common.get_instance(self.compute_api, context, server_id) - try: self.compute_api.swap_volume(context, instance, old_volume, new_volume) except exception.VolumeBDMNotFound as e: raise exc.HTTPNotFound(explanation=e.format_message()) - except exception.InvalidVolume as e: + except (exception.InvalidVolume, + exception.MultiattachSwapVolumeNotSupported) as e: raise exc.HTTPBadRequest(explanation=e.format_message()) except exception.InstanceIsLocked as e: raise exc.HTTPConflict(explanation=e.format_message()) except exception.InstanceInvalidState as state_error: common.raise_http_conflict_for_instance_invalid_state(state_error, - 'swap_volume', server_id) + 'swap_volume', instance.uuid) + + def _update_volume_regular(self, req, instance, id, body): + context = req.environ['nova.context'] + att = body['volumeAttachment'] + # NOTE(danms): We may be doing an update of regular parameters in + # the midst of a swap operation, so to find the original BDM, we need + # to use the old volume ID, which is the one in the path. 
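+        # For example (illustrative request, placeholder ids):
+        #   PUT /servers/{server_id}/os-volume_attachments/{old_volume_id}
+        #   {"volumeAttachment": {"volumeId": "<new_volume_id>"}}
+        # updates the BDM found via {old_volume_id} here, and update() will
+        # afterwards take the swap path because the two ids differ.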
+ volume_id = id + + try: + bdm = objects.BlockDeviceMapping.get_by_volume_and_instance( + context, volume_id, instance.uuid) + + # NOTE(danms): The attachment id is just the (current) volume id + if 'id' in att and att['id'] != volume_id: + raise exc.HTTPBadRequest(explanation='The id property is ' + 'not mutable') + if 'serverId' in att and att['serverId'] != instance.uuid: + raise exc.HTTPBadRequest(explanation='The serverId property ' + 'is not mutable') + if 'device' in att and att['device'] != bdm.device_name: + raise exc.HTTPBadRequest(explanation='The device property is ' + 'not mutable') + if 'tag' in att and att['tag'] != bdm.tag: + raise exc.HTTPBadRequest(explanation='The tag property is ' + 'not mutable') + if 'delete_on_termination' in att: + bdm.delete_on_termination = strutils.bool_from_string( + att['delete_on_termination'], strict=True) + bdm.save() + except exception.VolumeBDMNotFound as e: + raise exc.HTTPNotFound(explanation=e.format_message()) + + @wsgi.response(202) + @wsgi.expected_errors((400, 404, 409)) + @validation.schema(volumes_schema.update_volume_attachment, '2.0', '2.84') + @validation.schema(volumes_schema.update_volume_attachment_v285, + min_version='2.85') + def update(self, req, server_id, id, body): + context = req.environ['nova.context'] + instance = common.get_instance(self.compute_api, context, server_id) + attachment = body['volumeAttachment'] + volume_id = attachment['volumeId'] + only_swap = not api_version_request.is_supported(req, '2.85') + + # NOTE(brinzhang): If the 'volumeId' requested by the user is + # different from the 'id' in the url path, or only swap is allowed by + # the microversion, we should check the swap volume policy. + # otherwise, check the volume update policy. + if only_swap or id != volume_id: + context.can(va_policies.POLICY_ROOT % 'swap', target={}) + else: + context.can(va_policies.POLICY_ROOT % 'update', + target={'project_id': instance.project_id}) + + if only_swap: + # NOTE(danms): Original behavior is always call swap on PUT + self._update_volume_swap(req, instance, id, body) + else: + # NOTE(danms): New behavior is update any supported attachment + # properties first, and then call swap if volumeId differs + self._update_volume_regular(req, instance, id, body) + if id != volume_id: + self._update_volume_swap(req, instance, id, body) @wsgi.response(202) @wsgi.expected_errors((400, 403, 404, 409)) def delete(self, req, server_id, id): """Detach a volume from an instance.""" context = req.environ['nova.context'] - context.can(va_policies.POLICY_ROOT % 'delete') + instance = common.get_instance(self.compute_api, context, server_id, + expected_attrs=['device_metadata']) + context.can(va_policies.POLICY_ROOT % 'delete', + target={'project_id': instance.project_id}) volume_id = id - instance = common.get_instance(self.compute_api, context, server_id, - expected_attrs=['device_metadata']) if instance.vm_state in (vm_states.SHELVED, vm_states.SHELVED_OFFLOADED): _check_request_version(req, '2.20', 'detach_volume', @@ -439,11 +560,9 @@ def delete(self, req, server_id, id): self.compute_api.detach_volume(context, instance, volume) except exception.InvalidVolume as e: raise exc.HTTPBadRequest(explanation=e.format_message()) - except exception.InstanceUnknownCell as e: - raise exc.HTTPNotFound(explanation=e.format_message()) except exception.InvalidInput as e: raise exc.HTTPBadRequest(explanation=e.format_message()) - except exception.InstanceIsLocked as e: + except (exception.InstanceIsLocked, exception.ServiceUnavailable) as 
e: raise exc.HTTPConflict(explanation=e.format_message()) except exception.InstanceInvalidState as state_error: common.raise_http_conflict_for_instance_invalid_state(state_error, @@ -486,7 +605,8 @@ def __init__(self): def show(self, req, id): """Return data about the given snapshot.""" context = req.environ['nova.context'] - context.can(vol_policies.BASE_POLICY_NAME) + context.can(vol_policies.POLICY_NAME % 'snapshots:show', + target={'project_id': context.project_id}) try: vol = self.volume_api.get_snapshot(context, id) @@ -501,7 +621,8 @@ def show(self, req, id): def delete(self, req, id): """Delete a snapshot.""" context = req.environ['nova.context'] - context.can(vol_policies.BASE_POLICY_NAME) + context.can(vol_policies.POLICY_NAME % 'snapshots:delete', + target={'project_id': context.project_id}) try: self.volume_api.delete_snapshot(context, id) @@ -513,6 +634,9 @@ def delete(self, req, id): @wsgi.expected_errors(()) def index(self, req): """Returns a summary list of snapshots.""" + context = req.environ['nova.context'] + context.can(vol_policies.POLICY_NAME % 'snapshots:list', + target={'project_id': context.project_id}) return self._items(req, entity_maker=_translate_snapshot_summary_view) @wsgi.Controller.api_version("2.1", MAX_PROXY_API_SUPPORT_VERSION) @@ -520,12 +644,14 @@ def index(self, req): @wsgi.expected_errors(()) def detail(self, req): """Returns a detailed list of snapshots.""" + context = req.environ['nova.context'] + context.can(vol_policies.POLICY_NAME % 'snapshots:detail', + target={'project_id': context.project_id}) return self._items(req, entity_maker=_translate_snapshot_detail_view) def _items(self, req, entity_maker): """Returns a list of snapshots, transformed through entity_maker.""" context = req.environ['nova.context'] - context.can(vol_policies.BASE_POLICY_NAME) snapshots = self.volume_api.get_all_snapshots(context) limited_list = common.limited(snapshots, req) @@ -538,7 +664,8 @@ def _items(self, req, entity_maker): def create(self, req, body): """Creates a new snapshot.""" context = req.environ['nova.context'] - context.can(vol_policies.BASE_POLICY_NAME) + context.can(vol_policies.POLICY_NAME % 'snapshots:create', + target={'project_id': context.project_id}) snapshot = body['snapshot'] volume_id = snapshot['volume_id'] diff --git a/nova/api/openstack/placement/auth.py b/nova/api/openstack/placement/auth.py deleted file mode 100644 index ff2551e26fa..00000000000 --- a/nova/api/openstack/placement/auth.py +++ /dev/null @@ -1,102 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - - -from keystonemiddleware import auth_token -from oslo_log import log as logging -from oslo_middleware import request_id -import webob.dec -import webob.exc - -from nova.api.openstack.placement import context - -LOG = logging.getLogger(__name__) - - -class Middleware(object): - - def __init__(self, application, **kwargs): - self.application = application - - -# NOTE(cdent): Only to be used in tests where auth is being faked. 
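The NoAuthMiddleware removed just below derives a fake identity from an 'X-Auth-Token' of the form 'user:project'. A minimal sketch of just that parsing, relying on str.partition returning an empty remainder when the separator is absent (the function name is illustrative):

def parse_fake_token(token):
    # 'admin:proj' -> ('admin', 'proj'); a bare 'admin' -> ('admin', 'admin')
    user_id, _sep, project_id = token.partition(':')
    return user_id, project_id or user_id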
-class NoAuthMiddleware(Middleware): - """Require a token if one isn't present.""" - - def __init__(self, application): - self.application = application - - @webob.dec.wsgify - def __call__(self, req): - if req.environ['PATH_INFO'] == '/': - return self.application - - if 'X-Auth-Token' not in req.headers: - return webob.exc.HTTPUnauthorized() - - token = req.headers['X-Auth-Token'] - user_id, _sep, project_id = token.partition(':') - project_id = project_id or user_id - if user_id == 'admin': - roles = ['admin'] - else: - roles = [] - req.headers['X_USER_ID'] = user_id - req.headers['X_TENANT_ID'] = project_id - req.headers['X_ROLES'] = ','.join(roles) - return self.application - - -class PlacementKeystoneContext(Middleware): - """Make a request context from keystone headers.""" - - @webob.dec.wsgify - def __call__(self, req): - req_id = req.environ.get(request_id.ENV_REQUEST_ID) - - ctx = context.RequestContext.from_environ( - req.environ, request_id=req_id) - - if ctx.user_id is None and req.environ['PATH_INFO'] != '/': - LOG.debug("Neither X_USER_ID nor X_USER found in request") - return webob.exc.HTTPUnauthorized() - - req.environ['placement.context'] = ctx - return self.application - - -class PlacementAuthProtocol(auth_token.AuthProtocol): - """A wrapper on Keystone auth_token middleware. - - Does not perform verification of authentication tokens - for root in the API. - - """ - def __init__(self, app, conf): - self._placement_app = app - super(PlacementAuthProtocol, self).__init__(app, conf) - - def __call__(self, environ, start_response): - if environ['PATH_INFO'] == '/': - return self._placement_app(environ, start_response) - - return super(PlacementAuthProtocol, self).__call__( - environ, start_response) - - -def filter_factory(global_conf, **local_conf): - conf = global_conf.copy() - conf.update(local_conf) - - def auth_filter(app): - return PlacementAuthProtocol(app, conf) - return auth_filter diff --git a/nova/api/openstack/placement/context.py b/nova/api/openstack/placement/context.py deleted file mode 100644 index ee0786f494c..00000000000 --- a/nova/api/openstack/placement/context.py +++ /dev/null @@ -1,52 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from oslo_context import context -from oslo_db.sqlalchemy import enginefacade - -from nova.api.openstack.placement import exception -from nova.api.openstack.placement import policy - - -@enginefacade.transaction_context_provider -class RequestContext(context.RequestContext): - - def can(self, action, target=None, fatal=True): - """Verifies that the given action is valid on the target in this - context. - - :param action: string representing the action to be checked. - :param target: As much information about the object being operated on - as possible. The target argument should be a dict instance or an - instance of a class that fully supports the Mapping abstract base - class and deep copying. For object creation this should be a - dictionary representing the location of the object e.g. 
- ``{'project_id': context.project_id}``. If None, then this default - target will be considered:: - - {'project_id': self.project_id, 'user_id': self.user_id} - :param fatal: if False, will return False when an - exception.PolicyNotAuthorized occurs. - :raises nova.api.openstack.placement.exception.PolicyNotAuthorized: - if verification fails and fatal is True. - :return: returns a non-False value (not necessarily "True") if - authorized and False if not authorized and fatal is False. - """ - if target is None: - target = {'project_id': self.project_id, - 'user_id': self.user_id} - try: - return policy.authorize(self, action, target) - except exception.PolicyNotAuthorized: - if fatal: - raise - return False diff --git a/nova/api/openstack/placement/db_api.py b/nova/api/openstack/placement/db_api.py deleted file mode 100644 index 792c168a384..00000000000 --- a/nova/api/openstack/placement/db_api.py +++ /dev/null @@ -1,44 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -"""Database context manager for placement database connection, kept in its -own file so the nova db_api (which has cascading imports) is not imported. -""" - - -from oslo_db.sqlalchemy import enginefacade - - -placement_context_manager = enginefacade.transaction_context() - - -def _get_db_conf(conf_group): - return dict(conf_group.items()) - - -def configure(conf): - # If [placement_database]/connection is not set in conf, then placement - # data will be stored in the nova_api database. - if conf.placement_database.connection is None: - placement_context_manager.configure( - **_get_db_conf(conf.api_database)) - else: - placement_context_manager.configure( - **_get_db_conf(conf.placement_database)) - - -def get_placement_engine(): - return placement_context_manager.get_legacy_facade().get_engine() - - -@enginefacade.transaction_context_provider -class DbContext(object): - """Stub class for db session handling outside of web requests.""" diff --git a/nova/api/openstack/placement/deploy.py b/nova/api/openstack/placement/deploy.py deleted file mode 100644 index 28dde5c164b..00000000000 --- a/nova/api/openstack/placement/deploy.py +++ /dev/null @@ -1,113 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
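The removed db_api.py above leans on oslo.db's enginefacade throughout. For readers unfamiliar with that pattern, a minimal self-contained sketch; the in-memory SQLite connection and the run_probe name are assumptions for illustration:

import sqlalchemy
from oslo_db.sqlalchemy import enginefacade

context_manager = enginefacade.transaction_context()
context_manager.configure(connection='sqlite://')


@enginefacade.transaction_context_provider
class DbContext(object):
    """Carrier object the decorator augments with session state."""


def run_probe(ctx):
    # 'reader.using' opens a session tied to ctx for the with-block.
    with context_manager.reader.using(ctx) as session:
        return session.execute(sqlalchemy.text('SELECT 1')).scalar()

run_probe(DbContext()) returns 1 once the context manager is configured, which mirrors how the removed code handed a DbContext to the startup sync helpers outside of web requests.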
-"""Deployment handling for Placmenent API.""" - -from microversion_parse import middleware as mp_middleware -import oslo_middleware -from oslo_middleware import cors - -from nova.api.openstack.placement import auth -from nova.api.openstack.placement import db_api -from nova.api.openstack.placement import fault_wrap -from nova.api.openstack.placement import handler -from nova.api.openstack.placement import microversion -from nova.api.openstack.placement.objects import resource_provider -from nova.api.openstack.placement import requestlog -from nova.api.openstack.placement import util - - -# TODO(cdent): NAME points to the config project being used, so for -# now this is "nova" but we probably want "placement" eventually. -NAME = "nova" - - -def deploy(conf): - """Assemble the middleware pipeline leading to the placement app.""" - if conf.api.auth_strategy == 'noauth2': - auth_middleware = auth.NoAuthMiddleware - else: - # Do not use 'oslo_config_project' param here as the conf - # location may have been overridden earlier in the deployment - # process with OS_PLACEMENT_CONFIG_DIR in wsgi.py. - auth_middleware = auth.filter_factory( - {}, oslo_config_config=conf) - - # Pass in our CORS config, if any, manually as that's a) - # explicit, b) makes testing more straightfoward, c) let's - # us control the use of cors by the presence of its config. - conf.register_opts(cors.CORS_OPTS, 'cors') - if conf.cors.allowed_origin: - cors_middleware = oslo_middleware.CORS.factory( - {}, **conf.cors) - else: - cors_middleware = None - - context_middleware = auth.PlacementKeystoneContext - req_id_middleware = oslo_middleware.RequestId - microversion_middleware = mp_middleware.MicroversionMiddleware - fault_middleware = fault_wrap.FaultWrapper - request_log = requestlog.RequestLog - - application = handler.PlacementHandler() - # configure microversion middleware in the old school way - application = microversion_middleware( - application, microversion.SERVICE_TYPE, microversion.VERSIONS, - json_error_formatter=util.json_error_formatter) - - # NOTE(cdent): The ordering here is important. The list is ordered - # from the inside out. For a single request req_id_middleware is called - # first and microversion_middleware last. Then the request is finally - # passed to the application (the PlacementHandler). At that point - # the response ascends the middleware in the reverse of the - # order the request went in. This order ensures that log messages - # all see the same contextual information including request id and - # authentication information. - for middleware in (fault_middleware, - request_log, - context_middleware, - auth_middleware, - cors_middleware, - req_id_middleware, - ): - if middleware: - application = middleware(application) - - return application - - -def update_database(): - """Do any database updates required at process boot time, such as - updating the traits table. - """ - ctx = db_api.DbContext() - resource_provider.ensure_trait_sync(ctx) - resource_provider.ensure_rc_cache(ctx) - - -# NOTE(cdent): Althought project_name is no longer used because of the -# resolution of https://bugs.launchpad.net/nova/+bug/1734491, loadapp() -# is considered a public interface for the creation of a placement -# WSGI app so must maintain its interface. The canonical placement WSGI -# app is created by init_application in wsgi.py, but this is not -# required and in fact can be limiting. loadapp() may be used from -# fixtures or arbitrary WSGI frameworks and loaders. 
-def loadapp(config, project_name=NAME):
-    """WSGI application creator for placement.
-
-    :param config: An oslo_config.cfg.ConfigOpts containing placement
-                   configuration.
-    :param project_name: oslo_config project name. Ignored, preserved for
-                         backwards compatibility
-    """
-    application = deploy(config)
-    update_database()
-    return application
diff --git a/nova/api/openstack/placement/direct.py b/nova/api/openstack/placement/direct.py
deleted file mode 100644
index 66e11e7f62c..00000000000
--- a/nova/api/openstack/placement/direct.py
+++ /dev/null
@@ -1,94 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-"""Call any URI in the placement service directly without real HTTP.
-
-This is useful for those cases where processes wish to manipulate the
-Placement datastore but do not want to run Placement as a long running
-service. A PlacementDirect context manager is provided. Within that,
-HTTP requests may be made as normal, but they will not actually traverse
-a real socket.
-"""
-
-from keystoneauth1 import adapter
-from keystoneauth1 import session
-import mock
-from oslo_utils import uuidutils
-import requests
-from wsgi_intercept import interceptor
-
-from nova.api.openstack.placement import deploy
-
-
-class PlacementDirect(interceptor.RequestsInterceptor):
-    """Provide access to the placement service without real HTTP.
-
-    wsgi-intercept is used to provide a keystoneauth1 Adapter that has access
-    to an in-process placement service. This provides access to making changes
-    to the placement database without requiring HTTP over the network - it
-    remains in-process.
-
-    Authentication to the service is turned off; admin access is assumed.
-
-    Access is provided via a context manager which is responsible for
-    turning the wsgi-intercept on and off, and setting and removing
-    mocks required for keystoneauth1 to work around endpoint discovery.
-
-    Example::
-
-      with PlacementDirect(cfg.CONF, latest_microversion=True) as client:
-          allocations = client.get('/allocations/%s' % consumer)
-
-    :param conf: An oslo config with the options used to configure
-                 the placement service (notably database connection
-                 string).
-    :param latest_microversion: If True, API requests will use the latest
-                                microversion if not otherwise specified. If
-                                False (the default), the base microversion is
-                                the default.
-    """
-
-    def __init__(self, conf, latest_microversion=False):
-        conf.set_override('auth_strategy', 'noauth2', group='api')
-        app = lambda: deploy.loadapp(conf)
-        self.url = 'http://%s/placement' % str(uuidutils.generate_uuid())
-        # Supply our own session so the wsgi-intercept can intercept
-        # the right thing.
- request_session = requests.Session() - headers = { - 'x-auth-token': 'admin', - } - # TODO(efried): See below - if latest_microversion: - headers['OpenStack-API-Version'] = 'placement latest' - self.adapter = adapter.Adapter( - session.Session(auth=None, session=request_session, - additional_headers=headers), - service_type='placement', raise_exc=False) - # TODO(efried): Figure out why this isn't working: - # default_microversion='latest' if latest_microversion else None) - self._mocked_endpoint = mock.patch( - 'keystoneauth1.session.Session.get_endpoint', - new=mock.Mock(return_value=self.url)) - super(PlacementDirect, self).__init__(app, url=self.url) - - def __enter__(self): - """Start the wsgi-intercept interceptor and keystone endpoint mock. - - A no auth ksa Adapter is provided to the context being managed. - """ - super(PlacementDirect, self).__enter__() - self._mocked_endpoint.start() - return self.adapter - - def __exit__(self, *exc): - self._mocked_endpoint.stop() - return super(PlacementDirect, self).__exit__(*exc) diff --git a/nova/api/openstack/placement/errors.py b/nova/api/openstack/placement/errors.py deleted file mode 100644 index b22cb38f1cd..00000000000 --- a/nova/api/openstack/placement/errors.py +++ /dev/null @@ -1,47 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -"""Error code symbols to be used in structured JSON error responses. - -These are strings to be used in the 'code' attribute, as described by -the API guideline on `errors`_. - -There must be only one instance of any string value and it should have -only one associated constant SYMBOL. - -In a WSGI handler (representing the sole handler for an HTTP method and -URI) each error condition should get a separate error code. Reusing an -error code in a different handler is not just acceptable, but useful. - -For example 'placement.inventory.inuse' is meaningful and correct in both -``PUT /resource_providers/{uuid}/inventories`` and ``DELETE`` on the same -URI. - -.. _errors: http://specs.openstack.org/openstack/api-wg/guidelines/errors.html -""" - -# NOTE(cdent): This is the simplest thing that can possibly work, for now. -# If it turns out we want to automate this, or put different resources in -# different files, or otherwise change things, that's fine. The only thing -# that needs to be maintained as the same are the strings that API end -# users use. How they are created is completely fungible. - - -# Do not change the string values. Once set, they are set. -# Do not reuse string values. There should be only one symbol for any -# value. 
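-# As a sketch, a handler attaches one of these codes to a structured
-# error response through the webob exception's comment, as the aggregate
-# and allocation handlers elsewhere in this tree do:
-#
-#     raise webob.exc.HTTPConflict(
-#         'the data changed underneath us',
-#         comment=errors.CONCURRENT_UPDATE)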
-DEFAULT = 'placement.undefined_code' -INVENTORY_INUSE = 'placement.inventory.inuse' -CONCURRENT_UPDATE = 'placement.concurrent_update' -DUPLICATE_NAME = 'placement.duplicate_name' -PROVIDER_IN_USE = 'placement.resource_provider.inuse' -PROVIDER_CANNOT_DELETE_PARENT = ( - 'placement.resource_provider.cannot_delete_parent') diff --git a/nova/api/openstack/placement/exception.py b/nova/api/openstack/placement/exception.py deleted file mode 100644 index 7973fb28b26..00000000000 --- a/nova/api/openstack/placement/exception.py +++ /dev/null @@ -1,207 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -"""Exceptions for use in the Placement API.""" - -# NOTE(cdent): The exceptions are copied from nova.exception, where they -# were originally used. To prepare for extracting placement to its own -# repository we wish to no longer do that. Instead, exceptions used by -# placement should be in the placement hierarchy. - -from oslo_log import log as logging - -from nova.i18n import _ - - -LOG = logging.getLogger(__name__) - - -class _BaseException(Exception): - """Base Exception - - To correctly use this class, inherit from it and define - a 'msg_fmt' property. That msg_fmt will get printf'd - with the keyword arguments provided to the constructor. - - """ - msg_fmt = _("An unknown exception occurred.") - - def __init__(self, message=None, **kwargs): - self.kwargs = kwargs - - if not message: - try: - message = self.msg_fmt % kwargs - except Exception: - # NOTE(melwitt): This is done in a separate method so it can be - # monkey-patched during testing to make it a hard failure. - self._log_exception() - message = self.msg_fmt - - self.message = message - super(_BaseException, self).__init__(message) - - def _log_exception(self): - # kwargs doesn't match a variable in the message - # log the issue and the kwargs - LOG.exception('Exception in string format operation') - for name, value in self.kwargs.items(): - LOG.error("%s: %s" % (name, value)) # noqa - - def format_message(self): - # Use the first argument to the python Exception object which - # should be our full exception message, (see __init__). - return self.args[0] - - -class NotFound(_BaseException): - msg_fmt = _("Resource could not be found.") - - -class Exists(_BaseException): - msg_fmt = _("Resource already exists.") - - -class InvalidInventory(_BaseException): - msg_fmt = _("Inventory for '%(resource_class)s' on " - "resource provider '%(resource_provider)s' invalid.") - - -class CannotDeleteParentResourceProvider(_BaseException): - msg_fmt = _("Cannot delete resource provider that is a parent of " - "another. Delete child providers first.") - - -class ConcurrentUpdateDetected(_BaseException): - msg_fmt = _("Another thread concurrently updated the data. " - "Please retry your update") - - -class ResourceProviderConcurrentUpdateDetected(ConcurrentUpdateDetected): - msg_fmt = _("Another thread concurrently updated the resource provider " - "data. 
Please retry your update") - - -class InvalidAllocationCapacityExceeded(InvalidInventory): - msg_fmt = _("Unable to create allocation for '%(resource_class)s' on " - "resource provider '%(resource_provider)s'. The requested " - "amount would exceed the capacity.") - - -class InvalidAllocationConstraintsViolated(InvalidInventory): - msg_fmt = _("Unable to create allocation for '%(resource_class)s' on " - "resource provider '%(resource_provider)s'. The requested " - "amount would violate inventory constraints.") - - -class InvalidInventoryCapacity(InvalidInventory): - msg_fmt = _("Invalid inventory for '%(resource_class)s' on " - "resource provider '%(resource_provider)s'. " - "The reserved value is greater than or equal to total.") - - -class InvalidInventoryCapacityReservedCanBeTotal(InvalidInventoryCapacity): - msg_fmt = _("Invalid inventory for '%(resource_class)s' on " - "resource provider '%(resource_provider)s'. " - "The reserved value is greater than total.") - - -# An exception with this name is used on both sides of the placement/ -# nova interaction. -class InventoryInUse(InvalidInventory): - # NOTE(mriedem): This message cannot change without impacting the - # nova.scheduler.client.report._RE_INV_IN_USE regex. - msg_fmt = _("Inventory for '%(resource_classes)s' on " - "resource provider '%(resource_provider)s' in use.") - - -class InventoryWithResourceClassNotFound(NotFound): - msg_fmt = _("No inventory of class %(resource_class)s found.") - - -class MaxDBRetriesExceeded(_BaseException): - msg_fmt = _("Max retries of DB transaction exceeded attempting to " - "perform %(action)s.") - - -class ObjectActionError(_BaseException): - msg_fmt = _('Object action %(action)s failed because: %(reason)s') - - -class PolicyNotAuthorized(_BaseException): - msg_fmt = _("Policy does not allow %(action)s to be performed.") - - -class ResourceClassCannotDeleteStandard(_BaseException): - msg_fmt = _("Cannot delete standard resource class %(resource_class)s.") - - -class ResourceClassCannotUpdateStandard(_BaseException): - msg_fmt = _("Cannot update standard resource class %(resource_class)s.") - - -class ResourceClassExists(_BaseException): - msg_fmt = _("Resource class %(resource_class)s already exists.") - - -class ResourceClassInUse(_BaseException): - msg_fmt = _("Cannot delete resource class %(resource_class)s. " - "Class is in use in inventory.") - - -class ResourceClassNotFound(NotFound): - msg_fmt = _("No such resource class %(resource_class)s.") - - -# An exception with this name is used on both sides of the placement/ -# nova interaction. 
-class ResourceProviderInUse(_BaseException): - msg_fmt = _("Resource provider has allocations.") - - -class TraitCannotDeleteStandard(_BaseException): - msg_fmt = _("Cannot delete standard trait %(name)s.") - - -class TraitExists(_BaseException): - msg_fmt = _("The Trait %(name)s already exists") - - -class TraitInUse(_BaseException): - msg_fmt = _("The trait %(name)s is in use by a resource provider.") - - -class TraitNotFound(NotFound): - msg_fmt = _("No such trait(s): %(names)s.") - - -class ProjectNotFound(NotFound): - msg_fmt = _("No such project(s): %(external_id)s.") - - -class ProjectExists(Exists): - msg_fmt = _("The project %(external_id)s already exists.") - - -class UserNotFound(NotFound): - msg_fmt = _("No such user(s): %(external_id)s.") - - -class UserExists(Exists): - msg_fmt = _("The user %(external_id)s already exists.") - - -class ConsumerNotFound(NotFound): - msg_fmt = _("No such consumer(s): %(uuid)s.") - - -class ConsumerExists(Exists): - msg_fmt = _("The consumer %(uuid)s already exists.") diff --git a/nova/api/openstack/placement/fault_wrap.py b/nova/api/openstack/placement/fault_wrap.py deleted file mode 100644 index 764d628b496..00000000000 --- a/nova/api/openstack/placement/fault_wrap.py +++ /dev/null @@ -1,48 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. -"""Simple middleware for safely catching unexpected exceptions.""" - -# NOTE(cdent): This is a super simplified replacement for the nova -# FaultWrapper, which does more than placement needs. - -from oslo_log import log as logging -import six -from webob import exc - -from nova.api.openstack.placement import util - -LOG = logging.getLogger(__name__) - - -class FaultWrapper(object): - """Turn an uncaught exception into a status 500. - - Uncaught exceptions usually shouldn't happen, if it does it - means there is a bug in the placement service, which should be - fixed. - """ - - def __init__(self, application): - self.application = application - - def __call__(self, environ, start_response): - try: - return self.application(environ, start_response) - except Exception as unexpected_exception: - LOG.exception('Placement API unexpected error: %s', - unexpected_exception) - formatted_exception = exc.HTTPInternalServerError( - six.text_type(unexpected_exception)) - formatted_exception.json_formatter = util.json_error_formatter - return formatted_exception.generate_response( - environ, start_response) diff --git a/nova/api/openstack/placement/handler.py b/nova/api/openstack/placement/handler.py deleted file mode 100644 index 62b40280d2b..00000000000 --- a/nova/api/openstack/placement/handler.py +++ /dev/null @@ -1,227 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -"""Handlers for placement API. - -Individual handlers are associated with URL paths in the -ROUTE_DECLARATIONS dictionary. At the top level each key is a Routes -compliant path. The value of that key is a dictionary mapping -individual HTTP request methods to a Python function representing a -simple WSGI application for satisfying that request. - -The ``make_map`` method processes ROUTE_DECLARATIONS to create a -Routes.Mapper, including automatic handlers to respond with a -405 when a request is made against a valid URL with an invalid -method. -""" - -import routes -import webob - -from oslo_log import log as logging - -from nova.api.openstack.placement import exception -from nova.api.openstack.placement.handlers import aggregate -from nova.api.openstack.placement.handlers import allocation -from nova.api.openstack.placement.handlers import allocation_candidate -from nova.api.openstack.placement.handlers import inventory -from nova.api.openstack.placement.handlers import resource_class -from nova.api.openstack.placement.handlers import resource_provider -from nova.api.openstack.placement.handlers import root -from nova.api.openstack.placement.handlers import trait -from nova.api.openstack.placement.handlers import usage -from nova.api.openstack.placement import util -from nova.i18n import _ - -LOG = logging.getLogger(__name__) - -# URLs and Handlers -# NOTE(cdent): When adding URLs here, do not use regex patterns in -# the path parameters (e.g. {uuid:[0-9a-zA-Z-]+}) as that will lead -# to 404s that are controlled outside of the individual resources -# and thus do not include specific information on the why of the 404. -ROUTE_DECLARATIONS = { - '/': { - 'GET': root.home, - }, - # NOTE(cdent): This allows '/placement/' and '/placement' to - # both work as the root of the service, which we probably want - # for those situations where the service is mounted under a - # prefix (as it is in devstack). While weird, an empty string is - # a legit key in a dictionary and matches as desired in Routes. 
- '': { - 'GET': root.home, - }, - '/resource_classes': { - 'GET': resource_class.list_resource_classes, - 'POST': resource_class.create_resource_class - }, - '/resource_classes/{name}': { - 'GET': resource_class.get_resource_class, - 'PUT': resource_class.update_resource_class, - 'DELETE': resource_class.delete_resource_class, - }, - '/resource_providers': { - 'GET': resource_provider.list_resource_providers, - 'POST': resource_provider.create_resource_provider - }, - '/resource_providers/{uuid}': { - 'GET': resource_provider.get_resource_provider, - 'DELETE': resource_provider.delete_resource_provider, - 'PUT': resource_provider.update_resource_provider - }, - '/resource_providers/{uuid}/inventories': { - 'GET': inventory.get_inventories, - 'POST': inventory.create_inventory, - 'PUT': inventory.set_inventories, - 'DELETE': inventory.delete_inventories - }, - '/resource_providers/{uuid}/inventories/{resource_class}': { - 'GET': inventory.get_inventory, - 'PUT': inventory.update_inventory, - 'DELETE': inventory.delete_inventory - }, - '/resource_providers/{uuid}/usages': { - 'GET': usage.list_usages - }, - '/resource_providers/{uuid}/aggregates': { - 'GET': aggregate.get_aggregates, - 'PUT': aggregate.set_aggregates - }, - '/resource_providers/{uuid}/allocations': { - 'GET': allocation.list_for_resource_provider, - }, - '/allocations': { - 'POST': allocation.set_allocations, - }, - '/allocations/{consumer_uuid}': { - 'GET': allocation.list_for_consumer, - 'PUT': allocation.set_allocations_for_consumer, - 'DELETE': allocation.delete_allocations, - }, - '/allocation_candidates': { - 'GET': allocation_candidate.list_allocation_candidates, - }, - '/traits': { - 'GET': trait.list_traits, - }, - '/traits/{name}': { - 'GET': trait.get_trait, - 'PUT': trait.put_trait, - 'DELETE': trait.delete_trait, - }, - '/resource_providers/{uuid}/traits': { - 'GET': trait.list_traits_for_resource_provider, - 'PUT': trait.update_traits_for_resource_provider, - 'DELETE': trait.delete_traits_for_resource_provider - }, - '/usages': { - 'GET': usage.get_total_usages, - }, -} - - -def dispatch(environ, start_response, mapper): - """Find a matching route for the current request. - - If no match is found, raise a 404 response. - If there is a matching route, but no matching handler - for the given method, raise a 405. - """ - result = mapper.match(environ=environ) - if result is None: - raise webob.exc.HTTPNotFound( - json_formatter=util.json_error_formatter) - # We can't reach this code without action being present. - handler = result.pop('action') - environ['wsgiorg.routing_args'] = ((), result) - return handler(environ, start_response) - - -def handle_405(environ, start_response): - """Return a 405 response when method is not allowed. - - If _methods are in routing_args, send an allow header listing - the methods that are possible on the provided URL. - """ - _methods = util.wsgi_path_item(environ, '_methods') - headers = {} - if _methods: - # Ensure allow header is a python 2 or 3 native string (thus - # not unicode in python 2 but stay a string in python 3) - # In the process done by Routes to save the allowed methods - # to its routing table they become unicode in py2. - headers['allow'] = str(_methods) - # Use Exception class as WSGI Application. We don't want to raise here. 
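-    # (RFC 7231, section 6.5.5, expects a 405 response to carry an
-    # Allow header listing the permitted methods; that is what the
-    # headers dict built above provides.)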
-    response = webob.exc.HTTPMethodNotAllowed(
-        _('The method specified is not allowed for this resource.'),
-        headers=headers, json_formatter=util.json_error_formatter)
-    return response(environ, start_response)
-
-
-def make_map(declarations):
-    """Process route declarations to create a Route Mapper."""
-    mapper = routes.Mapper()
-    for route, targets in declarations.items():
-        allowed_methods = []
-        for method in targets:
-            mapper.connect(route, action=targets[method],
-                           conditions=dict(method=[method]))
-            allowed_methods.append(method)
-        allowed_methods = ', '.join(allowed_methods)
-        mapper.connect(route, action=handle_405, _methods=allowed_methods)
-    return mapper
-
-
-class PlacementHandler(object):
-    """Serve Placement API.
-
-    Dispatch to handlers defined in ROUTE_DECLARATIONS.
-    """
-
-    def __init__(self, **local_config):
-        # NOTE(cdent): Local config currently unused.
-        self._map = make_map(ROUTE_DECLARATIONS)
-
-    def __call__(self, environ, start_response):
-        # Check that an incoming request which has a content-length
-        # header with an integer value > 0 also has a non-empty
-        # content-type header. If not, raise a 400.
-        clen = environ.get('CONTENT_LENGTH')
-        try:
-            if clen and (int(clen) > 0) and not environ.get('CONTENT_TYPE'):
-                raise webob.exc.HTTPBadRequest(
-                    _('content-type header required when content-length > 0'),
-                    json_formatter=util.json_error_formatter)
-        except ValueError:
-            raise webob.exc.HTTPBadRequest(
-                _('content-length header must be an integer'),
-                json_formatter=util.json_error_formatter)
-        try:
-            return dispatch(environ, start_response, self._map)
-        # Trap the NotFound exceptions raised by the objects used
-        # with the API and transform them into webob.exc.HTTPNotFound.
-        except exception.NotFound as exc:
-            raise webob.exc.HTTPNotFound(
-                exc, json_formatter=util.json_error_formatter)
-        except exception.PolicyNotAuthorized as exc:
-            raise webob.exc.HTTPForbidden(
-                exc.format_message(),
-                json_formatter=util.json_error_formatter)
-        # Remaining uncaught exceptions will rise first to the Microversion
-        # middleware, where any WebOb generated exceptions will be caught and
-        # transformed into legit HTTP error responses (with microversion
-        # headers added), and then to the FaultWrapper middleware which will
-        # catch anything else and transform them into 500 responses.
-        # NOTE(cdent): There should be very few uncaught exceptions which are
-        # not WebOb exceptions at this stage as the handlers are contained by
-        # the wsgify decorator which will transform those exceptions to
-        # responses itself.
diff --git a/nova/api/openstack/placement/handlers/aggregate.py b/nova/api/openstack/placement/handlers/aggregate.py
deleted file mode 100644
index a26839c3739..00000000000
--- a/nova/api/openstack/placement/handlers/aggregate.py
+++ /dev/null
@@ -1,133 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-"""Aggregate handlers for Placement API.""" - -from oslo_db import exception as db_exc -from oslo_serialization import jsonutils -from oslo_utils import encodeutils -from oslo_utils import timeutils -import webob - -from nova.api.openstack.placement import errors -from nova.api.openstack.placement import exception -from nova.api.openstack.placement import microversion -from nova.api.openstack.placement.objects import resource_provider as rp_obj -from nova.api.openstack.placement.policies import aggregate as policies -from nova.api.openstack.placement.schemas import aggregate as schema -from nova.api.openstack.placement import util -from nova.api.openstack.placement import wsgi_wrapper -from nova.i18n import _ - - -_INCLUDE_GENERATION_VERSION = (1, 19) - - -def _send_aggregates(req, resource_provider, aggregate_uuids): - want_version = req.environ[microversion.MICROVERSION_ENVIRON] - response = req.response - response.status = 200 - payload = _serialize_aggregates(aggregate_uuids) - if want_version.matches(min_version=_INCLUDE_GENERATION_VERSION): - payload['resource_provider_generation'] = resource_provider.generation - response.body = encodeutils.to_utf8( - jsonutils.dumps(payload)) - response.content_type = 'application/json' - if want_version.matches((1, 15)): - req.response.cache_control = 'no-cache' - # We never get an aggregate itself, we get the list of aggregates - # that are associated with a resource provider. We don't record the - # time when that association was made and the time when an aggregate - # uuid was created is not relevant, so here we punt and use utcnow. - req.response.last_modified = timeutils.utcnow(with_timezone=True) - return response - - -def _serialize_aggregates(aggregate_uuids): - return {'aggregates': aggregate_uuids} - - -def _set_aggregates(resource_provider, aggregate_uuids, - increment_generation=False): - """Set aggregates for the resource provider. - - If increment generation is true, the resource provider generation - will be incremented if possible. If that fails (because something - else incremented the generation in another thread), a - ConcurrentUpdateDetected will be raised. - """ - # NOTE(cdent): It's not clear what the DBDuplicateEntry handling - # is doing here, set_aggregates already handles that, but I'm leaving - # it here because it was already there. - try: - resource_provider.set_aggregates( - aggregate_uuids, increment_generation=increment_generation) - except exception.ConcurrentUpdateDetected as exc: - raise webob.exc.HTTPConflict( - _('Update conflict: %(error)s') % {'error': exc}, - comment=errors.CONCURRENT_UPDATE) - except db_exc.DBDuplicateEntry as exc: - raise webob.exc.HTTPConflict( - _('Update conflict: %(error)s') % {'error': exc}) - - -@wsgi_wrapper.PlacementWsgify -@util.check_accept('application/json') -@microversion.version_handler('1.1') -def get_aggregates(req): - """GET a list of aggregates associated with a resource provider. - - If the resource provider does not exist return a 404. - - On success return a 200 with an application/json body containing a - list of aggregate uuids. 
- """ - context = req.environ['placement.context'] - context.can(policies.LIST) - uuid = util.wsgi_path_item(req.environ, 'uuid') - resource_provider = rp_obj.ResourceProvider.get_by_uuid( - context, uuid) - aggregate_uuids = resource_provider.get_aggregates() - - return _send_aggregates(req, resource_provider, aggregate_uuids) - - -@wsgi_wrapper.PlacementWsgify -@util.require_content('application/json') -@microversion.version_handler('1.1') -def set_aggregates(req): - context = req.environ['placement.context'] - context.can(policies.UPDATE) - want_version = req.environ[microversion.MICROVERSION_ENVIRON] - consider_generation = want_version.matches( - min_version=_INCLUDE_GENERATION_VERSION) - put_schema = schema.PUT_AGGREGATES_SCHEMA_V1_1 - if consider_generation: - put_schema = schema.PUT_AGGREGATES_SCHEMA_V1_19 - uuid = util.wsgi_path_item(req.environ, 'uuid') - resource_provider = rp_obj.ResourceProvider.get_by_uuid( - context, uuid) - data = util.extract_json(req.body, put_schema) - if consider_generation: - # Check for generation conflict - rp_gen = data['resource_provider_generation'] - if resource_provider.generation != rp_gen: - raise webob.exc.HTTPConflict( - _("Resource provider's generation already changed. Please " - "update the generation and try again."), - comment=errors.CONCURRENT_UPDATE) - aggregate_uuids = data['aggregates'] - else: - aggregate_uuids = data - _set_aggregates(resource_provider, aggregate_uuids, - increment_generation=consider_generation) - - return _send_aggregates(req, resource_provider, aggregate_uuids) diff --git a/nova/api/openstack/placement/handlers/allocation.py b/nova/api/openstack/placement/handlers/allocation.py deleted file mode 100644 index 8063cc9b374..00000000000 --- a/nova/api/openstack/placement/handlers/allocation.py +++ /dev/null @@ -1,555 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -"""Placement API handlers for setting and deleting allocations.""" - -import collections -import uuid - -from oslo_log import log as logging -from oslo_serialization import jsonutils -from oslo_utils import encodeutils -from oslo_utils import timeutils -from oslo_utils import uuidutils -import webob - -from nova.api.openstack.placement import errors -from nova.api.openstack.placement import exception -from nova.api.openstack.placement import microversion -from nova.api.openstack.placement.objects import resource_provider as rp_obj -from nova.api.openstack.placement.policies import allocation as policies -from nova.api.openstack.placement.schemas import allocation as schema -from nova.api.openstack.placement import util -from nova.api.openstack.placement import wsgi_wrapper -from nova.i18n import _ - - -LOG = logging.getLogger(__name__) - - -def _last_modified_from_allocations(allocations, want_version): - """Given a set of allocation objects, returns the last modified timestamp. - """ - # NOTE(cdent): The last_modified for an allocation will always be - # based off the created_at column because allocations are only - # ever inserted, never updated. 
-    last_modified = None
-    # Only calculate last-modified if we are using a microversion that
-    # supports it.
-    get_last_modified = want_version and want_version.matches((1, 15))
-    for allocation in allocations:
-        if get_last_modified:
-            last_modified = util.pick_last_modified(last_modified, allocation)
-
-    last_modified = last_modified or timeutils.utcnow(with_timezone=True)
-    return last_modified
-
-
-def _serialize_allocations_for_consumer(allocations, want_version):
-    """Turn a list of allocations into a dict by resource provider uuid.
-
-    {
-        'allocations': {
-            RP_UUID_1: {
-                'generation': GENERATION,
-                'resources': {
-                    'DISK_GB': 4,
-                    'VCPU': 2
-                }
-            },
-            RP_UUID_2: {
-                'generation': GENERATION,
-                'resources': {
-                    'DISK_GB': 6,
-                    'VCPU': 3
                }
-            }
-        },
-        # project_id and user_id are added with microversion 1.12
-        'project_id': PROJECT_ID,
-        'user_id': USER_ID,
-        # Generation for consumer >= 1.28
-        'consumer_generation': 1
-    }
-    """
-    allocation_data = collections.defaultdict(dict)
-    for allocation in allocations:
-        key = allocation.resource_provider.uuid
-        if 'resources' not in allocation_data[key]:
-            allocation_data[key]['resources'] = {}
-
-        resource_class = allocation.resource_class
-        allocation_data[key]['resources'][resource_class] = allocation.used
-        generation = allocation.resource_provider.generation
-        allocation_data[key]['generation'] = generation
-
-    result = {'allocations': allocation_data}
-    if allocations and want_version.matches((1, 12)):
-        # We're looking at a list of allocations by consumer id so project
-        # and user are consistent across the list.
-        consumer = allocations[0].consumer
-        project_id = consumer.project.external_id
-        user_id = consumer.user.external_id
-        result['project_id'] = project_id
-        result['user_id'] = user_id
-        show_consumer_gen = want_version.matches((1, 28))
-        if show_consumer_gen:
-            result['consumer_generation'] = consumer.generation
-
-    return result
-
-
-def _serialize_allocations_for_resource_provider(allocations,
-                                                 resource_provider,
-                                                 want_version):
-    """Turn a list of allocations into a dict by consumer id.
-
-    {'resource_provider_generation': GENERATION,
-     'allocations':
-       CONSUMER_ID_1: {
-           'resources': {
-              'DISK_GB': 4,
-              'VCPU': 2
-           },
-           # Generation for consumer >= 1.28
-           'consumer_generation': 0
-       },
-       CONSUMER_ID_2: {
-           'resources': {
-              'DISK_GB': 6,
-              'VCPU': 3
-           },
-           # Generation for consumer >= 1.28
-           'consumer_generation': 0
-       }
-    }
-    """
-    show_consumer_gen = want_version.matches((1, 28))
-    allocation_data = collections.defaultdict(dict)
-    for allocation in allocations:
-        key = allocation.consumer.uuid
-        if 'resources' not in allocation_data[key]:
-            allocation_data[key]['resources'] = {}
-
-        resource_class = allocation.resource_class
-        allocation_data[key]['resources'][resource_class] = allocation.used
-
-        if show_consumer_gen:
-            consumer_gen = None
-            if allocation.consumer is not None:
-                consumer_gen = allocation.consumer.generation
-            allocation_data[key]['consumer_generation'] = consumer_gen
-
-    result = {'allocations': allocation_data}
-    result['resource_provider_generation'] = resource_provider.generation
-    return result
-
-
-# TODO(cdent): Extracting this is useful for reuse by reshaper code,
-# but having it in this file seems wrong. However, since it uses
-# _new_allocations it's being left here for now. We need a place for shared
-# handler code, but util.py is already too big and too diverse.
-def create_allocation_list(context, data, consumers):
-    """Create an AllocationList based on provided data.
- - :param context: The placement context. - :param data: A dictionary of multiple allocations by consumer uuid. - :param consumers: A dictionary, keyed by consumer UUID, of Consumer objects - :return: An AllocationList. - :raises: `webob.exc.HTTPBadRequest` if a resource provider included in the - allocations does not exist. - """ - allocation_objects = [] - - for consumer_uuid in data: - allocations = data[consumer_uuid]['allocations'] - consumer = consumers[consumer_uuid] - if allocations: - rp_objs = _resource_providers_by_uuid(context, allocations.keys()) - for resource_provider_uuid in allocations: - resource_provider = rp_objs[resource_provider_uuid] - resources = allocations[resource_provider_uuid]['resources'] - new_allocations = _new_allocations(context, - resource_provider, - consumer, - resources) - allocation_objects.extend(new_allocations) - else: - # The allocations are empty, which means wipe them out. - # Internal to the allocation object this is signalled by a - # used value of 0. - allocations = rp_obj.AllocationList.get_all_by_consumer_id( - context, consumer_uuid) - for allocation in allocations: - allocation.used = 0 - allocation_objects.append(allocation) - - return rp_obj.AllocationList(context, objects=allocation_objects) - - -@wsgi_wrapper.PlacementWsgify -@util.check_accept('application/json') -def list_for_consumer(req): - """List allocations associated with a consumer.""" - context = req.environ['placement.context'] - context.can(policies.ALLOC_LIST) - consumer_id = util.wsgi_path_item(req.environ, 'consumer_uuid') - want_version = req.environ[microversion.MICROVERSION_ENVIRON] - - # NOTE(cdent): There is no way for a 404 to be returned here, - # only an empty result. We do not have a way to validate a - # consumer id. - allocations = rp_obj.AllocationList.get_all_by_consumer_id( - context, consumer_id) - - output = _serialize_allocations_for_consumer(allocations, want_version) - last_modified = _last_modified_from_allocations(allocations, want_version) - allocations_json = jsonutils.dumps(output) - - response = req.response - response.status = 200 - response.body = encodeutils.to_utf8(allocations_json) - response.content_type = 'application/json' - if want_version.matches((1, 15)): - response.last_modified = last_modified - response.cache_control = 'no-cache' - return response - - -@wsgi_wrapper.PlacementWsgify -@util.check_accept('application/json') -def list_for_resource_provider(req): - """List allocations associated with a resource provider.""" - # TODO(cdent): On a shared resource provider (for example a - # giant disk farm) this list could get very long. At the moment - # we have no facility for limiting the output. Given that we are - # using a dict of dicts for the output we are potentially limiting - # ourselves in terms of sorting and filtering. 
- context = req.environ['placement.context'] - context.can(policies.RP_ALLOC_LIST) - want_version = req.environ[microversion.MICROVERSION_ENVIRON] - uuid = util.wsgi_path_item(req.environ, 'uuid') - - # confirm existence of resource provider so we get a reasonable - # 404 instead of empty list - try: - rp = rp_obj.ResourceProvider.get_by_uuid(context, uuid) - except exception.NotFound as exc: - raise webob.exc.HTTPNotFound( - _("Resource provider '%(rp_uuid)s' not found: %(error)s") % - {'rp_uuid': uuid, 'error': exc}) - - allocs = rp_obj.AllocationList.get_all_by_resource_provider(context, rp) - - output = _serialize_allocations_for_resource_provider( - allocs, rp, want_version) - last_modified = _last_modified_from_allocations(allocs, want_version) - allocations_json = jsonutils.dumps(output) - - response = req.response - response.status = 200 - response.body = encodeutils.to_utf8(allocations_json) - response.content_type = 'application/json' - if want_version.matches((1, 15)): - response.last_modified = last_modified - response.cache_control = 'no-cache' - return response - - -def _resource_providers_by_uuid(ctx, rp_uuids): - """Helper method that returns a dict, keyed by resource provider UUID, of - ResourceProvider objects. - - :param ctx: The placement context. - :param rp_uuids: iterable of UUIDs for providers to fetch. - :raises: `webob.exc.HTTPBadRequest` if any of the UUIDs do not refer to - an existing resource provider. - """ - res = {} - for rp_uuid in rp_uuids: - # TODO(jaypipes): Clearly, this is not efficient to do one query for - # each resource provider UUID in the allocations instead of doing a - # single query for all the UUIDs. However, since - # ResourceProviderList.get_all_by_filters() is way too complicated for - # this purpose and doesn't raise NotFound anyway, we'll do this. - # Perhaps consider adding a ResourceProviderList.get_all_by_uuids() - # later on? - try: - res[rp_uuid] = rp_obj.ResourceProvider.get_by_uuid(ctx, rp_uuid) - except exception.NotFound: - raise webob.exc.HTTPBadRequest( - _("Allocation for resource provider '%(rp_uuid)s' " - "that does not exist.") % - {'rp_uuid': rp_uuid}) - return res - - -def _new_allocations(context, resource_provider, consumer, resources): - """Create new allocation objects for a set of resources - - Returns a list of Allocation objects - - :param context: The placement context. - :param resource_provider: The resource provider that has the resources. - :param consumer: The Consumer object consuming the resources. - :param resources: A dict of resource classes and values. 
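-
-    A sketch of a call, with illustrative values::
-
-        _new_allocations(
-            context, resource_provider, consumer,
-            {'VCPU': 2, 'MEMORY_MB': 512})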
- """ - allocations = [] - for resource_class in resources: - allocation = rp_obj.Allocation( - resource_provider=resource_provider, - consumer=consumer, - resource_class=resource_class, - used=resources[resource_class]) - allocations.append(allocation) - return allocations - - -def _delete_consumers(consumers): - """Helper function that deletes any consumer object supplied to it - - :param consumers: iterable of Consumer objects to delete - """ - for consumer in consumers: - try: - consumer.delete() - LOG.debug("Deleted auto-created consumer with consumer UUID " - "%s after failed allocation", consumer.uuid) - except Exception as err: - LOG.warning("Got an exception when deleting auto-created " - "consumer with UUID %s: %s", consumer.uuid, err) - - -def _set_allocations_for_consumer(req, schema): - context = req.environ['placement.context'] - context.can(policies.ALLOC_UPDATE) - consumer_uuid = util.wsgi_path_item(req.environ, 'consumer_uuid') - if not uuidutils.is_uuid_like(consumer_uuid): - raise webob.exc.HTTPBadRequest( - _('Malformed consumer_uuid: %(consumer_uuid)s') % - {'consumer_uuid': consumer_uuid}) - consumer_uuid = str(uuid.UUID(consumer_uuid)) - data = util.extract_json(req.body, schema) - allocation_data = data['allocations'] - - # Normalize allocation data to dict. - want_version = req.environ[microversion.MICROVERSION_ENVIRON] - if not want_version.matches((1, 12)): - allocations_dict = {} - # Allocation are list-ish, transform to dict-ish - for allocation in allocation_data: - resource_provider_uuid = allocation['resource_provider']['uuid'] - allocations_dict[resource_provider_uuid] = { - 'resources': allocation['resources'] - } - allocation_data = allocations_dict - - allocation_objects = [] - # Consumer object saved in case we need to delete the auto-created consumer - # record - consumer = None - # Whether we created a new consumer record - created_new_consumer = False - if not allocation_data: - # The allocations are empty, which means wipe them out. Internal - # to the allocation object this is signalled by a used value of 0. - # We still need to verify the consumer's generation, though, which - # we do in _ensure_consumer() - # NOTE(jaypipes): This will only occur 1.28+. The JSONSchema will - # prevent an empty allocations object from being passed when there is - # no consumer generation, so this is safe to do. - util.ensure_consumer(context, consumer_uuid, data.get('project_id'), - data.get('user_id'), data.get('consumer_generation'), - want_version) - allocations = rp_obj.AllocationList.get_all_by_consumer_id( - context, consumer_uuid) - for allocation in allocations: - allocation.used = 0 - allocation_objects.append(allocation) - else: - # If the body includes an allocation for a resource provider - # that does not exist, raise a 400. 
- rp_objs = _resource_providers_by_uuid(context, allocation_data.keys()) - consumer, created_new_consumer = util.ensure_consumer( - context, consumer_uuid, data.get('project_id'), - data.get('user_id'), data.get('consumer_generation'), - want_version) - for resource_provider_uuid, allocation in allocation_data.items(): - resource_provider = rp_objs[resource_provider_uuid] - new_allocations = _new_allocations(context, - resource_provider, - consumer, - allocation['resources']) - allocation_objects.extend(new_allocations) - - allocations = rp_obj.AllocationList( - context, objects=allocation_objects) - - def _create_allocations(alloc_list): - try: - alloc_list.replace_all() - LOG.debug("Successfully wrote allocations %s", alloc_list) - except Exception: - if created_new_consumer: - _delete_consumers([consumer]) - raise - - try: - _create_allocations(allocations) - # InvalidInventory is a parent for several exceptions that - # indicate either that Inventory is not present, or that - # capacity limits have been exceeded. - except exception.NotFound as exc: - raise webob.exc.HTTPBadRequest( - _("Unable to allocate inventory for consumer " - "%(consumer_uuid)s: %(error)s") % - {'consumer_uuid': consumer_uuid, 'error': exc}) - except exception.InvalidInventory as exc: - raise webob.exc.HTTPConflict( - _('Unable to allocate inventory: %(error)s') % {'error': exc}) - except exception.ConcurrentUpdateDetected as exc: - raise webob.exc.HTTPConflict( - _('Inventory and/or allocations changed while attempting to ' - 'allocate: %(error)s') % {'error': exc}, - comment=errors.CONCURRENT_UPDATE) - - req.response.status = 204 - req.response.content_type = None - return req.response - - -@wsgi_wrapper.PlacementWsgify -@microversion.version_handler('1.0', '1.7') -@util.require_content('application/json') -def set_allocations_for_consumer(req): - return _set_allocations_for_consumer(req, schema.ALLOCATION_SCHEMA) - - -@wsgi_wrapper.PlacementWsgify # noqa -@microversion.version_handler('1.8', '1.11') -@util.require_content('application/json') -def set_allocations_for_consumer(req): - return _set_allocations_for_consumer(req, schema.ALLOCATION_SCHEMA_V1_8) - - -@wsgi_wrapper.PlacementWsgify # noqa -@microversion.version_handler('1.12', '1.27') -@util.require_content('application/json') -def set_allocations_for_consumer(req): - return _set_allocations_for_consumer(req, schema.ALLOCATION_SCHEMA_V1_12) - - -@wsgi_wrapper.PlacementWsgify # noqa -@microversion.version_handler('1.28') -@util.require_content('application/json') -def set_allocations_for_consumer(req): - return _set_allocations_for_consumer(req, schema.ALLOCATION_SCHEMA_V1_28) - - -@wsgi_wrapper.PlacementWsgify -@microversion.version_handler('1.13') -@util.require_content('application/json') -def set_allocations(req): - context = req.environ['placement.context'] - context.can(policies.ALLOC_MANAGE) - want_version = req.environ[microversion.MICROVERSION_ENVIRON] - want_schema = schema.POST_ALLOCATIONS_V1_13 - if want_version.matches((1, 28)): - want_schema = schema.POST_ALLOCATIONS_V1_28 - data = util.extract_json(req.body, want_schema) - - # First, ensure that all consumers referenced in the payload actually - # exist. And if not, create them. Keep a record of auto-created consumers - # so we can clean them up if the end allocation replace_all() fails. 
-    consumers = {}  # dict of Consumer objects, keyed by consumer UUID
-    new_consumers_created = []
-    for consumer_uuid in data:
-        project_id = data[consumer_uuid]['project_id']
-        user_id = data[consumer_uuid]['user_id']
-        consumer_generation = data[consumer_uuid].get('consumer_generation')
-        try:
-            consumer, new_consumer_created = util.ensure_consumer(
-                context, consumer_uuid, project_id, user_id,
-                consumer_generation, want_version)
-            if new_consumer_created:
-                new_consumers_created.append(consumer)
-            consumers[consumer_uuid] = consumer
-        except Exception:
-            # If any errors (for instance, a consumer generation conflict)
-            # occur when ensuring consumer records above, make sure we delete
-            # any auto-created consumers.
-            _delete_consumers(new_consumers_created)
-            raise
-
-    # Create a sequence of allocation objects to be used in one
-    # AllocationList.replace_all() call, which will mean all the changes
-    # happen within a single transaction and with resource provider
-    # and consumer generations (if applicable) checked all in one go.
-    allocations = create_allocation_list(context, data, consumers)
-
-    def _create_allocations(alloc_list):
-        try:
-            alloc_list.replace_all()
-            LOG.debug("Successfully wrote allocations %s", alloc_list)
-        except Exception:
-            _delete_consumers(new_consumers_created)
-            raise
-
-    try:
-        _create_allocations(allocations)
-    except exception.NotFound as exc:
-        raise webob.exc.HTTPBadRequest(
-            _("Unable to allocate inventory %(error)s") % {'error': exc})
-    except exception.InvalidInventory as exc:
-        # InvalidInventory is a parent for several exceptions that
-        # indicate either that Inventory is not present, or that
-        # capacity limits have been exceeded.
-        raise webob.exc.HTTPConflict(
-            _('Unable to allocate inventory: %(error)s') % {'error': exc})
-    except exception.ConcurrentUpdateDetected as exc:
-        raise webob.exc.HTTPConflict(
-            _('Inventory and/or allocations changed while attempting to '
-              'allocate: %(error)s') % {'error': exc},
-            comment=errors.CONCURRENT_UPDATE)
-
-    req.response.status = 204
-    req.response.content_type = None
-    return req.response
-
-
-@wsgi_wrapper.PlacementWsgify
-def delete_allocations(req):
-    context = req.environ['placement.context']
-    context.can(policies.ALLOC_DELETE)
-    consumer_uuid = util.wsgi_path_item(req.environ, 'consumer_uuid')
-
-    allocations = rp_obj.AllocationList.get_all_by_consumer_id(
-        context, consumer_uuid)
-    if allocations:
-        try:
-            allocations.delete_all()
-        # NOTE(pumaranikar): The following NotFound handling covers the case
-        # when the allocation is concurrently deleted from the allocations
-        # list by some other activity. In that case, delete_all() will throw
-        # a NotFound exception.
-        except exception.NotFound as exc:
-            raise webob.exc.HTTPNotFound(
-                _("Allocation for consumer with id %(id)s not found. "
- "error: %(error)s") % - {'id': consumer_uuid, 'error': exc}) - else: - raise webob.exc.HTTPNotFound( - _("No allocations for consumer '%(consumer_uuid)s'") % - {'consumer_uuid': consumer_uuid}) - LOG.debug("Successfully deleted allocations %s", allocations) - - req.response.status = 204 - req.response.content_type = None - return req.response diff --git a/nova/api/openstack/placement/handlers/allocation_candidate.py b/nova/api/openstack/placement/handlers/allocation_candidate.py deleted file mode 100644 index f5425cdf4ff..00000000000 --- a/nova/api/openstack/placement/handlers/allocation_candidate.py +++ /dev/null @@ -1,332 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -"""Placement API handlers for getting allocation candidates.""" - -import collections - -from oslo_serialization import jsonutils -from oslo_utils import encodeutils -from oslo_utils import timeutils -import six -import webob - -from nova.api.openstack.placement import exception -from nova.api.openstack.placement import microversion -from nova.api.openstack.placement.objects import resource_provider as rp_obj -from nova.api.openstack.placement.policies import allocation_candidate as \ - policies -from nova.api.openstack.placement.schemas import allocation_candidate as schema -from nova.api.openstack.placement import util -from nova.api.openstack.placement import wsgi_wrapper -from nova.i18n import _ - - -def _transform_allocation_requests_dict(alloc_reqs): - """Turn supplied list of AllocationRequest objects into a list of - allocations dicts keyed by resource provider uuid of resources involved - in the allocation request. The returned results are intended to be used - as the body of a PUT /allocations/{consumer_uuid} HTTP request at - micoversion 1.12 (and beyond). The JSON objects look like the following: - - [ - { - "allocations": { - $rp_uuid1: { - "resources": { - "MEMORY_MB": 512 - ... - } - }, - $rp_uuid2: { - "resources": { - "DISK_GB": 1024 - ... - } - } - }, - }, - ... - ] - """ - results = [] - - for ar in alloc_reqs: - # A default dict of {$rp_uuid: "resources": {}) - rp_resources = collections.defaultdict(lambda: dict(resources={})) - for rr in ar.resource_requests: - res_dict = rp_resources[rr.resource_provider.uuid]['resources'] - res_dict[rr.resource_class] = rr.amount - results.append(dict(allocations=rp_resources)) - - return results - - -def _transform_allocation_requests_list(alloc_reqs): - """Turn supplied list of AllocationRequest objects into a list of dicts of - resources involved in the allocation request. The returned results is - intended to be able to be used as the body of a PUT - /allocations/{consumer_uuid} HTTP request, prior to microversion 1.12, - so therefore we return a list of JSON objects that looks like the - following: - - [ - { - "allocations": [ - { - "resource_provider": { - "uuid": $rp_uuid, - } - "resources": { - $resource_class: $requested_amount, ... - }, - }, ... - ], - }, ... 
- ] - """ - results = [] - for ar in alloc_reqs: - provider_resources = collections.defaultdict(dict) - for rr in ar.resource_requests: - res_dict = provider_resources[rr.resource_provider.uuid] - res_dict[rr.resource_class] = rr.amount - - allocs = [ - { - "resource_provider": { - "uuid": rp_uuid, - }, - "resources": resources, - } for rp_uuid, resources in provider_resources.items() - ] - alloc = { - "allocations": allocs - } - results.append(alloc) - return results - - -def _transform_provider_summaries(p_sums, requests, want_version): - """Turn supplied list of ProviderSummary objects into a dict, keyed by - resource provider UUID, of dicts of provider and inventory information. - The traits only show up when `want_version` is 1.17 or newer. All the - resource classes are shown when `want_version` is 1.27 or newer while - only requested resources are included in the `provider_summaries` - for older versions. The parent and root provider uuids only show up - when `want_version` is 1.29 or newer. - - { - RP_UUID_1: { - 'resources': { - 'DISK_GB': { - 'capacity': 100, - 'used': 0, - }, - 'VCPU': { - 'capacity': 4, - 'used': 0, - } - }, - # traits shows up from microversion 1.17 - 'traits': [ - 'HW_CPU_X86_AVX512F', - 'HW_CPU_X86_AVX512CD' - ] - # parent/root provider uuids show up from microversion 1.29 - parent_provider_uuid: null, - root_provider_uuid: RP_UUID_1 - }, - RP_UUID_2: { - 'resources': { - 'DISK_GB': { - 'capacity': 100, - 'used': 0, - }, - 'VCPU': { - 'capacity': 4, - 'used': 0, - } - }, - # traits shows up from microversion 1.17 - 'traits': [ - 'HW_NIC_OFFLOAD_TSO', - 'HW_NIC_OFFLOAD_GRO' - ], - # parent/root provider uuids show up from microversion 1.29 - parent_provider_uuid: null, - root_provider_uuid: RP_UUID_2 - } - } - """ - include_traits = want_version.matches((1, 17)) - include_all_resources = want_version.matches((1, 27)) - enable_nested_providers = want_version.matches((1, 29)) - - ret = {} - requested_resources = set() - - for requested_group in requests.values(): - requested_resources |= set(requested_group.resources) - - # if include_all_resources is false, only requested resources are - # included in the provider_summaries. - for ps in p_sums: - resources = { - psr.resource_class: { - 'capacity': psr.capacity, - 'used': psr.used, - } for psr in ps.resources if ( - include_all_resources or - psr.resource_class in requested_resources) - } - - ret[ps.resource_provider.uuid] = {'resources': resources} - - if include_traits: - ret[ps.resource_provider.uuid]['traits'] = [ - t.name for t in ps.traits] - - if enable_nested_providers: - ret[ps.resource_provider.uuid]['parent_provider_uuid'] = ( - ps.resource_provider.parent_provider_uuid) - ret[ps.resource_provider.uuid]['root_provider_uuid'] = ( - ps.resource_provider.root_provider_uuid) - - return ret - - -def _exclude_nested_providers(alloc_cands): - """Exclude allocation requests and provider summaries for old microversions - if they involve more than one provider from the same tree. - """ - # Build a temporary dict, keyed by root RP UUID of sets of UUIDs of all RPs - # in that tree. 
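-    # (Roughly: {root_uuid: {root_uuid, child_uuid_1, child_uuid_2}}.)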
- tree_rps_by_root = collections.defaultdict(set) - for ps in alloc_cands.provider_summaries: - rp_uuid = ps.resource_provider.uuid - root_uuid = ps.resource_provider.root_provider_uuid - tree_rps_by_root[root_uuid].add(rp_uuid) - # We use this to get a list of sets of providers in each tree - tree_sets = list(tree_rps_by_root.values()) - - for a_req in alloc_cands.allocation_requests[:]: - alloc_rp_uuids = set([ - arr.resource_provider.uuid for arr in a_req.resource_requests]) - # If more than one allocation is provided by the same tree, kill - # that allocation request. - if any(len(tree_set & alloc_rp_uuids) > 1 for tree_set in tree_sets): - alloc_cands.allocation_requests.remove(a_req) - - # Exclude eliminated providers from the provider summaries. - all_rp_uuids = set() - for a_req in alloc_cands.allocation_requests: - all_rp_uuids |= set( - arr.resource_provider.uuid for arr in a_req.resource_requests) - for ps in alloc_cands.provider_summaries[:]: - if ps.resource_provider.uuid not in all_rp_uuids: - alloc_cands.provider_summaries.remove(ps) - - return alloc_cands - - -def _transform_allocation_candidates(alloc_cands, requests, want_version): - """Turn supplied AllocationCandidates object into a dict containing - allocation requests and provider summaries. - - { - 'allocation_requests': , - 'provider_summaries': , - } - """ - # exclude nested providers with old microversions - if not want_version.matches((1, 29)): - alloc_cands = _exclude_nested_providers(alloc_cands) - - if want_version.matches((1, 12)): - a_reqs = _transform_allocation_requests_dict( - alloc_cands.allocation_requests) - else: - a_reqs = _transform_allocation_requests_list( - alloc_cands.allocation_requests) - - p_sums = _transform_provider_summaries( - alloc_cands.provider_summaries, requests, want_version) - - return { - 'allocation_requests': a_reqs, - 'provider_summaries': p_sums, - } - - -@wsgi_wrapper.PlacementWsgify -@microversion.version_handler('1.10') -@util.check_accept('application/json') -def list_allocation_candidates(req): - """GET a JSON object with a list of allocation requests and a JSON object - of provider summary objects - - On success return a 200 and an application/json body representing - a collection of allocation requests and provider summaries - """ - context = req.environ['placement.context'] - context.can(policies.LIST) - want_version = req.environ[microversion.MICROVERSION_ENVIRON] - get_schema = schema.GET_SCHEMA_1_10 - if want_version.matches((1, 25)): - get_schema = schema.GET_SCHEMA_1_25 - elif want_version.matches((1, 21)): - get_schema = schema.GET_SCHEMA_1_21 - elif want_version.matches((1, 17)): - get_schema = schema.GET_SCHEMA_1_17 - elif want_version.matches((1, 16)): - get_schema = schema.GET_SCHEMA_1_16 - util.validate_query_params(req, get_schema) - - requests = util.parse_qs_request_groups(req) - limit = req.GET.getall('limit') - # JSONschema has already confirmed that limit has the form - # of an integer. - if limit: - limit = int(limit[0]) - - group_policy = req.GET.getall('group_policy') or None - # Schema ensures we get either "none" or "isolate" - if group_policy: - group_policy = group_policy[0] - else: - # group_policy is required if more than one numbered request group was - # specified. 
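-        # (That is, at least two "resources{N}" query parameters, for
-        # example resources1=VCPU:1&resources2=DISK_GB:10.)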
- if len([rg for rg in requests.values() if rg.use_same_provider]) > 1: - raise webob.exc.HTTPBadRequest( - _('The "group_policy" parameter is required when specifying ' - 'more than one "resources{N}" parameter.')) - - try: - cands = rp_obj.AllocationCandidates.get_by_requests( - context, requests, limit=limit, group_policy=group_policy) - except exception.ResourceClassNotFound as exc: - raise webob.exc.HTTPBadRequest( - _('Invalid resource class in resources parameter: %(error)s') % - {'error': exc}) - except exception.TraitNotFound as exc: - raise webob.exc.HTTPBadRequest(six.text_type(exc)) - - response = req.response - trx_cands = _transform_allocation_candidates(cands, requests, want_version) - json_data = jsonutils.dumps(trx_cands) - response.body = encodeutils.to_utf8(json_data) - response.content_type = 'application/json' - if want_version.matches((1, 15)): - response.cache_control = 'no-cache' - response.last_modified = timeutils.utcnow(with_timezone=True) - return response diff --git a/nova/api/openstack/placement/handlers/inventory.py b/nova/api/openstack/placement/handlers/inventory.py deleted file mode 100644 index 46135761568..00000000000 --- a/nova/api/openstack/placement/handlers/inventory.py +++ /dev/null @@ -1,467 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -"""Inventory handlers for Placement API.""" - -import copy -import operator - -from oslo_db import exception as db_exc -from oslo_serialization import jsonutils -from oslo_utils import encodeutils -import webob - -from nova.api.openstack.placement import errors -from nova.api.openstack.placement import exception -from nova.api.openstack.placement import microversion -from nova.api.openstack.placement.objects import resource_provider as rp_obj -from nova.api.openstack.placement.policies import inventory as policies -from nova.api.openstack.placement.schemas import inventory as schema -from nova.api.openstack.placement import util -from nova.api.openstack.placement import wsgi_wrapper -from nova.db import constants as db_const -from nova.i18n import _ - - -# NOTE(cdent): We keep our own representation of inventory defaults -# and output fields, separate from the versioned object to avoid -# inadvertent API changes when the object defaults are changed. 
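-# For example, an inventory PUT or POST that only provides
-# {"total": 8} for VCPU is filled out with reserved=0, min_unit=1,
-# step_size=1, allocation_ratio=1.0 and max_unit=db_const.MAX_INT
-# from INVENTORY_DEFAULTS below.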
-OUTPUT_INVENTORY_FIELDS = [ - 'total', - 'reserved', - 'min_unit', - 'max_unit', - 'step_size', - 'allocation_ratio', -] -INVENTORY_DEFAULTS = { - 'reserved': 0, - 'min_unit': 1, - 'max_unit': db_const.MAX_INT, - 'step_size': 1, - 'allocation_ratio': 1.0 -} - - -def _extract_inventory(body, schema): - """Extract and validate inventory from JSON body.""" - data = util.extract_json(body, schema) - - inventory_data = copy.copy(INVENTORY_DEFAULTS) - inventory_data.update(data) - - return inventory_data - - -def _extract_inventories(body, schema): - """Extract and validate multiple inventories from JSON body.""" - data = util.extract_json(body, schema) - - inventories = {} - for res_class, raw_inventory in data['inventories'].items(): - inventory_data = copy.copy(INVENTORY_DEFAULTS) - inventory_data.update(raw_inventory) - inventories[res_class] = inventory_data - - data['inventories'] = inventories - return data - - -def _make_inventory_object(resource_provider, resource_class, **data): - """Single place to catch malformed Inventories.""" - # TODO(cdent): Some of the validation checks that are done here - # could be done via JSONschema (using, for example, "minimum": - # 0) for non-negative integers. It's not clear if that is - # duplication or decoupling so leaving it as this for now. - try: - inventory = rp_obj.Inventory( - resource_provider=resource_provider, - resource_class=resource_class, **data) - except (ValueError, TypeError) as exc: - raise webob.exc.HTTPBadRequest( - _('Bad inventory %(class)s for resource provider ' - '%(rp_uuid)s: %(error)s') % {'class': resource_class, - 'rp_uuid': resource_provider.uuid, - 'error': exc}) - return inventory - - -def _send_inventories(req, resource_provider, inventories): - """Send a JSON representation of a list of inventories.""" - response = req.response - response.status = 200 - output, last_modified = _serialize_inventories( - inventories, resource_provider.generation) - response.body = encodeutils.to_utf8(jsonutils.dumps(output)) - response.content_type = 'application/json' - want_version = req.environ[microversion.MICROVERSION_ENVIRON] - if want_version.matches((1, 15)): - response.last_modified = last_modified - response.cache_control = 'no-cache' - return response - - -def _send_inventory(req, resource_provider, inventory, status=200): - """Send a JSON representation of one single inventory.""" - response = req.response - response.status = status - response.body = encodeutils.to_utf8(jsonutils.dumps(_serialize_inventory( - inventory, generation=resource_provider.generation))) - response.content_type = 'application/json' - want_version = req.environ[microversion.MICROVERSION_ENVIRON] - if want_version.matches((1, 15)): - modified = util.pick_last_modified(None, inventory) - response.last_modified = modified - response.cache_control = 'no-cache' - return response - - -def _serialize_inventory(inventory, generation=None): - """Turn a single inventory into a dictionary.""" - data = { - field: getattr(inventory, field) - for field in OUTPUT_INVENTORY_FIELDS - } - if generation: - data['resource_provider_generation'] = generation - return data - - -def _serialize_inventories(inventories, generation): - """Turn a list of inventories in a dict by resource class.""" - inventories_by_class = {inventory.resource_class: inventory - for inventory in inventories} - inventories_dict = {} - last_modified = None - for resource_class, inventory in inventories_by_class.items(): - last_modified = util.pick_last_modified(last_modified, inventory) - 
inventories_dict[resource_class] = _serialize_inventory( - inventory, generation=None) - return ({'resource_provider_generation': generation, - 'inventories': inventories_dict}, last_modified) - - -def _validate_inventory_capacity(version, inventories): - """Validate inventory capacity. - - :param version: request microversion. - :param inventories: Inventory or InventoryList to validate capacities of. - :raises: exception.InvalidInventoryCapacityReservedCanBeTotal if request - microversion is 1.26 or higher and any inventory has capacity < 0. - :raises: exception.InvalidInventoryCapacity if request - microversion is lower than 1.26 and any inventory has capacity <= 0. - """ - if not version.matches((1, 26)): - op = operator.le - exc_class = exception.InvalidInventoryCapacity - else: - op = operator.lt - exc_class = exception.InvalidInventoryCapacityReservedCanBeTotal - if isinstance(inventories, rp_obj.Inventory): - inventories = rp_obj.InventoryList(objects=[inventories]) - for inventory in inventories: - if op(inventory.capacity, 0): - raise exc_class( - resource_class=inventory.resource_class, - resource_provider=inventory.resource_provider.uuid) - - -@wsgi_wrapper.PlacementWsgify -@util.require_content('application/json') -def create_inventory(req): - """POST to create one inventory. - - On success return a 201 response, a location header pointing - to the newly created inventory and an application/json representation - of the inventory. - """ - context = req.environ['placement.context'] - context.can(policies.CREATE) - uuid = util.wsgi_path_item(req.environ, 'uuid') - resource_provider = rp_obj.ResourceProvider.get_by_uuid( - context, uuid) - data = _extract_inventory(req.body, schema.POST_INVENTORY_SCHEMA) - resource_class = data.pop('resource_class') - - inventory = _make_inventory_object(resource_provider, - resource_class, - **data) - - try: - _validate_inventory_capacity( - req.environ[microversion.MICROVERSION_ENVIRON], inventory) - resource_provider.add_inventory(inventory) - except (exception.ConcurrentUpdateDetected, - db_exc.DBDuplicateEntry) as exc: - raise webob.exc.HTTPConflict( - _('Update conflict: %(error)s') % {'error': exc}, - comment=errors.CONCURRENT_UPDATE) - except (exception.InvalidInventoryCapacity, - exception.NotFound) as exc: - raise webob.exc.HTTPBadRequest( - _('Unable to create inventory for resource provider ' - '%(rp_uuid)s: %(error)s') % {'rp_uuid': resource_provider.uuid, - 'error': exc}) - - response = req.response - response.location = util.inventory_url( - req.environ, resource_provider, resource_class) - return _send_inventory(req, resource_provider, inventory, - status=201) - - -@wsgi_wrapper.PlacementWsgify -def delete_inventory(req): - """DELETE to destroy a single inventory. - - If the inventory is in use or resource provider generation is out - of sync return a 409. - - On success return a 204 and an empty body. 
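-
- For example, a hypothetical request:
-
- DELETE /resource_providers/{uuid}/inventories/VCPU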
- """ - context = req.environ['placement.context'] - context.can(policies.DELETE) - uuid = util.wsgi_path_item(req.environ, 'uuid') - resource_class = util.wsgi_path_item(req.environ, 'resource_class') - - resource_provider = rp_obj.ResourceProvider.get_by_uuid( - context, uuid) - try: - resource_provider.delete_inventory(resource_class) - except (exception.ConcurrentUpdateDetected, - exception.InventoryInUse) as exc: - raise webob.exc.HTTPConflict( - _('Unable to delete inventory of class %(class)s: %(error)s') % - {'class': resource_class, 'error': exc}, - comment=errors.CONCURRENT_UPDATE) - except exception.NotFound as exc: - raise webob.exc.HTTPNotFound( - _('No inventory of class %(class)s found for delete: %(error)s') % - {'class': resource_class, 'error': exc}) - - response = req.response - response.status = 204 - response.content_type = None - return response - - -@wsgi_wrapper.PlacementWsgify -@util.check_accept('application/json') -def get_inventories(req): - """GET a list of inventories. - - On success return a 200 with an application/json body representing - a collection of inventories. - """ - context = req.environ['placement.context'] - context.can(policies.LIST) - uuid = util.wsgi_path_item(req.environ, 'uuid') - try: - rp = rp_obj.ResourceProvider.get_by_uuid(context, uuid) - except exception.NotFound as exc: - raise webob.exc.HTTPNotFound( - _("No resource provider with uuid %(uuid)s found : %(error)s") % - {'uuid': uuid, 'error': exc}) - - inv_list = rp_obj.InventoryList.get_all_by_resource_provider(context, rp) - - return _send_inventories(req, rp, inv_list) - - -@wsgi_wrapper.PlacementWsgify -@util.check_accept('application/json') -def get_inventory(req): - """GET one inventory. - - On success return a 200 an application/json body representing one - inventory. - """ - context = req.environ['placement.context'] - context.can(policies.SHOW) - uuid = util.wsgi_path_item(req.environ, 'uuid') - resource_class = util.wsgi_path_item(req.environ, 'resource_class') - try: - rp = rp_obj.ResourceProvider.get_by_uuid(context, uuid) - except exception.NotFound as exc: - raise webob.exc.HTTPNotFound( - _("No resource provider with uuid %(uuid)s found : %(error)s") % - {'uuid': uuid, 'error': exc}) - - inv_list = rp_obj.InventoryList.get_all_by_resource_provider(context, rp) - inventory = inv_list.find(resource_class) - - if not inventory: - raise webob.exc.HTTPNotFound( - _('No inventory of class %(class)s for %(rp_uuid)s') % - {'class': resource_class, 'rp_uuid': uuid}) - - return _send_inventory(req, rp, inventory) - - -@wsgi_wrapper.PlacementWsgify -@util.require_content('application/json') -def set_inventories(req): - """PUT to set all inventory for a resource provider. - - Create, update and delete inventory as required to reset all - the inventory. - - If the resource generation is out of sync, return a 409. - If an inventory to be deleted is in use, return a 409. - If any inventory to be created or updated has settings which are - invalid (for example reserved exceeds capacity), return a 400. - - On success return a 200 with an application/json body representing - the inventories. 
- """ - context = req.environ['placement.context'] - context.can(policies.UPDATE) - uuid = util.wsgi_path_item(req.environ, 'uuid') - resource_provider = rp_obj.ResourceProvider.get_by_uuid( - context, uuid) - - data = _extract_inventories(req.body, schema.PUT_INVENTORY_SCHEMA) - if data['resource_provider_generation'] != resource_provider.generation: - raise webob.exc.HTTPConflict( - _('resource provider generation conflict'), - comment=errors.CONCURRENT_UPDATE) - - inv_list = [] - for res_class, inventory_data in data['inventories'].items(): - inventory = _make_inventory_object( - resource_provider, res_class, **inventory_data) - inv_list.append(inventory) - inventories = rp_obj.InventoryList(objects=inv_list) - - try: - _validate_inventory_capacity( - req.environ[microversion.MICROVERSION_ENVIRON], inventories) - resource_provider.set_inventory(inventories) - except exception.ResourceClassNotFound as exc: - raise webob.exc.HTTPBadRequest( - _('Unknown resource class in inventory for resource provider ' - '%(rp_uuid)s: %(error)s') % {'rp_uuid': resource_provider.uuid, - 'error': exc}) - except exception.InventoryWithResourceClassNotFound as exc: - raise webob.exc.HTTPConflict( - _('Race condition detected when setting inventory. No inventory ' - 'record with resource class for resource provider ' - '%(rp_uuid)s: %(error)s') % {'rp_uuid': resource_provider.uuid, - 'error': exc}) - except (exception.ConcurrentUpdateDetected, - db_exc.DBDuplicateEntry) as exc: - raise webob.exc.HTTPConflict( - _('update conflict: %(error)s') % {'error': exc}, - comment=errors.CONCURRENT_UPDATE) - except exception.InventoryInUse as exc: - raise webob.exc.HTTPConflict( - _('update conflict: %(error)s') % {'error': exc}, - comment=errors.INVENTORY_INUSE) - except exception.InvalidInventoryCapacity as exc: - raise webob.exc.HTTPBadRequest( - _('Unable to update inventory for resource provider ' - '%(rp_uuid)s: %(error)s') % {'rp_uuid': resource_provider.uuid, - 'error': exc}) - - return _send_inventories(req, resource_provider, inventories) - - -@wsgi_wrapper.PlacementWsgify -@microversion.version_handler('1.5', status_code=405) -def delete_inventories(req): - """DELETE all inventory for a resource provider. - - Delete inventory as required to reset all the inventory. - If an inventory to be deleted is in use, return a 409 Conflict. - On success return a 204 No content. - Return 405 Method Not Allowed if the wanted microversion does not match. - """ - context = req.environ['placement.context'] - context.can(policies.DELETE) - uuid = util.wsgi_path_item(req.environ, 'uuid') - resource_provider = rp_obj.ResourceProvider.get_by_uuid( - context, uuid) - - inventories = rp_obj.InventoryList(objects=[]) - - try: - resource_provider.set_inventory(inventories) - except exception.ConcurrentUpdateDetected: - raise webob.exc.HTTPConflict( - _('Unable to delete inventory for resource provider ' - '%(rp_uuid)s because the inventory was updated by ' - 'another process. Please retry your request.') - % {'rp_uuid': resource_provider.uuid}, - comment=errors.CONCURRENT_UPDATE) - except exception.InventoryInUse as ex: - # NOTE(mriedem): This message cannot change without impacting the - # nova.scheduler.client.report._RE_INV_IN_USE regex. 
- raise webob.exc.HTTPConflict(ex.format_message(), - comment=errors.INVENTORY_INUSE) - - response = req.response - response.status = 204 - response.content_type = None - - return response - - -@wsgi_wrapper.PlacementWsgify -@util.require_content('application/json') -def update_inventory(req): - """PUT to update one inventory. - - If the resource generation is out of sync, return a 409. - If the inventory has settings which are invalid (for example - reserved exceeds capacity), return a 400. - - On success return a 200 with an application/json body representing - the inventory. - """ - context = req.environ['placement.context'] - context.can(policies.UPDATE) - uuid = util.wsgi_path_item(req.environ, 'uuid') - resource_class = util.wsgi_path_item(req.environ, 'resource_class') - - resource_provider = rp_obj.ResourceProvider.get_by_uuid( - context, uuid) - - data = _extract_inventory(req.body, schema.BASE_INVENTORY_SCHEMA) - if data['resource_provider_generation'] != resource_provider.generation: - raise webob.exc.HTTPConflict( - _('resource provider generation conflict'), - comment=errors.CONCURRENT_UPDATE) - - inventory = _make_inventory_object(resource_provider, - resource_class, - **data) - - try: - _validate_inventory_capacity( - req.environ[microversion.MICROVERSION_ENVIRON], inventory) - resource_provider.update_inventory(inventory) - except (exception.ConcurrentUpdateDetected, - db_exc.DBDuplicateEntry) as exc: - raise webob.exc.HTTPConflict( - _('update conflict: %(error)s') % {'error': exc}, - comment=errors.CONCURRENT_UPDATE) - except exception.InventoryWithResourceClassNotFound as exc: - raise webob.exc.HTTPBadRequest( - _('No inventory record with resource class for resource provider ' - '%(rp_uuid)s: %(error)s') % {'rp_uuid': resource_provider.uuid, - 'error': exc}) - except exception.InvalidInventoryCapacity as exc: - raise webob.exc.HTTPBadRequest( - _('Unable to update inventory for resource provider ' - '%(rp_uuid)s: %(error)s') % {'rp_uuid': resource_provider.uuid, - 'error': exc}) - - return _send_inventory(req, resource_provider, inventory) diff --git a/nova/api/openstack/placement/handlers/resource_class.py b/nova/api/openstack/placement/handlers/resource_class.py deleted file mode 100644 index be137170989..00000000000 --- a/nova/api/openstack/placement/handlers/resource_class.py +++ /dev/null @@ -1,241 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
-"""Placement API handlers for resource classes.""" - -from oslo_serialization import jsonutils -from oslo_utils import encodeutils -from oslo_utils import timeutils -import webob - -from nova.api.openstack.placement import exception -from nova.api.openstack.placement import microversion -from nova.api.openstack.placement.objects import resource_provider as rp_obj -from nova.api.openstack.placement.policies import resource_class as policies -from nova.api.openstack.placement.schemas import resource_class as schema -from nova.api.openstack.placement import util -from nova.api.openstack.placement import wsgi_wrapper -from nova.i18n import _ - - -def _serialize_links(environ, rc): - url = util.resource_class_url(environ, rc) - links = [{'rel': 'self', 'href': url}] - return links - - -def _serialize_resource_class(environ, rc): - data = { - 'name': rc.name, - 'links': _serialize_links(environ, rc) - } - return data - - -def _serialize_resource_classes(environ, rcs, want_version): - output = [] - last_modified = None - get_last_modified = want_version.matches((1, 15)) - for rc in rcs: - if get_last_modified: - last_modified = util.pick_last_modified(last_modified, rc) - data = _serialize_resource_class(environ, rc) - output.append(data) - last_modified = last_modified or timeutils.utcnow(with_timezone=True) - return ({"resource_classes": output}, last_modified) - - -@wsgi_wrapper.PlacementWsgify -@microversion.version_handler('1.2') -@util.require_content('application/json') -def create_resource_class(req): - """POST to create a resource class. - - On success return a 201 response with an empty body and a location - header pointing to the newly created resource class. - """ - context = req.environ['placement.context'] - context.can(policies.CREATE) - data = util.extract_json(req.body, schema.POST_RC_SCHEMA_V1_2) - - try: - rc = rp_obj.ResourceClass(context, name=data['name']) - rc.create() - except exception.ResourceClassExists: - raise webob.exc.HTTPConflict( - _('Conflicting resource class already exists: %(name)s') % - {'name': data['name']}) - except exception.MaxDBRetriesExceeded: - raise webob.exc.HTTPConflict( - _('Max retries of DB transaction exceeded attempting ' - 'to create resource class: %(name)s, please' - 'try again.') % - {'name': data['name']}) - - req.response.location = util.resource_class_url(req.environ, rc) - req.response.status = 201 - req.response.content_type = None - return req.response - - -@wsgi_wrapper.PlacementWsgify -@microversion.version_handler('1.2') -def delete_resource_class(req): - """DELETE to destroy a single resource class. - - On success return a 204 and an empty body. - """ - name = util.wsgi_path_item(req.environ, 'name') - context = req.environ['placement.context'] - context.can(policies.DELETE) - # The containing application will catch a not found here. - rc = rp_obj.ResourceClass.get_by_name(context, name) - try: - rc.destroy() - except exception.ResourceClassCannotDeleteStandard as exc: - raise webob.exc.HTTPBadRequest( - _('Error in delete resource class: %(error)s') % {'error': exc}) - except exception.ResourceClassInUse as exc: - raise webob.exc.HTTPConflict( - _('Error in delete resource class: %(error)s') % {'error': exc}) - req.response.status = 204 - req.response.content_type = None - return req.response - - -@wsgi_wrapper.PlacementWsgify -@microversion.version_handler('1.2') -@util.check_accept('application/json') -def get_resource_class(req): - """Get a single resource class. 
-
- On success return a 200 with an application/json body representing
- the resource class.
- """
- name = util.wsgi_path_item(req.environ, 'name')
- context = req.environ['placement.context']
- context.can(policies.SHOW)
- want_version = req.environ[microversion.MICROVERSION_ENVIRON]
- # The containing application will catch a not found here.
- rc = rp_obj.ResourceClass.get_by_name(context, name)
-
- req.response.body = encodeutils.to_utf8(jsonutils.dumps(
- _serialize_resource_class(req.environ, rc))
- )
- req.response.content_type = 'application/json'
- if want_version.matches((1, 15)):
- req.response.cache_control = 'no-cache'
- # Non-custom resource classes will return None from pick_last_modified,
- # so the 'or' causes utcnow to be used.
- last_modified = util.pick_last_modified(None, rc) or timeutils.utcnow(
- with_timezone=True)
- req.response.last_modified = last_modified
- return req.response
-
-
-@wsgi_wrapper.PlacementWsgify
-@microversion.version_handler('1.2')
-@util.check_accept('application/json')
-def list_resource_classes(req):
- """GET a list of resource classes.
-
- On success return a 200 and an application/json body representing
- a collection of resource classes.
- """
- context = req.environ['placement.context']
- context.can(policies.LIST)
- want_version = req.environ[microversion.MICROVERSION_ENVIRON]
- rcs = rp_obj.ResourceClassList.get_all(context)
-
- response = req.response
- output, last_modified = _serialize_resource_classes(
- req.environ, rcs, want_version)
- response.body = encodeutils.to_utf8(jsonutils.dumps(output))
- response.content_type = 'application/json'
- if want_version.matches((1, 15)):
- response.last_modified = last_modified
- response.cache_control = 'no-cache'
- return response
-
-
-@wsgi_wrapper.PlacementWsgify
-@microversion.version_handler('1.2', '1.6')
-@util.require_content('application/json')
-def update_resource_class(req):
- """PUT to update a single resource class.
-
- On success return a 200 response with a representation of the updated
- resource class.
- """
- name = util.wsgi_path_item(req.environ, 'name')
- context = req.environ['placement.context']
- context.can(policies.UPDATE)
-
- data = util.extract_json(req.body, schema.PUT_RC_SCHEMA_V1_2)
-
- # The containing application will catch a not found here.
- rc = rp_obj.ResourceClass.get_by_name(context, name)
-
- rc.name = data['name']
-
- try:
- rc.save()
- except exception.ResourceClassExists:
- raise webob.exc.HTTPConflict(
- _('Resource class already exists: %(name)s') %
- {'name': rc.name})
- except exception.ResourceClassCannotUpdateStandard:
- raise webob.exc.HTTPBadRequest(
- _('Cannot update standard resource class %(rp_name)s') %
- {'rp_name': name})
-
- req.response.body = encodeutils.to_utf8(jsonutils.dumps(
- _serialize_resource_class(req.environ, rc))
- )
- req.response.status = 200
- req.response.content_type = 'application/json'
- return req.response
-
-
-@wsgi_wrapper.PlacementWsgify  # noqa
-@microversion.version_handler('1.7')
-def update_resource_class(req):
- """PUT to create or validate the existence of a single resource class.
-
- On a successful create return 201. Return 204 if the class already
- exists. If the resource class is not a custom resource class, return
- a 400. 409 might be a better choice, but 400 aligns with previous code.
- """
- name = util.wsgi_path_item(req.environ, 'name')
- context = req.environ['placement.context']
- context.can(policies.UPDATE)
-
- # Use JSON schema validation to validate the resource class name.
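- # For example, a name like "CUSTOM_GOLD" passes, while a standard
- # class name such as "VCPU" fails validation here and results in
- # the 400 described in the docstring above.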
- util.extract_json('{"name": "%s"}' % name, schema.PUT_RC_SCHEMA_V1_2) - - status = 204 - try: - rc = rp_obj.ResourceClass.get_by_name(context, name) - except exception.NotFound: - try: - rc = rp_obj.ResourceClass(context, name=name) - rc.create() - status = 201 - # We will not see ResourceClassCannotUpdateStandard because - # that was already caught when validating the {name}. - except exception.ResourceClassExists: - # Someone just now created the class, so stick with 204 - pass - - req.response.status = status - req.response.content_type = None - req.response.location = util.resource_class_url(req.environ, rc) - return req.response diff --git a/nova/api/openstack/placement/handlers/resource_provider.py b/nova/api/openstack/placement/handlers/resource_provider.py deleted file mode 100644 index 8a9185ea2d6..00000000000 --- a/nova/api/openstack/placement/handlers/resource_provider.py +++ /dev/null @@ -1,297 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -"""Placement API handlers for resource providers.""" - -from oslo_db import exception as db_exc -from oslo_serialization import jsonutils -from oslo_utils import encodeutils -from oslo_utils import timeutils -from oslo_utils import uuidutils -import webob - -from nova.api.openstack.placement import errors -from nova.api.openstack.placement import exception -from nova.api.openstack.placement import microversion -from nova.api.openstack.placement.objects import resource_provider as rp_obj -from nova.api.openstack.placement.policies import resource_provider as policies -from nova.api.openstack.placement.schemas import resource_provider as rp_schema -from nova.api.openstack.placement import util -from nova.api.openstack.placement import wsgi_wrapper -from nova.i18n import _ - - -def _serialize_links(environ, resource_provider): - url = util.resource_provider_url(environ, resource_provider) - links = [{'rel': 'self', 'href': url}] - rel_types = ['inventories', 'usages'] - want_version = environ[microversion.MICROVERSION_ENVIRON] - if want_version >= (1, 1): - rel_types.append('aggregates') - if want_version >= (1, 6): - rel_types.append('traits') - if want_version >= (1, 11): - rel_types.append('allocations') - for rel in rel_types: - links.append({'rel': rel, 'href': '%s/%s' % (url, rel)}) - return links - - -def _serialize_provider(environ, resource_provider, want_version): - data = { - 'uuid': resource_provider.uuid, - 'name': resource_provider.name, - 'generation': resource_provider.generation, - 'links': _serialize_links(environ, resource_provider) - } - if want_version.matches((1, 14)): - data['parent_provider_uuid'] = resource_provider.parent_provider_uuid - data['root_provider_uuid'] = resource_provider.root_provider_uuid - return data - - -def _serialize_providers(environ, resource_providers, want_version): - output = [] - last_modified = None - get_last_modified = want_version.matches((1, 15)) - for provider in resource_providers: - if get_last_modified: - last_modified = util.pick_last_modified(last_modified, provider) - 
provider_data = _serialize_provider(environ, provider, want_version) - output.append(provider_data) - last_modified = last_modified or timeutils.utcnow(with_timezone=True) - return ({"resource_providers": output}, last_modified) - - -@wsgi_wrapper.PlacementWsgify -@util.require_content('application/json') -def create_resource_provider(req): - """POST to create a resource provider. - - On success return a 201 response with an empty body and a location - header pointing to the newly created resource provider. - """ - context = req.environ['placement.context'] - context.can(policies.CREATE) - schema = rp_schema.POST_RESOURCE_PROVIDER_SCHEMA - want_version = req.environ[microversion.MICROVERSION_ENVIRON] - if want_version.matches((1, 14)): - schema = rp_schema.POST_RP_SCHEMA_V1_14 - data = util.extract_json(req.body, schema) - - try: - uuid = data.setdefault('uuid', uuidutils.generate_uuid()) - resource_provider = rp_obj.ResourceProvider(context, **data) - resource_provider.create() - except db_exc.DBDuplicateEntry as exc: - # Whether exc.columns has one or two entries (in the event - # of both fields being duplicates) appears to be database - # dependent, so going with the complete solution here. - duplicate = ', '.join(['%s: %s' % (column, data[column]) - for column in exc.columns]) - raise webob.exc.HTTPConflict( - _('Conflicting resource provider %(duplicate)s already exists.') % - {'duplicate': duplicate}, - comment=errors.DUPLICATE_NAME) - except exception.ObjectActionError as exc: - raise webob.exc.HTTPBadRequest( - _('Unable to create resource provider "%(name)s", %(rp_uuid)s: ' - '%(error)s') % - {'name': data['name'], 'rp_uuid': uuid, 'error': exc}) - - req.response.location = util.resource_provider_url( - req.environ, resource_provider) - if want_version.matches(min_version=(1, 20)): - req.response.body = encodeutils.to_utf8(jsonutils.dumps( - _serialize_provider(req.environ, resource_provider, want_version))) - req.response.content_type = 'application/json' - modified = util.pick_last_modified(None, resource_provider) - req.response.last_modified = modified - req.response.cache_control = 'no-cache' - else: - req.response.status = 201 - req.response.content_type = None - return req.response - - -@wsgi_wrapper.PlacementWsgify -def delete_resource_provider(req): - """DELETE to destroy a single resource provider. - - On success return a 204 and an empty body. - """ - uuid = util.wsgi_path_item(req.environ, 'uuid') - context = req.environ['placement.context'] - context.can(policies.DELETE) - # The containing application will catch a not found here. - try: - resource_provider = rp_obj.ResourceProvider.get_by_uuid( - context, uuid) - resource_provider.destroy() - except exception.ResourceProviderInUse as exc: - raise webob.exc.HTTPConflict( - _('Unable to delete resource provider %(rp_uuid)s: %(error)s') % - {'rp_uuid': uuid, 'error': exc}, - comment=errors.PROVIDER_IN_USE) - except exception.NotFound as exc: - raise webob.exc.HTTPNotFound( - _("No resource provider with uuid %s found for delete") % uuid) - except exception.CannotDeleteParentResourceProvider as exc: - raise webob.exc.HTTPConflict( - _("Unable to delete parent resource provider %(rp_uuid)s: " - "It has child resource providers.") % {'rp_uuid': uuid}, - comment=errors.PROVIDER_CANNOT_DELETE_PARENT) - req.response.status = 204 - req.response.content_type = None - return req.response - - -@wsgi_wrapper.PlacementWsgify -@util.check_accept('application/json') -def get_resource_provider(req): - """Get a single resource provider. 
- - On success return a 200 with an application/json body representing - the resource provider. - """ - want_version = req.environ[microversion.MICROVERSION_ENVIRON] - uuid = util.wsgi_path_item(req.environ, 'uuid') - context = req.environ['placement.context'] - context.can(policies.SHOW) - - # The containing application will catch a not found here. - resource_provider = rp_obj.ResourceProvider.get_by_uuid( - context, uuid) - - response = req.response - response.body = encodeutils.to_utf8(jsonutils.dumps( - _serialize_provider(req.environ, resource_provider, want_version))) - response.content_type = 'application/json' - if want_version.matches((1, 15)): - modified = util.pick_last_modified(None, resource_provider) - response.last_modified = modified - response.cache_control = 'no-cache' - return response - - -@wsgi_wrapper.PlacementWsgify -@util.check_accept('application/json') -def list_resource_providers(req): - """GET a list of resource providers. - - On success return a 200 and an application/json body representing - a collection of resource providers. - """ - context = req.environ['placement.context'] - context.can(policies.LIST) - want_version = req.environ[microversion.MICROVERSION_ENVIRON] - - schema = rp_schema.GET_RPS_SCHEMA_1_0 - if want_version.matches((1, 18)): - schema = rp_schema.GET_RPS_SCHEMA_1_18 - elif want_version.matches((1, 14)): - schema = rp_schema.GET_RPS_SCHEMA_1_14 - elif want_version.matches((1, 4)): - schema = rp_schema.GET_RPS_SCHEMA_1_4 - elif want_version.matches((1, 3)): - schema = rp_schema.GET_RPS_SCHEMA_1_3 - - allow_forbidden = want_version.matches((1, 22)) - - util.validate_query_params(req, schema) - - filters = {} - # special handling of member_of qparam since we allow multiple member_of - # params at microversion 1.24. - if 'member_of' in req.GET: - filters['member_of'] = util.normalize_member_of_qs_params(req) - - qpkeys = ('uuid', 'name', 'in_tree', 'resources', 'required') - for attr in qpkeys: - if attr in req.GET: - value = req.GET[attr] - if attr == 'resources': - value = util.normalize_resources_qs_param(value) - elif attr == 'required': - value = util.normalize_traits_qs_param( - value, allow_forbidden=allow_forbidden) - filters[attr] = value - try: - resource_providers = rp_obj.ResourceProviderList.get_all_by_filters( - context, filters) - except exception.ResourceClassNotFound as exc: - raise webob.exc.HTTPBadRequest( - _('Invalid resource class in resources parameter: %(error)s') % - {'error': exc}) - except exception.TraitNotFound as exc: - raise webob.exc.HTTPBadRequest( - _('Invalid trait(s) in "required" parameter: %(error)s') % - {'error': exc}) - - response = req.response - output, last_modified = _serialize_providers( - req.environ, resource_providers, want_version) - response.body = encodeutils.to_utf8(jsonutils.dumps(output)) - response.content_type = 'application/json' - if want_version.matches((1, 15)): - response.last_modified = last_modified - response.cache_control = 'no-cache' - return response - - -@wsgi_wrapper.PlacementWsgify -@util.require_content('application/json') -def update_resource_provider(req): - """PUT to update a single resource provider. - - On success return a 200 response with a representation of the updated - resource provider. - """ - uuid = util.wsgi_path_item(req.environ, 'uuid') - context = req.environ['placement.context'] - context.can(policies.UPDATE) - want_version = req.environ[microversion.MICROVERSION_ENVIRON] - - # The containing application will catch a not found here. 
- resource_provider = rp_obj.ResourceProvider.get_by_uuid( - context, uuid) - - schema = rp_schema.PUT_RESOURCE_PROVIDER_SCHEMA - if want_version.matches((1, 14)): - schema = rp_schema.PUT_RP_SCHEMA_V1_14 - - data = util.extract_json(req.body, schema) - - for field in rp_obj.ResourceProvider.SETTABLE_FIELDS: - if field in data: - setattr(resource_provider, field, data[field]) - - try: - resource_provider.save() - except db_exc.DBDuplicateEntry as exc: - raise webob.exc.HTTPConflict( - _('Conflicting resource provider %(name)s already exists.') % - {'name': data['name']}, - comment=errors.DUPLICATE_NAME) - except exception.ObjectActionError as exc: - raise webob.exc.HTTPBadRequest( - _('Unable to save resource provider %(rp_uuid)s: %(error)s') % - {'rp_uuid': uuid, 'error': exc}) - - response = req.response - response.status = 200 - response.body = encodeutils.to_utf8(jsonutils.dumps( - _serialize_provider(req.environ, resource_provider, want_version))) - response.content_type = 'application/json' - if want_version.matches((1, 15)): - response.last_modified = resource_provider.updated_at - response.cache_control = 'no-cache' - return response diff --git a/nova/api/openstack/placement/handlers/root.py b/nova/api/openstack/placement/handlers/root.py deleted file mode 100644 index 298dab3816e..00000000000 --- a/nova/api/openstack/placement/handlers/root.py +++ /dev/null @@ -1,54 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -"""Handler for the root of the Placement API.""" - -from oslo_serialization import jsonutils -from oslo_utils import encodeutils -from oslo_utils import timeutils - - -from nova.api.openstack.placement import microversion -from nova.api.openstack.placement import wsgi_wrapper - - -@wsgi_wrapper.PlacementWsgify -def home(req): - want_version = req.environ[microversion.MICROVERSION_ENVIRON] - min_version = microversion.min_version_string() - max_version = microversion.max_version_string() - # NOTE(cdent): As sections of the api are added, links can be - # added to this output to align with the guidelines at - # http://specs.openstack.org/openstack/api-wg/guidelines/microversion_specification.html#version-discovery - version_data = { - 'id': 'v%s' % min_version, - 'max_version': max_version, - 'min_version': min_version, - # for now there is only ever one version, so it must be CURRENT - 'status': 'CURRENT', - 'links': [{ - # Point back to this same URL as the root of this version. - # NOTE(cdent): We explicitly want this to be a relative-URL - # representation of "this same URL", otherwise placement needs - # to keep track of proxy addresses and the like, which we have - # avoided thus far, in order to construct full URLs. Placement - # is much easier to scale if we never track that stuff. 
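- # With the current VERSIONS list (1.0 through 1.29, see
- # microversion.py below) this renders as:
- # {"versions": [{"id": "v1.0", "min_version": "1.0",
- # "max_version": "1.29", "status": "CURRENT",
- # "links": [{"rel": "self", "href": ""}]}]}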
- 'rel': 'self', - 'href': '', - }], - } - version_json = jsonutils.dumps({'versions': [version_data]}) - req.response.body = encodeutils.to_utf8(version_json) - req.response.content_type = 'application/json' - if want_version.matches((1, 15)): - req.response.cache_control = 'no-cache' - req.response.last_modified = timeutils.utcnow(with_timezone=True) - return req.response diff --git a/nova/api/openstack/placement/handlers/trait.py b/nova/api/openstack/placement/handlers/trait.py deleted file mode 100644 index b76907f1ad9..00000000000 --- a/nova/api/openstack/placement/handlers/trait.py +++ /dev/null @@ -1,270 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -"""Traits handlers for Placement API.""" - -import jsonschema -from oslo_serialization import jsonutils -from oslo_utils import encodeutils -from oslo_utils import timeutils -import webob - -from nova.api.openstack.placement import errors -from nova.api.openstack.placement import exception -from nova.api.openstack.placement import microversion -from nova.api.openstack.placement.objects import resource_provider as rp_obj -from nova.api.openstack.placement.policies import trait as policies -from nova.api.openstack.placement.schemas import trait as schema -from nova.api.openstack.placement import util -from nova.api.openstack.placement import wsgi_wrapper -from nova.i18n import _ - - -def _normalize_traits_qs_param(qs): - try: - op, value = qs.split(':', 1) - except ValueError: - msg = _('Badly formatted name parameter. Expected name query string ' - 'parameter in form: ' - '?name=[in|startswith]:[name1,name2|prefix]. Got: "%s"') - msg = msg % qs - raise webob.exc.HTTPBadRequest(msg) - - filters = {} - if op == 'in': - filters['name_in'] = value.split(',') - elif op == 'startswith': - filters['prefix'] = value - - return filters - - -def _serialize_traits(traits, want_version): - last_modified = None - get_last_modified = want_version.matches((1, 15)) - trait_names = [] - for trait in traits: - if get_last_modified: - last_modified = util.pick_last_modified(last_modified, trait) - trait_names.append(trait.name) - - # If there were no traits, set last_modified to now - last_modified = last_modified or timeutils.utcnow(with_timezone=True) - - return {'traits': trait_names}, last_modified - - -@wsgi_wrapper.PlacementWsgify -@microversion.version_handler('1.6') -def put_trait(req): - context = req.environ['placement.context'] - context.can(policies.TRAITS_UPDATE) - want_version = req.environ[microversion.MICROVERSION_ENVIRON] - name = util.wsgi_path_item(req.environ, 'name') - - try: - jsonschema.validate(name, schema.CUSTOM_TRAIT) - except jsonschema.ValidationError: - raise webob.exc.HTTPBadRequest( - _('The trait is invalid. 
A valid trait must be no longer than ' - '255 characters, start with the prefix "CUSTOM_" and use ' - 'following characters: "A"-"Z", "0"-"9" and "_"')) - - trait = rp_obj.Trait(context) - trait.name = name - - try: - trait.create() - req.response.status = 201 - except exception.TraitExists: - # Get the trait that already exists to get last-modified time. - if want_version.matches((1, 15)): - trait = rp_obj.Trait.get_by_name(context, name) - req.response.status = 204 - - req.response.content_type = None - req.response.location = util.trait_url(req.environ, trait) - if want_version.matches((1, 15)): - req.response.last_modified = trait.created_at - req.response.cache_control = 'no-cache' - return req.response - - -@wsgi_wrapper.PlacementWsgify -@microversion.version_handler('1.6') -def get_trait(req): - context = req.environ['placement.context'] - context.can(policies.TRAITS_SHOW) - want_version = req.environ[microversion.MICROVERSION_ENVIRON] - name = util.wsgi_path_item(req.environ, 'name') - - try: - trait = rp_obj.Trait.get_by_name(context, name) - except exception.TraitNotFound as ex: - raise webob.exc.HTTPNotFound(ex.format_message()) - - req.response.status = 204 - req.response.content_type = None - if want_version.matches((1, 15)): - req.response.last_modified = trait.created_at - req.response.cache_control = 'no-cache' - return req.response - - -@wsgi_wrapper.PlacementWsgify -@microversion.version_handler('1.6') -def delete_trait(req): - context = req.environ['placement.context'] - context.can(policies.TRAITS_DELETE) - name = util.wsgi_path_item(req.environ, 'name') - - try: - trait = rp_obj.Trait.get_by_name(context, name) - trait.destroy() - except exception.TraitNotFound as ex: - raise webob.exc.HTTPNotFound(ex.format_message()) - except exception.TraitCannotDeleteStandard as ex: - raise webob.exc.HTTPBadRequest(ex.format_message()) - except exception.TraitInUse as ex: - raise webob.exc.HTTPConflict(ex.format_message()) - - req.response.status = 204 - req.response.content_type = None - return req.response - - -@wsgi_wrapper.PlacementWsgify -@microversion.version_handler('1.6') -@util.check_accept('application/json') -def list_traits(req): - context = req.environ['placement.context'] - context.can(policies.TRAITS_LIST) - want_version = req.environ[microversion.MICROVERSION_ENVIRON] - filters = {} - - util.validate_query_params(req, schema.LIST_TRAIT_SCHEMA) - - if 'name' in req.GET: - filters = _normalize_traits_qs_param(req.GET['name']) - if 'associated' in req.GET: - if req.GET['associated'].lower() not in ['true', 'false']: - raise webob.exc.HTTPBadRequest( - _('The query parameter "associated" only accepts ' - '"true" or "false"')) - filters['associated'] = ( - True if req.GET['associated'].lower() == 'true' else False) - - traits = rp_obj.TraitList.get_all(context, filters) - req.response.status = 200 - output, last_modified = _serialize_traits(traits, want_version) - if want_version.matches((1, 15)): - req.response.last_modified = last_modified - req.response.cache_control = 'no-cache' - req.response.body = encodeutils.to_utf8(jsonutils.dumps(output)) - req.response.content_type = 'application/json' - return req.response - - -@wsgi_wrapper.PlacementWsgify -@microversion.version_handler('1.6') -@util.check_accept('application/json') -def list_traits_for_resource_provider(req): - context = req.environ['placement.context'] - context.can(policies.RP_TRAIT_LIST) - want_version = req.environ[microversion.MICROVERSION_ENVIRON] - uuid = util.wsgi_path_item(req.environ, 'uuid') - - 
# Resource provider object is needed for two things: If it is - # NotFound we'll get a 404 here, which needs to happen because - # get_all_by_resource_provider can return an empty list. - # It is also needed for the generation, used in the outgoing - # representation. - try: - rp = rp_obj.ResourceProvider.get_by_uuid(context, uuid) - except exception.NotFound as exc: - raise webob.exc.HTTPNotFound( - _("No resource provider with uuid %(uuid)s found: %(error)s") % - {'uuid': uuid, 'error': exc}) - - traits = rp_obj.TraitList.get_all_by_resource_provider(context, rp) - response_body, last_modified = _serialize_traits(traits, want_version) - response_body["resource_provider_generation"] = rp.generation - - if want_version.matches((1, 15)): - req.response.last_modified = last_modified - req.response.cache_control = 'no-cache' - - req.response.status = 200 - req.response.body = encodeutils.to_utf8(jsonutils.dumps(response_body)) - req.response.content_type = 'application/json' - return req.response - - -@wsgi_wrapper.PlacementWsgify -@microversion.version_handler('1.6') -@util.require_content('application/json') -def update_traits_for_resource_provider(req): - context = req.environ['placement.context'] - context.can(policies.RP_TRAIT_UPDATE) - want_version = req.environ[microversion.MICROVERSION_ENVIRON] - uuid = util.wsgi_path_item(req.environ, 'uuid') - data = util.extract_json(req.body, schema.SET_TRAITS_FOR_RP_SCHEMA) - rp_gen = data['resource_provider_generation'] - traits = data['traits'] - resource_provider = rp_obj.ResourceProvider.get_by_uuid( - context, uuid) - - if resource_provider.generation != rp_gen: - raise webob.exc.HTTPConflict( - _("Resource provider's generation already changed. Please update " - "the generation and try again."), - json_formatter=util.json_error_formatter, - comment=errors.CONCURRENT_UPDATE) - - trait_objs = rp_obj.TraitList.get_all( - context, filters={'name_in': traits}) - traits_name = set([obj.name for obj in trait_objs]) - non_existed_trait = set(traits) - set(traits_name) - if non_existed_trait: - raise webob.exc.HTTPBadRequest( - _("No such trait %s") % ', '.join(non_existed_trait)) - - resource_provider.set_traits(trait_objs) - - response_body, last_modified = _serialize_traits(trait_objs, want_version) - response_body[ - 'resource_provider_generation'] = resource_provider.generation - if want_version.matches((1, 15)): - req.response.last_modified = last_modified - req.response.cache_control = 'no-cache' - req.response.status = 200 - req.response.body = encodeutils.to_utf8(jsonutils.dumps(response_body)) - req.response.content_type = 'application/json' - return req.response - - -@wsgi_wrapper.PlacementWsgify -@microversion.version_handler('1.6') -def delete_traits_for_resource_provider(req): - context = req.environ['placement.context'] - context.can(policies.RP_TRAIT_DELETE) - uuid = util.wsgi_path_item(req.environ, 'uuid') - - resource_provider = rp_obj.ResourceProvider.get_by_uuid(context, uuid) - try: - resource_provider.set_traits(rp_obj.TraitList(objects=[])) - except exception.ConcurrentUpdateDetected as e: - raise webob.exc.HTTPConflict(e.format_message(), - comment=errors.CONCURRENT_UPDATE) - - req.response.status = 204 - req.response.content_type = None - return req.response diff --git a/nova/api/openstack/placement/handlers/usage.py b/nova/api/openstack/placement/handlers/usage.py deleted file mode 100644 index 85213302d46..00000000000 --- a/nova/api/openstack/placement/handlers/usage.py +++ /dev/null @@ -1,120 +0,0 @@ -# Licensed under the 
Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -"""Placement API handlers for usage information.""" - -from oslo_serialization import jsonutils -from oslo_utils import encodeutils -from oslo_utils import timeutils -import webob - -from nova.api.openstack.placement import exception -from nova.api.openstack.placement import microversion -from nova.api.openstack.placement.objects import resource_provider as rp_obj -from nova.api.openstack.placement.policies import usage as policies -from nova.api.openstack.placement.schemas import usage as schema -from nova.api.openstack.placement import util -from nova.api.openstack.placement import wsgi_wrapper -from nova.i18n import _ - - -def _serialize_usages(resource_provider, usage): - usage_dict = {resource.resource_class: resource.usage - for resource in usage} - return {'resource_provider_generation': resource_provider.generation, - 'usages': usage_dict} - - -@wsgi_wrapper.PlacementWsgify -@util.check_accept('application/json') -def list_usages(req): - """GET a dictionary of resource provider usage by resource class. - - If the resource provider does not exist return a 404. - - On success return a 200 with an application/json representation of - the usage dictionary. - """ - context = req.environ['placement.context'] - context.can(policies.PROVIDER_USAGES) - uuid = util.wsgi_path_item(req.environ, 'uuid') - want_version = req.environ[microversion.MICROVERSION_ENVIRON] - - # Resource provider object needed for two things: If it is - # NotFound we'll get a 404 here, which needs to happen because - # get_all_by_resource_provider_uuid can return an empty list. - # It is also needed for the generation, used in the outgoing - # representation. - try: - resource_provider = rp_obj.ResourceProvider.get_by_uuid( - context, uuid) - except exception.NotFound as exc: - raise webob.exc.HTTPNotFound( - _("No resource provider with uuid %(uuid)s found: %(error)s") % - {'uuid': uuid, 'error': exc}) - - usage = rp_obj.UsageList.get_all_by_resource_provider_uuid( - context, uuid) - - response = req.response - response.body = encodeutils.to_utf8(jsonutils.dumps( - _serialize_usages(resource_provider, usage))) - req.response.content_type = 'application/json' - if want_version.matches((1, 15)): - req.response.cache_control = 'no-cache' - # While it would be possible to generate a last-modified time - # based on the collection of allocations that result in a usage - # value (with some spelunking in the SQL) that doesn't align with - # the question that is being asked in a request for usages: What - # is the usage, now? So the last-modified time is set to utcnow. - req.response.last_modified = timeutils.utcnow(with_timezone=True) - return req.response - - -@wsgi_wrapper.PlacementWsgify -@microversion.version_handler('1.9') -@util.check_accept('application/json') -def get_total_usages(req): - """GET the sum of usages for a project or a project/user. - - On success return a 200 and an application/json body representing the - sum/total of usages. 
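-
- For example, GET /usages?project_id={project_id} returns a body
- shaped like this (values hypothetical):
-
- {"usages": {"VCPU": 2, "MEMORY_MB": 1024, "DISK_GB": 50}}
-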
- Return 404 Not Found if the wanted microversion does not match. - """ - context = req.environ['placement.context'] - # TODO(mriedem): When we support non-admins to use GET /usages we - # should pass the project_id (and user_id?) from the query parameters - # into context.can() for the target. - context.can(policies.TOTAL_USAGES) - want_version = req.environ[microversion.MICROVERSION_ENVIRON] - - util.validate_query_params(req, schema.GET_USAGES_SCHEMA_1_9) - - project_id = req.GET.get('project_id') - user_id = req.GET.get('user_id') - - usages = rp_obj.UsageList.get_all_by_project_user(context, project_id, - user_id=user_id) - - response = req.response - usages_dict = {'usages': {resource.resource_class: resource.usage - for resource in usages}} - response.body = encodeutils.to_utf8(jsonutils.dumps(usages_dict)) - req.response.content_type = 'application/json' - if want_version.matches((1, 15)): - req.response.cache_control = 'no-cache' - # While it would be possible to generate a last-modified time - # based on the collection of allocations that result in a usage - # value (with some spelunking in the SQL) that doesn't align with - # the question that is being asked in a request for usages: What - # is the usage, now? So the last-modified time is set to utcnow. - req.response.last_modified = timeutils.utcnow(with_timezone=True) - return req.response diff --git a/nova/api/openstack/placement/lib.py b/nova/api/openstack/placement/lib.py deleted file mode 100644 index 58c230db7b1..00000000000 --- a/nova/api/openstack/placement/lib.py +++ /dev/null @@ -1,53 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -"""Symbols intended to be imported by both placement code and placement API -consumers. When placement is separated out, this module should be part of a -common library that both placement and its consumers can require.""" - - -class RequestGroup(object): - def __init__(self, use_same_provider=True, resources=None, - required_traits=None, forbidden_traits=None, member_of=None): - """Create a grouping of resource and trait requests. - - :param use_same_provider: - If True, (the default) this RequestGroup represents requests for - resources and traits which must be satisfied by a single resource - provider. If False, represents a request for resources and traits - in any resource provider in the same tree, or a sharing provider. - :param resources: A dict of { resource_class: amount, ... } - :param required_traits: A set of { trait_name, ... } - :param forbidden_traits: A set of { trait_name, ... } - :param member_of: A list of [ [aggregate_UUID], - [aggregate_UUID, aggregate_UUID] ... 
] - """ - self.use_same_provider = use_same_provider - self.resources = resources or {} - self.required_traits = required_traits or set() - self.forbidden_traits = forbidden_traits or set() - self.member_of = member_of or [] - - def __str__(self): - ret = 'RequestGroup(use_same_provider=%s' % str(self.use_same_provider) - ret += ', resources={%s}' % ', '.join( - '%s:%d' % (rc, amount) - for rc, amount in sorted(list(self.resources.items()))) - ret += ', traits=[%s]' % ', '.join( - sorted(self.required_traits) + - ['!%s' % ft for ft in self.forbidden_traits]) - ret += ', aggregates=[%s]' % ', '.join( - sorted('[%s]' % ', '.join(agglist) - for agglist in sorted(self.member_of))) - ret += ')' - return ret diff --git a/nova/api/openstack/placement/microversion.py b/nova/api/openstack/placement/microversion.py deleted file mode 100644 index ec88d86068e..00000000000 --- a/nova/api/openstack/placement/microversion.py +++ /dev/null @@ -1,170 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. -"""Microversion handling.""" - -# NOTE(cdent): This code is taken from enamel: -# https://github.com/jaypipes/enamel and was the original source of -# the code now used in microversion_parse library. - -import collections -import inspect - -import microversion_parse -import webob - - -SERVICE_TYPE = 'placement' -MICROVERSION_ENVIRON = '%s.microversion' % SERVICE_TYPE -VERSIONED_METHODS = collections.defaultdict(list) - -# The Canonical Version List -VERSIONS = [ - '1.0', - '1.1', # initial support for aggregate.get_aggregates and set_aggregates - '1.2', # Adds /resource_classes resource endpoint - '1.3', # Adds 'member_of' query parameter to get resource providers - # that are members of any of the listed aggregates - '1.4', # Adds resources query string parameter in GET /resource_providers - '1.5', # Adds DELETE /resource_providers/{uuid}/inventories - '1.6', # Adds /traits and /resource_providers{uuid}/traits resource - # endpoints - '1.7', # PUT /resource_classes/{name} is bodiless create or update - '1.8', # Adds 'project_id' and 'user_id' required request parameters to - # PUT /allocations - '1.9', # Adds GET /usages - '1.10', # Adds GET /allocation_candidates resource endpoint - '1.11', # Adds 'allocations' link to the GET /resource_providers response - '1.12', # Add project_id and user_id to GET /allocations/{consumer_uuid} - # and PUT to /allocations/{consumer_uuid} in the same dict form - # as GET. The 'allocation_requests' format in GET - # /allocation_candidates is updated to be the same as well. - '1.13', # Adds POST /allocations to set allocations for multiple consumers - '1.14', # Adds parent and root provider UUID on resource provider - # representation and 'in_tree' filter on GET /resource_providers - '1.15', # Include last-modified and cache-control headers - '1.16', # Add 'limit' query parameter to GET /allocation_candidates - '1.17', # Add 'required' query parameter to GET /allocation_candidates and - # return traits in the provider summary. 
- '1.18', # Support ?required= queryparam on GET /resource_providers - '1.19', # Include generation and conflict detection in provider aggregates - # APIs - '1.20', # Return 200 with provider payload from POST /resource_providers - '1.21', # Support ?member_of=in: queryparam on - # GET /allocation_candidates - '1.22', # Support forbidden traits in the required parameter of - # GET /resource_providers and GET /allocation_candidates - '1.23', # Add support for error codes in error response JSON - '1.24', # Support multiple ?member_of= queryparams on - # GET /resource_providers - '1.25', # Adds support for granular resource requests via numbered - # querystring groups in GET /allocation_candidates - '1.26', # Add ability to specify inventory with reserved value equal to - # total. - '1.27', # Include all resource class inventories in `provider_summaries` - # field in response of `GET /allocation_candidates` API even if - # the resource class is not in the requested resources. - '1.28', # Add support for consumer generation - '1.29', # Support nested providers in GET /allocation_candidates API. -] - - -def max_version_string(): - return VERSIONS[-1] - - -def min_version_string(): - return VERSIONS[0] - - -# From twisted -# https://github.com/twisted/twisted/blob/trunk/twisted/python/deprecate.py -def _fully_qualified_name(obj): - """Return the fully qualified name of a module, class, method or function. - - Classes and functions need to be module level ones to be correctly - qualified. - """ - try: - name = obj.__qualname__ - except AttributeError: - name = obj.__name__ - - if inspect.isclass(obj) or inspect.isfunction(obj): - moduleName = obj.__module__ - return "%s.%s" % (moduleName, name) - elif inspect.ismethod(obj): - try: - cls = obj.im_class - except AttributeError: - # Python 3 eliminates im_class, substitutes __module__ and - # __qualname__ to provide similar information. - return "%s.%s" % (obj.__module__, obj.__qualname__) - else: - className = _fully_qualified_name(cls) - return "%s.%s" % (className, name) - return name - - -def _find_method(f, version, status_code): - """Look in VERSIONED_METHODS for method with right name matching version. - - If no match is found a HTTPError corresponding to status_code will - be returned. - """ - qualified_name = _fully_qualified_name(f) - # A KeyError shouldn't be possible here, but let's be robust - # just in case. - method_list = VERSIONED_METHODS.get(qualified_name, []) - for min_version, max_version, func in method_list: - if min_version <= version <= max_version: - return func - - raise webob.exc.status_map[status_code] - - -def version_handler(min_ver, max_ver=None, status_code=404): - """Decorator for versioning API methods. - - Add as a decorator to a placement API handler to constrain - the microversions at which it will run. Add after the - ``wsgify`` decorator. - - This does not check for version intersections. That's the - domain of tests. - - :param min_ver: A string of two numerals, X.Y indicating the - minimum version allowed for the decorated method. - :param max_ver: A string of two numerals, X.Y, indicating the - maximum version allowed for the decorated method. 
- :param status_code: A status code to indicate error, 404 by default - """ - def decorator(f): - min_version = microversion_parse.parse_version_string(min_ver) - if max_ver: - max_version = microversion_parse.parse_version_string(max_ver) - else: - max_version = microversion_parse.parse_version_string( - max_version_string()) - qualified_name = _fully_qualified_name(f) - VERSIONED_METHODS[qualified_name].append( - (min_version, max_version, f)) - - def decorated_func(req, *args, **kwargs): - version = req.environ[MICROVERSION_ENVIRON] - return _find_method(f, version, status_code)(req, *args, **kwargs) - - # Sort highest min version to beginning of list. - VERSIONED_METHODS[qualified_name].sort(key=lambda x: x[0], - reverse=True) - return decorated_func - return decorator diff --git a/nova/api/openstack/placement/objects/consumer.py b/nova/api/openstack/placement/objects/consumer.py deleted file mode 100644 index 75bd93dfbdb..00000000000 --- a/nova/api/openstack/placement/objects/consumer.py +++ /dev/null @@ -1,253 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from oslo_db import exception as db_exc -from oslo_versionedobjects import base -from oslo_versionedobjects import fields -import sqlalchemy as sa - -from nova.api.openstack.placement import db_api -from nova.api.openstack.placement import exception -from nova.api.openstack.placement.objects import project as project_obj -from nova.api.openstack.placement.objects import user as user_obj -from nova.db.sqlalchemy import api_models as models - -CONSUMER_TBL = models.Consumer.__table__ -_ALLOC_TBL = models.Allocation.__table__ - - -@db_api.placement_context_manager.writer -def create_incomplete_consumers(ctx, batch_size): - """Finds all the consumer records that are missing for allocations and - creates consumer records for them, using the "incomplete consumer" project - and user CONF options. - - Returns a tuple containing two identical elements with the number of - consumer records created, since this is the expected return format for data - migration routines. - """ - # Create a record in the projects table for our incomplete project - incomplete_proj_id = project_obj.ensure_incomplete_project(ctx) - - # Create a record in the users table for our incomplete user - incomplete_user_id = user_obj.ensure_incomplete_user(ctx) - - # Create a consumer table record for all consumers where - # allocations.consumer_id doesn't exist in the consumers table. Use the - # incomplete consumer project and user ID. 
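A minimal sketch of how a handler opted into the microversion machinery
removed above; the handler name and the version bounds here are
illustrative, not taken from the placement code::

    from nova.api.openstack.placement import microversion

    @microversion.version_handler('1.4', '1.7')
    def list_things(req):
        # Dispatched only when 1.4 <= requested microversion <= 1.7.
        # Outside that window (and with no other registration for this
        # name) _find_method raises the default 404 status_code via
        # webob.exc.status_map.
        ...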
- alloc_to_consumer = sa.outerjoin( - _ALLOC_TBL, CONSUMER_TBL, - _ALLOC_TBL.c.consumer_id == CONSUMER_TBL.c.uuid) - cols = [ - _ALLOC_TBL.c.consumer_id, - incomplete_proj_id, - incomplete_user_id, - ] - sel = sa.select(cols) - sel = sel.select_from(alloc_to_consumer) - sel = sel.where(CONSUMER_TBL.c.id.is_(None)) - sel = sel.limit(batch_size) - target_cols = ['uuid', 'project_id', 'user_id'] - ins_stmt = CONSUMER_TBL.insert().from_select(target_cols, sel) - res = ctx.session.execute(ins_stmt) - return res.rowcount, res.rowcount - - -@db_api.placement_context_manager.writer -def delete_consumers_if_no_allocations(ctx, consumer_uuids): - """Looks to see if any of the supplied consumers has any allocations and if - not, deletes the consumer record entirely. - - :param ctx: `nova.api.openstack.placement.context.RequestContext` that - contains an oslo_db Session - :param consumer_uuids: UUIDs of the consumers to check and maybe delete - """ - # Delete consumers that are not referenced in the allocations table - cons_to_allocs_join = sa.outerjoin( - CONSUMER_TBL, _ALLOC_TBL, - CONSUMER_TBL.c.uuid == _ALLOC_TBL.c.consumer_id) - subq = sa.select([CONSUMER_TBL.c.uuid]).select_from(cons_to_allocs_join) - subq = subq.where(sa.and_( - _ALLOC_TBL.c.consumer_id.is_(None), - CONSUMER_TBL.c.uuid.in_(consumer_uuids))) - no_alloc_consumers = [r[0] for r in ctx.session.execute(subq).fetchall()] - del_stmt = CONSUMER_TBL.delete() - del_stmt = del_stmt.where(CONSUMER_TBL.c.uuid.in_(no_alloc_consumers)) - ctx.session.execute(del_stmt) - - -@db_api.placement_context_manager.reader -def _get_consumer_by_uuid(ctx, uuid): - # The SQL for this looks like the following: - # SELECT - # c.id, c.uuid, - # p.id AS project_id, p.external_id AS project_external_id, - # u.id AS user_id, u.external_id AS user_external_id, - # c.updated_at, c.created_at - # FROM consumers c - # INNER JOIN projects p - # ON c.project_id = p.id - # INNER JOIN users u - # ON c.user_id = u.id - # WHERE c.uuid = $uuid - consumers = sa.alias(CONSUMER_TBL, name="c") - projects = sa.alias(project_obj.PROJECT_TBL, name="p") - users = sa.alias(user_obj.USER_TBL, name="u") - cols = [ - consumers.c.id, - consumers.c.uuid, - projects.c.id.label("project_id"), - projects.c.external_id.label("project_external_id"), - users.c.id.label("user_id"), - users.c.external_id.label("user_external_id"), - consumers.c.generation, - consumers.c.updated_at, - consumers.c.created_at - ] - c_to_p_join = sa.join( - consumers, projects, consumers.c.project_id == projects.c.id) - c_to_u_join = sa.join( - c_to_p_join, users, consumers.c.user_id == users.c.id) - sel = sa.select(cols).select_from(c_to_u_join) - sel = sel.where(consumers.c.uuid == uuid) - res = ctx.session.execute(sel).fetchone() - if not res: - raise exception.ConsumerNotFound(uuid=uuid) - - return dict(res) - - -@db_api.placement_context_manager.writer -def _increment_consumer_generation(ctx, consumer): - """Increments the supplied consumer's generation value, supplying the - consumer object which contains the currently-known generation. Returns the - newly-incremented generation. - - :param ctx: `nova.context.RequestContext` that contains an oslo_db Session - :param consumer: `Consumer` whose generation should be updated. - :returns: The newly-incremented generation. - :raises nova.exception.ConcurrentUpdateDetected: if another thread updated - the same consumer's view of its allocations in between the time - when this object was originally read and the call which modified - the consumer's state (e.g. 
replacing allocations for a consumer) - """ - consumer_gen = consumer.generation - new_generation = consumer_gen + 1 - upd_stmt = CONSUMER_TBL.update().where(sa.and_( - CONSUMER_TBL.c.id == consumer.id, - CONSUMER_TBL.c.generation == consumer_gen)).values( - generation=new_generation) - - res = ctx.session.execute(upd_stmt) - if res.rowcount != 1: - raise exception.ConcurrentUpdateDetected - return new_generation - - -@db_api.placement_context_manager.writer -def _delete_consumer(ctx, consumer): - """Deletes the supplied consumer. - - :param ctx: `nova.context.RequestContext` that contains an oslo_db Session - :param consumer: `Consumer` whose generation should be updated. - """ - del_stmt = CONSUMER_TBL.delete().where(CONSUMER_TBL.c.id == consumer.id) - ctx.session.execute(del_stmt) - - -@base.VersionedObjectRegistry.register_if(False) -class Consumer(base.VersionedObject, base.TimestampedObject): - - fields = { - 'id': fields.IntegerField(read_only=True), - 'uuid': fields.UUIDField(nullable=False), - 'project': fields.ObjectField('Project', nullable=False), - 'user': fields.ObjectField('User', nullable=False), - 'generation': fields.IntegerField(nullable=False), - } - - @staticmethod - def _from_db_object(ctx, target, source): - target.id = source['id'] - target.uuid = source['uuid'] - target.generation = source['generation'] - target.created_at = source['created_at'] - target.updated_at = source['updated_at'] - - target.project = project_obj.Project( - ctx, id=source['project_id'], - external_id=source['project_external_id']) - target.user = user_obj.User( - ctx, id=source['user_id'], - external_id=source['user_external_id']) - - target._context = ctx - target.obj_reset_changes() - return target - - @classmethod - def get_by_uuid(cls, ctx, uuid): - res = _get_consumer_by_uuid(ctx, uuid) - return cls._from_db_object(ctx, cls(ctx), res) - - def create(self): - @db_api.placement_context_manager.writer - def _create_in_db(ctx): - db_obj = models.Consumer( - uuid=self.uuid, project_id=self.project.id, - user_id=self.user.id) - try: - db_obj.save(ctx.session) - # NOTE(jaypipes): We don't do the normal _from_db_object() - # thing here because models.Consumer doesn't have a - # project_external_id or user_external_id attribute. - self.id = db_obj.id - self.generation = db_obj.generation - except db_exc.DBDuplicateEntry: - raise exception.ConsumerExists(uuid=self.uuid) - _create_in_db(self._context) - self.obj_reset_changes() - - def update(self): - """Used to update the consumer's project and user information without - incrementing the consumer's generation. - """ - @db_api.placement_context_manager.writer - def _update_in_db(ctx): - upd_stmt = CONSUMER_TBL.update().values( - project_id=self.project.id, user_id=self.user.id) - # NOTE(jaypipes): We add the generation check to the WHERE clause - # above just for safety. We don't need to check that the statement - # actually updated a single row. If it did not, then the - # consumer.increment_generation() call that happens in - # AllocationList.replace_all() will end up raising - # ConcurrentUpdateDetected anyway - upd_stmt = upd_stmt.where(sa.and_( - CONSUMER_TBL.c.id == self.id, - CONSUMER_TBL.c.generation == self.generation)) - ctx.session.execute(upd_stmt) - _update_in_db(self._context) - self.obj_reset_changes() - - def increment_generation(self): - """Increments the consumer's generation. 
- - :raises nova.exception.ConcurrentUpdateDetected: if another thread - updated the same consumer's view of its allocations in between the - time when this object was originally read and the call which - modified the consumer's state (e.g. replacing allocations for a - consumer) - """ - self.generation = _increment_consumer_generation(self._context, self) - - def delete(self): - _delete_consumer(self._context, self) diff --git a/nova/api/openstack/placement/objects/project.py b/nova/api/openstack/placement/objects/project.py deleted file mode 100644 index a6742da2fdc..00000000000 --- a/nova/api/openstack/placement/objects/project.py +++ /dev/null @@ -1,92 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from oslo_config import cfg -from oslo_db import exception as db_exc -from oslo_versionedobjects import base -from oslo_versionedobjects import fields -import sqlalchemy as sa - -from nova.api.openstack.placement import db_api -from nova.api.openstack.placement import exception -from nova.db.sqlalchemy import api_models as models - -CONF = cfg.CONF -PROJECT_TBL = models.Project.__table__ - - -@db_api.placement_context_manager.writer -def ensure_incomplete_project(ctx): - """Ensures that a project record is created for the "incomplete consumer - project". Returns the internal ID of that record. 
- """ - incomplete_id = CONF.placement.incomplete_consumer_project_id - sel = sa.select([PROJECT_TBL.c.id]).where( - PROJECT_TBL.c.external_id == incomplete_id) - res = ctx.session.execute(sel).fetchone() - if res: - return res[0] - ins = PROJECT_TBL.insert().values(external_id=incomplete_id) - res = ctx.session.execute(ins) - return res.inserted_primary_key[0] - - -@db_api.placement_context_manager.reader -def _get_project_by_external_id(ctx, external_id): - projects = sa.alias(PROJECT_TBL, name="p") - cols = [ - projects.c.id, - projects.c.external_id, - projects.c.updated_at, - projects.c.created_at - ] - sel = sa.select(cols) - sel = sel.where(projects.c.external_id == external_id) - res = ctx.session.execute(sel).fetchone() - if not res: - raise exception.ProjectNotFound(external_id=external_id) - - return dict(res) - - -@base.VersionedObjectRegistry.register_if(False) -class Project(base.VersionedObject): - - fields = { - 'id': fields.IntegerField(read_only=True), - 'external_id': fields.StringField(nullable=False), - } - - @staticmethod - def _from_db_object(ctx, target, source): - for field in target.fields: - setattr(target, field, source[field]) - - target._context = ctx - target.obj_reset_changes() - return target - - @classmethod - def get_by_external_id(cls, ctx, external_id): - res = _get_project_by_external_id(ctx, external_id) - return cls._from_db_object(ctx, cls(ctx), res) - - def create(self): - @db_api.placement_context_manager.writer - def _create_in_db(ctx): - db_obj = models.Project(external_id=self.external_id) - try: - db_obj.save(ctx.session) - except db_exc.DBDuplicateEntry: - raise exception.ProjectExists(external_id=self.external_id) - self._from_db_object(ctx, self, db_obj) - _create_in_db(self._context) diff --git a/nova/api/openstack/placement/objects/resource_provider.py b/nova/api/openstack/placement/objects/resource_provider.py deleted file mode 100644 index 3fb1fed54b4..00000000000 --- a/nova/api/openstack/placement/objects/resource_provider.py +++ /dev/null @@ -1,4207 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import collections -import copy -import itertools -import random - -# NOTE(cdent): The resource provider objects are designed to never be -# used over RPC. Remote manipulation is done with the placement HTTP -# API. The 'remotable' decorators should not be used, the objects should -# not be registered and there is no need to express VERSIONs nor handle -# obj_make_compatible. 
- -import os_traits -from oslo_concurrency import lockutils -from oslo_config import cfg -from oslo_db import api as oslo_db_api -from oslo_db import exception as db_exc -from oslo_log import log as logging -from oslo_utils import encodeutils -from oslo_versionedobjects import base -from oslo_versionedobjects import fields -import six -import sqlalchemy as sa -from sqlalchemy import exc as sqla_exc -from sqlalchemy import func -from sqlalchemy import sql -from sqlalchemy.sql import null - -from nova.api.openstack.placement import db_api -from nova.api.openstack.placement import exception -from nova.api.openstack.placement.objects import consumer as consumer_obj -from nova.api.openstack.placement.objects import project as project_obj -from nova.api.openstack.placement.objects import user as user_obj -from nova.api.openstack.placement import resource_class_cache as rc_cache -from nova.db.sqlalchemy import api_models as models -from nova.i18n import _ -from nova import rc_fields - -_TRAIT_TBL = models.Trait.__table__ -_ALLOC_TBL = models.Allocation.__table__ -_INV_TBL = models.Inventory.__table__ -_RP_TBL = models.ResourceProvider.__table__ -# Not used in this file but used in tests. -_RC_TBL = models.ResourceClass.__table__ -_AGG_TBL = models.PlacementAggregate.__table__ -_RP_AGG_TBL = models.ResourceProviderAggregate.__table__ -_RP_TRAIT_TBL = models.ResourceProviderTrait.__table__ -_PROJECT_TBL = models.Project.__table__ -_USER_TBL = models.User.__table__ -_CONSUMER_TBL = models.Consumer.__table__ -_RC_CACHE = None -_TRAIT_LOCK = 'trait_sync' -_TRAITS_SYNCED = False - -CONF = cfg.CONF -LOG = logging.getLogger(__name__) - - -@db_api.placement_context_manager.reader -def ensure_rc_cache(ctx): - """Ensures that a singleton resource class cache has been created in the - module's scope. - - :param ctx: `nova.context.RequestContext` that may be used to grab a DB - connection. - """ - global _RC_CACHE - if _RC_CACHE is not None: - return - _RC_CACHE = rc_cache.ResourceClassCache(ctx) - - -@oslo_db_api.wrap_db_retry(max_retries=5, retry_on_deadlock=True) -# Bug #1760322: If the caller raises an exception, we don't want the trait -# sync rolled back; so use an .independent transaction -@db_api.placement_context_manager.writer.independent -def _trait_sync(ctx): - """Sync the os_traits symbols to the database. - - Reads all symbols from the os_traits library, checks if any of them do - not exist in the database and bulk-inserts those that are not. This is - done once per process using this code if either Trait.get_by_name or - TraitList.get_all is called. - - :param ctx: `nova.context.RequestContext` that may be used to grab a DB - connection. - """ - # Create a set of all traits in the os_traits library. - std_traits = set(os_traits.get_traits()) - sel = sa.select([_TRAIT_TBL.c.name]) - res = ctx.session.execute(sel).fetchall() - # Create a set of all traits in the db that are not custom - # traits. - db_traits = set( - r[0] for r in res - if not os_traits.is_custom(r[0]) - ) - # Determine those traits which are in os_traits but not - # currently in the database, and insert them. 
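A concrete instance of the set difference computed next; the trait names
are real os_traits symbols, the contents of each set are made up for
illustration::

    std_traits = {'HW_CPU_X86_AVX', 'STORAGE_DISK_SSD'}  # os_traits library
    db_traits = {'STORAGE_DISK_SSD'}                     # already in the DB
    need_sync = std_traits - db_traits                   # {'HW_CPU_X86_AVX'}

Because db_traits excludes CUSTOM_* names, custom traits created through
the API are never candidates for this sync.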
- need_sync = std_traits - db_traits - ins = _TRAIT_TBL.insert() - batch_args = [ - {'name': six.text_type(trait)} - for trait in need_sync - ] - if batch_args: - try: - ctx.session.execute(ins, batch_args) - LOG.info("Synced traits from os_traits into API DB: %s", - need_sync) - except db_exc.DBDuplicateEntry: - pass # some other process sync'd, just ignore - - -def ensure_trait_sync(ctx): - """Ensures that the os_traits library is synchronized to the traits db. - - If _TRAITS_SYNCED is False then this process has not tried to update the - traits db. Do so by calling _trait_sync. Since the placement API server - could be multi-threaded, lock around testing _TRAITS_SYNCED to avoid - duplicating work. - - Different placement API server processes that talk to the same database - will avoid issues through the power of transactions. - - :param ctx: `nova.context.RequestContext` that may be used to grab a DB - connection. - """ - global _TRAITS_SYNCED - # If another thread is doing this work, wait for it to complete. - # When that thread is done _TRAITS_SYNCED will be true in this - # thread and we'll simply return. - with lockutils.lock(_TRAIT_LOCK): - if not _TRAITS_SYNCED: - _trait_sync(ctx) - _TRAITS_SYNCED = True - - -def _get_current_inventory_resources(ctx, rp): - """Returns a set() containing the resource class IDs for all resources - currently having an inventory record for the supplied resource provider. - - :param ctx: `nova.context.RequestContext` that may be used to grab a DB - connection. - :param rp: Resource provider to query inventory for. - """ - cur_res_sel = sa.select([_INV_TBL.c.resource_class_id]).where( - _INV_TBL.c.resource_provider_id == rp.id) - existing_resources = ctx.session.execute(cur_res_sel).fetchall() - return set([r[0] for r in existing_resources]) - - -def _delete_inventory_from_provider(ctx, rp, to_delete): - """Deletes any inventory records from the supplied provider and set() of - resource class identifiers. - - If there are allocations for any of the inventories to be deleted raise - InventoryInUse exception. - - :param ctx: `nova.context.RequestContext` that contains an oslo_db Session - :param rp: Resource provider from which to delete inventory. - :param to_delete: set() containing resource class IDs for records to - delete. - """ - allocation_query = sa.select( - [_ALLOC_TBL.c.resource_class_id.label('resource_class')]).where( - sa.and_(_ALLOC_TBL.c.resource_provider_id == rp.id, - _ALLOC_TBL.c.resource_class_id.in_(to_delete)) - ).group_by(_ALLOC_TBL.c.resource_class_id) - allocations = ctx.session.execute(allocation_query).fetchall() - if allocations: - resource_classes = ', '.join([_RC_CACHE.string_from_id(alloc[0]) - for alloc in allocations]) - raise exception.InventoryInUse(resource_classes=resource_classes, - resource_provider=rp.uuid) - - del_stmt = _INV_TBL.delete().where(sa.and_( - _INV_TBL.c.resource_provider_id == rp.id, - _INV_TBL.c.resource_class_id.in_(to_delete))) - res = ctx.session.execute(del_stmt) - return res.rowcount - - -def _add_inventory_to_provider(ctx, rp, inv_list, to_add): - """Inserts new inventory records for the supplied resource provider. - - :param ctx: `nova.context.RequestContext` that contains an oslo_db Session - :param rp: Resource provider to add inventory to. - :param inv_list: InventoryList object - :param to_add: set() containing resource class IDs to search inv_list for - adding to resource provider. 
- """ - for rc_id in to_add: - rc_str = _RC_CACHE.string_from_id(rc_id) - inv_record = inv_list.find(rc_str) - ins_stmt = _INV_TBL.insert().values( - resource_provider_id=rp.id, - resource_class_id=rc_id, - total=inv_record.total, - reserved=inv_record.reserved, - min_unit=inv_record.min_unit, - max_unit=inv_record.max_unit, - step_size=inv_record.step_size, - allocation_ratio=inv_record.allocation_ratio) - ctx.session.execute(ins_stmt) - - -def _update_inventory_for_provider(ctx, rp, inv_list, to_update): - """Updates existing inventory records for the supplied resource provider. - - :param ctx: `nova.context.RequestContext` that contains an oslo_db Session - :param rp: Resource provider on which to update inventory. - :param inv_list: InventoryList object - :param to_update: set() containing resource class IDs to search inv_list - for updating in resource provider. - :returns: A list of (uuid, class) tuples that have exceeded their - capacity after this inventory update. - """ - exceeded = [] - for rc_id in to_update: - rc_str = _RC_CACHE.string_from_id(rc_id) - inv_record = inv_list.find(rc_str) - allocation_query = sa.select( - [func.sum(_ALLOC_TBL.c.used).label('usage')]).\ - where(sa.and_( - _ALLOC_TBL.c.resource_provider_id == rp.id, - _ALLOC_TBL.c.resource_class_id == rc_id)) - allocations = ctx.session.execute(allocation_query).first() - if (allocations - and allocations['usage'] is not None - and allocations['usage'] > inv_record.capacity): - exceeded.append((rp.uuid, rc_str)) - upd_stmt = _INV_TBL.update().where(sa.and_( - _INV_TBL.c.resource_provider_id == rp.id, - _INV_TBL.c.resource_class_id == rc_id)).values( - total=inv_record.total, - reserved=inv_record.reserved, - min_unit=inv_record.min_unit, - max_unit=inv_record.max_unit, - step_size=inv_record.step_size, - allocation_ratio=inv_record.allocation_ratio) - res = ctx.session.execute(upd_stmt) - if not res.rowcount: - raise exception.InventoryWithResourceClassNotFound( - resource_class=rc_str) - return exceeded - - -def _increment_provider_generation(ctx, rp): - """Increments the supplied provider's generation value, supplying the - currently-known generation. Returns whether the increment succeeded. - - :param ctx: `nova.context.RequestContext` that contains an oslo_db Session - :param rp: `ResourceProvider` whose generation should be updated. - :returns: The new resource provider generation value if successful. - :raises nova.exception.ConcurrentUpdateDetected: if another thread updated - the same resource provider's view of its inventory or allocations - in between the time when this object was originally read - and the call to set the inventory. - """ - rp_gen = rp.generation - new_generation = rp_gen + 1 - upd_stmt = _RP_TBL.update().where(sa.and_( - _RP_TBL.c.id == rp.id, - _RP_TBL.c.generation == rp_gen)).values( - generation=(new_generation)) - - res = ctx.session.execute(upd_stmt) - if res.rowcount != 1: - raise exception.ResourceProviderConcurrentUpdateDetected() - return new_generation - - -@db_api.placement_context_manager.writer -def _add_inventory(context, rp, inventory): - """Add one Inventory that wasn't already on the provider. - - :raises `exception.ResourceClassNotFound` if inventory.resource_class - cannot be found in either the standard classes or the DB. 
- """ - rc_id = _RC_CACHE.id_from_string(inventory.resource_class) - inv_list = InventoryList(objects=[inventory]) - _add_inventory_to_provider( - context, rp, inv_list, set([rc_id])) - rp.generation = _increment_provider_generation(context, rp) - - -@db_api.placement_context_manager.writer -def _update_inventory(context, rp, inventory): - """Update an inventory already on the provider. - - :raises `exception.ResourceClassNotFound` if inventory.resource_class - cannot be found in either the standard classes or the DB. - """ - rc_id = _RC_CACHE.id_from_string(inventory.resource_class) - inv_list = InventoryList(objects=[inventory]) - exceeded = _update_inventory_for_provider( - context, rp, inv_list, set([rc_id])) - rp.generation = _increment_provider_generation(context, rp) - return exceeded - - -@db_api.placement_context_manager.writer -def _delete_inventory(context, rp, resource_class): - """Delete up to one Inventory of the given resource_class string. - - :raises `exception.ResourceClassNotFound` if resource_class - cannot be found in either the standard classes or the DB. - """ - rc_id = _RC_CACHE.id_from_string(resource_class) - if not _delete_inventory_from_provider(context, rp, [rc_id]): - raise exception.NotFound( - 'No inventory of class %s found for delete' - % resource_class) - rp.generation = _increment_provider_generation(context, rp) - - -@db_api.placement_context_manager.writer -def _set_inventory(context, rp, inv_list): - """Given an InventoryList object, replaces the inventory of the - resource provider in a safe, atomic fashion using the resource - provider's generation as a consistent view marker. - - :param context: Nova RequestContext. - :param rp: `ResourceProvider` object upon which to set inventory. - :param inv_list: `InventoryList` object to save to backend storage. - :returns: A list of (uuid, class) tuples that have exceeded their - capacity after this inventory update. - :raises nova.exception.ConcurrentUpdateDetected: if another thread updated - the same resource provider's view of its inventory or allocations - in between the time when this object was originally read - and the call to set the inventory. - :raises `exception.ResourceClassNotFound` if any resource class in any - inventory in inv_list cannot be found in either the standard - classes or the DB. - :raises `exception.InventoryInUse` if we attempt to delete inventory - from a provider that has allocations for that resource class. - """ - existing_resources = _get_current_inventory_resources(context, rp) - these_resources = set([_RC_CACHE.id_from_string(r.resource_class) - for r in inv_list.objects]) - - # Determine which resources we should be adding, deleting and/or - # updating in the resource provider's inventory by comparing sets - # of resource class identifiers. - to_add = these_resources - existing_resources - to_delete = existing_resources - these_resources - to_update = these_resources & existing_resources - exceeded = [] - - if to_delete: - _delete_inventory_from_provider(context, rp, to_delete) - if to_add: - _add_inventory_to_provider(context, rp, inv_list, to_add) - if to_update: - exceeded = _update_inventory_for_provider(context, rp, inv_list, - to_update) - - # Here is where we update the resource provider's generation value. If - # this update updates zero rows, that means that another thread has updated - # the inventory for this resource provider between the time the caller - # originally read the resource provider record and inventory information - # and this point. 
We raise an exception here which will rollback the above - # transaction and return an error to the caller to indicate that they can - # attempt to retry the inventory save after reverifying any capacity - # conditions and re-reading the existing inventory information. - rp.generation = _increment_provider_generation(context, rp) - - return exceeded - - -@db_api.placement_context_manager.reader -def _get_provider_by_uuid(context, uuid): - """Given a UUID, return a dict of information about the resource provider - from the database. - - :raises: NotFound if no such provider was found - :param uuid: The UUID to look up - """ - rpt = sa.alias(_RP_TBL, name="rp") - parent = sa.alias(_RP_TBL, name="parent") - root = sa.alias(_RP_TBL, name="root") - # TODO(jaypipes): Change this to an inner join when we are sure all - # root_provider_id values are NOT NULL - rp_to_root = sa.outerjoin(rpt, root, rpt.c.root_provider_id == root.c.id) - rp_to_parent = sa.outerjoin(rp_to_root, parent, - rpt.c.parent_provider_id == parent.c.id) - cols = [ - rpt.c.id, - rpt.c.uuid, - rpt.c.name, - rpt.c.generation, - root.c.uuid.label("root_provider_uuid"), - parent.c.uuid.label("parent_provider_uuid"), - rpt.c.updated_at, - rpt.c.created_at, - ] - sel = sa.select(cols).select_from(rp_to_parent).where(rpt.c.uuid == uuid) - res = context.session.execute(sel).fetchone() - if not res: - raise exception.NotFound( - 'No resource provider with uuid %s found' % uuid) - return dict(res) - - -@db_api.placement_context_manager.reader -def _get_aggregates_by_provider_id(context, rp_id): - join_statement = sa.join( - _AGG_TBL, _RP_AGG_TBL, sa.and_( - _AGG_TBL.c.id == _RP_AGG_TBL.c.aggregate_id, - _RP_AGG_TBL.c.resource_provider_id == rp_id)) - sel = sa.select([_AGG_TBL.c.uuid]).select_from(join_statement) - return [r[0] for r in context.session.execute(sel).fetchall()] - - -@db_api.placement_context_manager.reader -def _anchors_for_sharing_providers(context, rp_ids, get_id=False): - """Given a list of internal IDs of sharing providers, returns a set of - tuples of (sharing provider UUID, anchor provider UUID), where each of - anchor is the unique root provider of a tree associated with the same - aggregate as the sharing provider. (These are the providers that can - "anchor" a single AllocationRequest.) - - The sharing provider may or may not itself be part of a tree; in either - case, an entry for this root provider is included in the result. - - If the sharing provider is not part of any aggregate, the empty list is - returned. - - If get_id is True, it returns a set of tuples of (sharing provider ID, - anchor provider ID) instead. 
- """ - # SELECT sps.uuid, COALESCE(rps.uuid, shr_with_sps.uuid) - # FROM resource_providers AS sps - # INNER JOIN resource_provider_aggregates AS shr_aggs - # ON sps.id = shr_aggs.resource_provider_id - # INNER JOIN resource_provider_aggregates AS shr_with_sps_aggs - # ON shr_aggs.aggregate_id = shr_with_sps_aggs.aggregate_id - # INNER JOIN resource_providers AS shr_with_sps - # ON shr_with_sps_aggs.resource_provider_id = shr_with_sps.id - # LEFT JOIN resource_providers AS rps - # ON shr_with_sps.root_provider_id = rps.id - # WHERE sps.id IN $(RP_IDs) - rps = sa.alias(_RP_TBL, name='rps') - sps = sa.alias(_RP_TBL, name='sps') - shr_aggs = sa.alias(_RP_AGG_TBL, name='shr_aggs') - shr_with_sps_aggs = sa.alias(_RP_AGG_TBL, name='shr_with_sps_aggs') - shr_with_sps = sa.alias(_RP_TBL, name='shr_with_sps') - join_chain = sa.join( - sps, shr_aggs, sps.c.id == shr_aggs.c.resource_provider_id) - join_chain = sa.join( - join_chain, shr_with_sps_aggs, - shr_aggs.c.aggregate_id == shr_with_sps_aggs.c.aggregate_id) - join_chain = sa.join( - join_chain, shr_with_sps, - shr_with_sps_aggs.c.resource_provider_id == shr_with_sps.c.id) - if get_id: - # TODO(yikun): Change `func.coalesce(shr_with_sps.c.root_provider_id, - # shr_with_sps.c.id)` to `shr_with_sps.c.root_provider_id` when we are - # sure all root_provider_id values are NOT NULL - sel = sa.select([sps.c.id, func.coalesce( - shr_with_sps.c.root_provider_id, shr_with_sps.c.id)]) - else: - # TODO(efried): Change this to an inner join and change - # 'func.coalesce(rps.c.uuid, shr_with_sps.c.uuid)' to `rps.c.uuid` - # when we are sure all root_provider_id values are NOT NULL - join_chain = sa.outerjoin( - join_chain, rps, shr_with_sps.c.root_provider_id == rps.c.id) - sel = sa.select([sps.c.uuid, func.coalesce(rps.c.uuid, - shr_with_sps.c.uuid)]) - sel = sel.select_from(join_chain) - sel = sel.where(sps.c.id.in_(rp_ids)) - return set([(r[0], r[1]) for r in context.session.execute(sel).fetchall()]) - - -@db_api.placement_context_manager.writer -def _set_aggregates(context, resource_provider, provided_aggregates, - increment_generation=False): - rp_id = resource_provider.id - # When aggregate uuids are persisted no validation is done - # to ensure that they refer to something that has meaning - # elsewhere. It is assumed that code which makes use of the - # aggregates, later, will validate their fitness. - # TODO(cdent): At the moment we do not delete - # a PlacementAggregate that no longer has any associations - # with at least one resource provider. We may wish to do that - # to avoid bloat if it turns out we're creating a lot of noise. - # Not doing now to move things along. - provided_aggregates = set(provided_aggregates) - existing_aggregates = set(_get_aggregates_by_provider_id(context, rp_id)) - to_add = provided_aggregates - existing_aggregates - target_aggregates = list(provided_aggregates) - - # Create any aggregates that do not yet exist in - # PlacementAggregates. This is different from - # the set in existing_aggregates; those are aggregates for - # which there are associations for the resource provider - # at rp_id. The following loop checks for the existence of any - # aggregate with the provided uuid. In this way we only - # create a new row in the PlacementAggregate table if the - # aggregate uuid has never been seen before. Code further - # below will update the associations. 
- for agg_uuid in to_add: - found_agg = context.session.query(models.PlacementAggregate.uuid).\ - filter_by(uuid=agg_uuid).first() - if not found_agg: - new_aggregate = models.PlacementAggregate(uuid=agg_uuid) - try: - context.session.add(new_aggregate) - # Flush each aggregate to explicitly call the INSERT - # statement that could result in an integrity error - # if some other thread has added this agg_uuid. This - # also makes sure that the new aggregates have - # ids when the SELECT below happens. - context.session.flush() - except db_exc.DBDuplicateEntry: - # Something else has already added this agg_uuid - pass - - # Remove all aggregate associations so we can refresh them - # below. This means that all associations are added, but the - # aggregates themselves stay around. - context.session.query(models.ResourceProviderAggregate).filter_by( - resource_provider_id=rp_id).delete() - - # Set resource_provider_id, aggregate_id pairs to - # ResourceProviderAggregate table. - if target_aggregates: - select_agg_id = sa.select([rp_id, models.PlacementAggregate.id]).\ - where(models.PlacementAggregate.uuid.in_(target_aggregates)) - insert_aggregates = models.ResourceProviderAggregate.__table__.\ - insert().from_select(['resource_provider_id', 'aggregate_id'], - select_agg_id) - context.session.execute(insert_aggregates) - - if increment_generation: - resource_provider.generation = _increment_provider_generation( - context, resource_provider) - - -@db_api.placement_context_manager.reader -def _get_traits_by_provider_id(context, rp_id): - t = sa.alias(_TRAIT_TBL, name='t') - rpt = sa.alias(_RP_TRAIT_TBL, name='rpt') - - join_cond = sa.and_(t.c.id == rpt.c.trait_id, - rpt.c.resource_provider_id == rp_id) - join = sa.join(t, rpt, join_cond) - sel = sa.select([t.c.id, t.c.name, - t.c.created_at, t.c.updated_at]).select_from(join) - return [dict(r) for r in context.session.execute(sel).fetchall()] - - -def _add_traits_to_provider(ctx, rp_id, to_add): - """Adds trait associations to the provider with the supplied ID. - - :param ctx: `nova.context.RequestContext` that has an oslo_db Session - :param rp_id: Internal ID of the resource provider on which to add - trait associations - :param to_add: set() containing internal trait IDs for traits to add - """ - for trait_id in to_add: - try: - ins_stmt = _RP_TRAIT_TBL.insert().values( - resource_provider_id=rp_id, - trait_id=trait_id) - ctx.session.execute(ins_stmt) - except db_exc.DBDuplicateEntry: - # Another thread already set this trait for this provider. Ignore - # this for now (but ConcurrentUpdateDetected will end up being - # raised almost assuredly when we go to increment the resource - # provider's generation later, but that's also fine) - pass - - -def _delete_traits_from_provider(ctx, rp_id, to_delete): - """Deletes trait associations from the provider with the supplied ID and - set() of internal trait IDs. - - :param ctx: `nova.context.RequestContext` that has an oslo_db Session - :param rp_id: Internal ID of the resource provider from which to delete - trait associations - :param to_delete: set() containing internal trait IDs for traits to - delete - """ - del_stmt = _RP_TRAIT_TBL.delete().where( - sa.and_( - _RP_TRAIT_TBL.c.resource_provider_id == rp_id, - _RP_TRAIT_TBL.c.trait_id.in_(to_delete))) - ctx.session.execute(del_stmt) - - -@db_api.placement_context_manager.writer -def _set_traits(context, rp, traits): - """Given a ResourceProvider object and a TraitList object, replaces the set - of traits associated with the resource provider. 
- - :raises: ConcurrentUpdateDetected if the resource provider's traits or - inventory was changed in between the time when we first started to - set traits and the end of this routine. - - :param rp: The ResourceProvider object to set traits against - :param traits: A TraitList object or list of Trait objects - """ - # Get the internal IDs of our existing traits - existing_traits = _get_traits_by_provider_id(context, rp.id) - existing_traits = set(rec['id'] for rec in existing_traits) - want_traits = set(trait.id for trait in traits) - - to_add = want_traits - existing_traits - to_delete = existing_traits - want_traits - - if not to_add and not to_delete: - return - - if to_delete: - _delete_traits_from_provider(context, rp.id, to_delete) - if to_add: - _add_traits_to_provider(context, rp.id, to_add) - rp.generation = _increment_provider_generation(context, rp) - - -@db_api.placement_context_manager.reader -def _has_child_providers(context, rp_id): - """Returns True if the supplied resource provider has any child providers, - False otherwise - """ - child_sel = sa.select([_RP_TBL.c.id]) - child_sel = child_sel.where(_RP_TBL.c.parent_provider_id == rp_id) - child_res = context.session.execute(child_sel.limit(1)).fetchone() - if child_res: - return True - return False - - -@db_api.placement_context_manager.writer -def _set_root_provider_id(context, rp_id, root_id): - """Simply sets the root_provider_id value for a provider identified by - rp_id. Used in online data migration. - - :param rp_id: Internal ID of the provider to update - :param root_id: Value to set root provider to - """ - upd = _RP_TBL.update().where(_RP_TBL.c.id == rp_id) - upd = upd.values(root_provider_id=root_id) - context.session.execute(upd) - - -ProviderIds = collections.namedtuple( - 'ProviderIds', 'id uuid parent_id parent_uuid root_id root_uuid') - - -def _provider_ids_from_rp_ids(context, rp_ids): - """Given an iterable of internal resource provider IDs, returns a dict, - keyed by internal provider Id, of ProviderIds namedtuples describing those - providers. 
- - :returns: dict, keyed by internal provider Id, of ProviderIds namedtuples - :param rp_ids: iterable of internal provider IDs to look up - """ - # SELECT - # rp.id, rp.uuid, - # parent.id AS parent_id, parent.uuid AS parent_uuid, - # root.id AS root_id, root.uuid AS root_uuid - # FROM resource_providers AS rp - # LEFT JOIN resource_providers AS parent - # ON rp.parent_provider_id = parent.id - # LEFT JOIN resource_providers AS root - # ON rp.root_provider_id = root.id - # WHERE rp.id IN ($rp_ids) - me = sa.alias(_RP_TBL, name="me") - parent = sa.alias(_RP_TBL, name="parent") - root = sa.alias(_RP_TBL, name="root") - cols = [ - me.c.id, - me.c.uuid, - parent.c.id.label('parent_id'), - parent.c.uuid.label('parent_uuid'), - root.c.id.label('root_id'), - root.c.uuid.label('root_uuid'), - ] - # TODO(jaypipes): Change this to an inner join when we are sure all - # root_provider_id values are NOT NULL - me_to_root = sa.outerjoin(me, root, me.c.root_provider_id == root.c.id) - me_to_parent = sa.outerjoin(me_to_root, parent, - me.c.parent_provider_id == parent.c.id) - sel = sa.select(cols).select_from(me_to_parent) - sel = sel.where(me.c.id.in_(rp_ids)) - return { - r[0]: ProviderIds(**dict(r)) for r in context.session.execute(sel) - } - - -def _provider_ids_from_uuid(context, uuid): - """Given the UUID of a resource provider, returns a namedtuple - (ProviderIds) with the internal ID, the UUID, the parent provider's - internal ID, parent provider's UUID, the root provider's internal ID and - the root provider UUID. - - :returns: ProviderIds object containing the internal IDs and UUIDs of the - provider identified by the supplied UUID - :param uuid: The UUID of the provider to look up - """ - # SELECT - # rp.id, rp.uuid, - # parent.id AS parent_id, parent.uuid AS parent_uuid, - # root.id AS root_id, root.uuid AS root_uuid - # FROM resource_providers AS rp - # LEFT JOIN resource_providers AS parent - # ON rp.parent_provider_id = parent.id - # LEFT JOIN resource_providers AS root - # ON rp.root_provider_id = root.id - me = sa.alias(_RP_TBL, name="me") - parent = sa.alias(_RP_TBL, name="parent") - root = sa.alias(_RP_TBL, name="root") - cols = [ - me.c.id, - me.c.uuid, - parent.c.id.label('parent_id'), - parent.c.uuid.label('parent_uuid'), - root.c.id.label('root_id'), - root.c.uuid.label('root_uuid'), - ] - # TODO(jaypipes): Change this to an inner join when we are sure all - # root_provider_id values are NOT NULL - me_to_root = sa.outerjoin(me, root, me.c.root_provider_id == root.c.id) - me_to_parent = sa.outerjoin(me_to_root, parent, - me.c.parent_provider_id == parent.c.id) - sel = sa.select(cols).select_from(me_to_parent) - sel = sel.where(me.c.uuid == uuid) - res = context.session.execute(sel).fetchone() - if not res: - return None - return ProviderIds(**dict(res)) - - -def _provider_ids_matching_aggregates(context, member_of, rp_ids=None): - """Given a list of lists of aggregate UUIDs, return the internal IDs of all - resource providers associated with the aggregates. - - :param member_of: A list containing lists of aggregate UUIDs. Each item in - the outer list is to be AND'd together. If that item contains multiple - values, they are OR'd together. 
- - For example, if member_of is:: - - [ - ['agg1'], - ['agg2', 'agg3'], - ] - - we will return all the resource providers that are - associated with agg1 as well as either (agg2 or agg3) - :param rp_ids: When present, returned resource providers are limited - to only those in this value - - :returns: A list of internal resource provider IDs having all required - aggregate associations - """ - # Given a request for the following: - # - # member_of = [ - # [agg1], - # [agg2], - # [agg3, agg4] - # ] - # - # we need to produce the following SQL expression: - # - # SELECT - # rp.id - # FROM resource_providers AS rp - # JOIN resource_provider_aggregates AS rpa1 - # ON rp.id = rpa1.resource_provider_id - # AND rpa1.aggregate_id IN ($AGG1_ID) - # JOIN resource_provider_aggregates AS rpa2 - # ON rp.id = rpa2.resource_provider_id - # AND rpa2.aggregate_id IN ($AGG2_ID) - # JOIN resource_provider_aggregates AS rpa3 - # ON rp.id = rpa3.resource_provider_id - # AND rpa3.aggregate_id IN ($AGG3_ID, $AGG4_ID) - # # Only if we have rp_ids... - # WHERE rp.id IN ($RP_IDs) - - # First things first, get a map of all the aggregate UUID to internal - # aggregate IDs - agg_uuids = set() - for members in member_of: - for member in members: - agg_uuids.add(member) - agg_tbl = sa.alias(_AGG_TBL, name='aggs') - agg_sel = sa.select([agg_tbl.c.uuid, agg_tbl.c.id]) - agg_sel = agg_sel.where(agg_tbl.c.uuid.in_(agg_uuids)) - agg_uuid_map = { - r[0]: r[1] for r in context.session.execute(agg_sel).fetchall() - } - - rp_tbl = sa.alias(_RP_TBL, name='rp') - join_chain = rp_tbl - - for x, members in enumerate(member_of): - rpa_tbl = sa.alias(_RP_AGG_TBL, name='rpa%d' % x) - - agg_ids = [agg_uuid_map[member] for member in members - if member in agg_uuid_map] - if not agg_ids: - # This member_of list contains only non-existent aggregate UUIDs - # and therefore we will always return 0 results, so short-circuit - return [] - - join_cond = sa.and_( - rp_tbl.c.id == rpa_tbl.c.resource_provider_id, - rpa_tbl.c.aggregate_id.in_(agg_ids)) - join_chain = sa.join(join_chain, rpa_tbl, join_cond) - sel = sa.select([rp_tbl.c.id]).select_from(join_chain) - if rp_ids: - sel = sel.where(rp_tbl.c.id.in_(rp_ids)) - return [r[0] for r in context.session.execute(sel).fetchall()] - - -@db_api.placement_context_manager.writer -def _delete_rp_record(context, _id): - return context.session.query(models.ResourceProvider).\ - filter(models.ResourceProvider.id == _id).\ - delete(synchronize_session=False) - - -@base.VersionedObjectRegistry.register_if(False) -class ResourceProvider(base.VersionedObject, base.TimestampedObject): - SETTABLE_FIELDS = ('name', 'parent_provider_uuid') - - fields = { - 'id': fields.IntegerField(read_only=True), - 'uuid': fields.UUIDField(nullable=False), - 'name': fields.StringField(nullable=False), - 'generation': fields.IntegerField(nullable=False), - # UUID of the root provider in a hierarchy of providers. Will be equal - # to the uuid field if this provider is the root provider of a - # hierarchy. This field is never manually set by the user. Instead, it - # is automatically set to either the root provider UUID of the parent - # or the UUID of the provider itself if there is no parent. This field - # is an optimization field that allows us to very quickly query for all - # providers within a particular tree without doing any recursive - # querying. - 'root_provider_uuid': fields.UUIDField(nullable=False), - # UUID of the direct parent provider, or None if this provider is a - # "root" provider. 
- 'parent_provider_uuid': fields.UUIDField(nullable=True, default=None), - } - - def create(self): - if 'id' in self: - raise exception.ObjectActionError(action='create', - reason='already created') - if 'uuid' not in self: - raise exception.ObjectActionError(action='create', - reason='uuid is required') - if 'name' not in self: - raise exception.ObjectActionError(action='create', - reason='name is required') - if 'root_provider_uuid' in self: - raise exception.ObjectActionError( - action='create', - reason=_('root provider UUID cannot be manually set.')) - - self.obj_set_defaults() - updates = self.obj_get_changes() - self._create_in_db(self._context, updates) - self.obj_reset_changes() - - def destroy(self): - self._delete(self._context, self.id) - - def save(self): - updates = self.obj_get_changes() - if updates and any(k not in self.SETTABLE_FIELDS - for k in updates.keys()): - raise exception.ObjectActionError( - action='save', - reason='Immutable fields changed') - self._update_in_db(self._context, self.id, updates) - self.obj_reset_changes() - - @classmethod - def get_by_uuid(cls, context, uuid): - """Returns a new ResourceProvider object with the supplied UUID. - - :raises NotFound if no such provider could be found - :param uuid: UUID of the provider to search for - """ - rp_rec = _get_provider_by_uuid(context, uuid) - return cls._from_db_object(context, cls(), rp_rec) - - def add_inventory(self, inventory): - """Add one new Inventory to the resource provider. - - Fails if Inventory of the provided resource class is - already present. - """ - _add_inventory(self._context, self, inventory) - self.obj_reset_changes() - - def delete_inventory(self, resource_class): - """Delete Inventory of provided resource_class.""" - _delete_inventory(self._context, self, resource_class) - self.obj_reset_changes() - - def set_inventory(self, inv_list): - """Set all resource provider Inventory to be the provided list.""" - exceeded = _set_inventory(self._context, self, inv_list) - for uuid, rclass in exceeded: - LOG.warning('Resource provider %(uuid)s is now over-' - 'capacity for %(resource)s', - {'uuid': uuid, 'resource': rclass}) - self.obj_reset_changes() - - def update_inventory(self, inventory): - """Update one existing Inventory of the same resource class. - - Fails if no Inventory of the same class is present. - """ - exceeded = _update_inventory(self._context, self, inventory) - for uuid, rclass in exceeded: - LOG.warning('Resource provider %(uuid)s is now over-' - 'capacity for %(resource)s', - {'uuid': uuid, 'resource': rclass}) - self.obj_reset_changes() - - def get_aggregates(self): - """Get the aggregate uuids associated with this resource provider.""" - return _get_aggregates_by_provider_id(self._context, self.id) - - def set_aggregates(self, aggregate_uuids, increment_generation=False): - """Set the aggregate uuids associated with this resource provider. - - If an aggregate does not exist, one will be created using the - provided uuid. - - The resource provider generation is incremented if and only if the - increment_generation parameter is True. - """ - _set_aggregates(self._context, self, aggregate_uuids, - increment_generation=increment_generation) - - def set_traits(self, traits): - """Replaces the set of traits associated with the resource provider - with the given list of Trait objects. - - :param traits: A list of Trait objects representing the traits to - associate with the provider. 
- """ - _set_traits(self._context, self, traits) - self.obj_reset_changes() - - @db_api.placement_context_manager.writer - def _create_in_db(self, context, updates): - parent_id = None - root_id = None - # User supplied a parent, let's make sure it exists - parent_uuid = updates.pop('parent_provider_uuid') - if parent_uuid is not None: - # Setting parent to ourselves doesn't make any sense - if parent_uuid == self.uuid: - raise exception.ObjectActionError( - action='create', - reason=_('parent provider UUID cannot be same as ' - 'UUID. Please set parent provider UUID to ' - 'None if there is no parent.')) - - parent_ids = _provider_ids_from_uuid(context, parent_uuid) - if parent_ids is None: - raise exception.ObjectActionError( - action='create', - reason=_('parent provider UUID does not exist.')) - - parent_id = parent_ids.id - root_id = parent_ids.root_id - updates['root_provider_id'] = root_id - updates['parent_provider_id'] = parent_id - self.root_provider_uuid = parent_ids.root_uuid - - db_rp = models.ResourceProvider() - db_rp.update(updates) - context.session.add(db_rp) - context.session.flush() - - self.id = db_rp.id - self.generation = db_rp.generation - - if root_id is None: - # User did not specify a parent when creating this provider, so the - # root_provider_id needs to be set to this provider's newly-created - # internal ID - db_rp.root_provider_id = db_rp.id - context.session.add(db_rp) - context.session.flush() - self.root_provider_uuid = self.uuid - - @staticmethod - @db_api.placement_context_manager.writer - def _delete(context, _id): - # Do a quick check to see if the provider is a parent. If it is, don't - # allow deleting the provider. Note that the foreign key constraint on - # resource_providers.parent_provider_id will prevent deletion of the - # parent within the transaction below. This is just a quick - # short-circuit outside of the transaction boundary. - if _has_child_providers(context, _id): - raise exception.CannotDeleteParentResourceProvider() - - # Don't delete the resource provider if it has allocations. 
- rp_allocations = context.session.query(models.Allocation).\ - filter(models.Allocation.resource_provider_id == _id).\ - count() - if rp_allocations: - raise exception.ResourceProviderInUse() - # Delete any inventory associated with the resource provider - context.session.query(models.Inventory).\ - filter(models.Inventory.resource_provider_id == _id).\ - delete(synchronize_session=False) - # Delete any aggregate associations for the resource provider - # The name substitution on the next line is needed to satisfy pep8 - RPA_model = models.ResourceProviderAggregate - context.session.query(RPA_model).\ - filter(RPA_model.resource_provider_id == _id).delete() - # delete any trait associations for the resource provider - RPT_model = models.ResourceProviderTrait - context.session.query(RPT_model).\ - filter(RPT_model.resource_provider_id == _id).delete() - # set root_provider_id to null to make deletion possible - context.session.query(models.ResourceProvider).\ - filter(models.ResourceProvider.id == _id, - models.ResourceProvider.root_provider_id == _id).\ - update({'root_provider_id': None}) - # Now delete the RP record - try: - result = _delete_rp_record(context, _id) - except sqla_exc.IntegrityError: - # NOTE(jaypipes): Another thread snuck in and parented this - # resource provider in between the above check for - # _has_child_providers() and our attempt to delete the record - raise exception.CannotDeleteParentResourceProvider() - if not result: - raise exception.NotFound() - - @db_api.placement_context_manager.writer - def _update_in_db(self, context, id, updates): - # A list of resource providers in the same tree with the - # resource provider to update - same_tree = [] - if 'parent_provider_uuid' in updates: - # TODO(jaypipes): For now, "re-parenting" and "un-parenting" are - # not possible. If the provider already had a parent, we don't - # allow changing that parent due to various issues, including: - # - # * if the new parent is a descendant of this resource provider, we - # introduce the possibility of a loop in the graph, which would - # be very bad - # * potentially orphaning heretofore-descendants - # - # So, for now, let's just prevent re-parenting... - my_ids = _provider_ids_from_uuid(context, self.uuid) - parent_uuid = updates.pop('parent_provider_uuid') - if parent_uuid is not None: - parent_ids = _provider_ids_from_uuid(context, parent_uuid) - # User supplied a parent, let's make sure it exists - if parent_ids is None: - raise exception.ObjectActionError( - action='create', - reason=_('parent provider UUID does not exist.')) - if (my_ids.parent_id is not None and - my_ids.parent_id != parent_ids.id): - raise exception.ObjectActionError( - action='update', - reason=_('re-parenting a provider is not ' - 'currently allowed.')) - if my_ids.parent_uuid is None: - # So the user specifies a parent for an RP that doesn't - # have one. We have to check that by this new parent we - # don't create a loop in the tree. Basically the new parent - # cannot be the RP itself or one of its descendants. - # However as the RP's current parent is None the above - # condition is the same as "the new parent cannot be any RP - # from the current RP tree". 
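The same update rules restated as caller-visible behavior; ctx, rp_uuid
and other_uuid are placeholders::

    rp = ResourceProvider.get_by_uuid(ctx, rp_uuid)
    rp.name = 'renamed'                   # fine: in SETTABLE_FIELDS
    rp.parent_provider_uuid = other_uuid
    rp.save()
    # save() raises ObjectActionError if rp already had a different
    # parent (re-parenting is not allowed), if other_uuid does not
    # exist, or if other_uuid lies inside rp's own tree, which would
    # create a loop.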
-                    same_tree = ResourceProviderList.get_all_by_filters(
-                        context,
-                        filters={'in_tree': self.uuid})
-                    rp_uuids_in_the_same_tree = [rp.uuid for rp in same_tree]
-                    if parent_uuid in rp_uuids_in_the_same_tree:
-                        raise exception.ObjectActionError(
-                            action='update',
-                            reason=_('creating loop in the provider tree is '
-                                     'not allowed.'))
-
-                updates['root_provider_id'] = parent_ids.root_id
-                updates['parent_provider_id'] = parent_ids.id
-                self.root_provider_uuid = parent_ids.root_uuid
-            else:
-                if my_ids.parent_id is not None:
-                    raise exception.ObjectActionError(
-                        action='update',
-                        reason=_('un-parenting a provider is not '
-                                 'currently allowed.'))
-
-        db_rp = context.session.query(models.ResourceProvider).filter_by(
-            id=id).first()
-        db_rp.update(updates)
-        context.session.add(db_rp)
-
-        # We should also update the root providers of the resource
-        # providers originally in the same tree. If re-parenting is ever
-        # supported, this logic should be changed to update only the
-        # descendants of the re-parented resource provider, not all the
-        # providers in the tree.
-        for rp in same_tree:
-            # If the parent was not updated, `same_tree` is empty and this
-            # loop is a no-op.
-            rp.root_provider_uuid = parent_ids.root_uuid
-            db_rp = context.session.query(
-                models.ResourceProvider).filter_by(id=rp.id).first()
-            data = {'root_provider_id': parent_ids.root_id}
-            db_rp.update(data)
-            context.session.add(db_rp)
-
-        try:
-            context.session.flush()
-        except sqla_exc.IntegrityError:
-            # NOTE(jaypipes): Another thread snuck in and deleted the
-            # parent for this resource provider in between the above check
-            # for a valid parent provider and here...
-            raise exception.ObjectActionError(
-                action='update',
-                reason=_('parent provider UUID does not exist.'))
-
-    @staticmethod
-    @db_api.placement_context_manager.writer  # For online data migration
-    def _from_db_object(context, resource_provider, db_resource_provider):
-        # Online data migration to populate root_provider_id
-        # TODO(jaypipes): Remove when all root_provider_id values are
-        # NOT NULL
-        if db_resource_provider['root_provider_uuid'] is None:
-            rp_id = db_resource_provider['id']
-            uuid = db_resource_provider['uuid']
-            db_resource_provider['root_provider_uuid'] = uuid
-            _set_root_provider_id(context, rp_id, rp_id)
-        for field in resource_provider.fields:
-            setattr(resource_provider, field, db_resource_provider[field])
-        resource_provider._context = context
-        resource_provider.obj_reset_changes()
-        return resource_provider
-
-
-@db_api.placement_context_manager.reader
-def _get_providers_with_shared_capacity(ctx, rc_id, amount, member_of=None):
-    """Returns a list of resource provider IDs (internal IDs, not UUIDs)
-    that have capacity for a requested amount of a resource and indicate
-    that they share resources via an aggregate association.
-
-    Shared resource providers are marked with a standard trait called
-    MISC_SHARES_VIA_AGGREGATE. This indicates that the provider allows its
-    inventory to be consumed by other resource providers associated via an
-    aggregate link.
-
-    For example, assume we have two compute nodes, CN_1 and CN_2, each with
-    inventory of VCPU and MEMORY_MB but not DISK_GB (in other words, these
-    are compute nodes with no local disk). There is a resource provider
-    called "NFS_SHARE" that has an inventory of DISK_GB and has the
-    MISC_SHARES_VIA_AGGREGATE trait. Both the "CN_1" and "CN_2" compute
-    node resource providers and the "NFS_SHARE" resource provider are
-    associated with an aggregate called "AGG_1".
-
-    The scheduler needs to determine the resource providers that can
-    fulfill a request for 2 VCPU, 1024 MEMORY_MB and 100 DISK_GB.
-
-    Clearly, no single provider can satisfy the request for all three
-    resources, since neither compute node has DISK_GB inventory and the
-    NFS_SHARE provider has no VCPU or MEMORY_MB inventories.
-
-    However, if we consider the NFS_SHARE resource provider as providing
-    inventory of DISK_GB for both CN_1 and CN_2, we can include CN_1 and
-    CN_2 as potential fits for the requested set of resources.
-
-    To facilitate that matching query, this function returns all providers
-    that indicate they share their inventory with providers in some
-    aggregate and have enough capacity for the requested amount of a
-    resource.
-
-    To follow the example above, if we were to call
-    _get_providers_with_shared_capacity(ctx, "DISK_GB", 100), we would want
-    to get back the ID for the NFS_SHARE resource provider.
-
-    :param rc_id: Internal ID of the requested resource class.
-    :param amount: Amount of the requested resource.
-    :param member_of: When present, contains a list of lists of aggregate
-                      uuids that are used to filter the returned list of
-                      resource providers that *directly* belong to the
-                      aggregates referenced.
-    """
-    # The SQL we need to generate here looks like this:
-    #
-    # SELECT rp.id
-    # FROM resource_providers AS rp
-    # INNER JOIN resource_provider_traits AS rpt
-    #  ON rp.id = rpt.resource_provider_id
-    # INNER JOIN traits AS t
-    #  ON rpt.trait_id = t.id
-    #  AND t.name = "MISC_SHARES_VIA_AGGREGATE"
-    # INNER JOIN inventories AS inv
-    #  ON rp.id = inv.resource_provider_id
-    #  AND inv.resource_class_id = $rc_id
-    # LEFT JOIN (
-    #   SELECT resource_provider_id, SUM(used) as used
-    #   FROM allocations
-    #   WHERE resource_class_id = $rc_id
-    #   GROUP BY resource_provider_id
-    # ) AS usage
-    #  ON rp.id = usage.resource_provider_id
-    # WHERE COALESCE(usage.used, 0) + $amount <= (
-    #   (inv.total - inv.reserved) * inv.allocation_ratio) AND
-    #   inv.min_unit <= $amount AND
-    #   inv.max_unit >= $amount AND
-    #   $amount % inv.step_size = 0
-    # GROUP BY rp.id
-
-    rp_tbl = sa.alias(_RP_TBL, name='rp')
-    inv_tbl = sa.alias(_INV_TBL, name='inv')
-    t_tbl = sa.alias(_TRAIT_TBL, name='t')
-    rpt_tbl = sa.alias(_RP_TRAIT_TBL, name='rpt')
-
-    rp_to_rpt_join = sa.join(
-        rp_tbl, rpt_tbl,
-        rp_tbl.c.id == rpt_tbl.c.resource_provider_id,
-    )
-
-    rpt_to_t_join = sa.join(
-        rp_to_rpt_join, t_tbl,
-        sa.and_(
-            rpt_tbl.c.trait_id == t_tbl.c.id,
-            # The traits table wants unicode trait names, but os_traits
-            # presents native str, so we need to cast.
-            t_tbl.c.name == six.text_type(os_traits.MISC_SHARES_VIA_AGGREGATE),
-        ),
-    )
-
-    rp_to_inv_join = sa.join(
-        rpt_to_t_join, inv_tbl,
-        sa.and_(
-            rpt_tbl.c.resource_provider_id == inv_tbl.c.resource_provider_id,
-            inv_tbl.c.resource_class_id == rc_id,
-        ),
-    )
-
-    usage = sa.select([_ALLOC_TBL.c.resource_provider_id,
-                       sql.func.sum(_ALLOC_TBL.c.used).label('used')])
-    usage = usage.where(_ALLOC_TBL.c.resource_class_id == rc_id)
-    usage = usage.group_by(_ALLOC_TBL.c.resource_provider_id)
-    usage = sa.alias(usage, name='usage')
-
-    inv_to_usage_join = sa.outerjoin(
-        rp_to_inv_join, usage,
-        inv_tbl.c.resource_provider_id == usage.c.resource_provider_id,
-    )
-
-    where_conds = sa.and_(
-        func.coalesce(usage.c.used, 0) + amount <= (
-            inv_tbl.c.total - inv_tbl.c.reserved) * inv_tbl.c.allocation_ratio,
-        inv_tbl.c.min_unit <= amount,
-        inv_tbl.c.max_unit >= amount,
-        amount % inv_tbl.c.step_size == 0)
-
-    # If 'member_of' has values, do a separate lookup to identify the
-    # resource providers that meet the member_of constraints.
-    if member_of:
-        rps_in_aggs = _provider_ids_matching_aggregates(ctx, member_of)
-        if not rps_in_aggs:
-            # Short-circuit. The user either asked for a non-existing
-            # aggregate or there were no resource providers that matched
-            # the requirements...
-            return []
-        where_conds.append(rp_tbl.c.id.in_(rps_in_aggs))
-
-    sel = sa.select([rp_tbl.c.id]).select_from(inv_to_usage_join)
-    sel = sel.where(where_conds)
-    sel = sel.group_by(rp_tbl.c.id)
-
-    return [r[0] for r in ctx.session.execute(sel)]
-
-
-@base.VersionedObjectRegistry.register_if(False)
-class ResourceProviderList(base.ObjectListBase, base.VersionedObject):
-
-    fields = {
-        'objects': fields.ListOfObjectsField('ResourceProvider'),
-    }
-
-    @staticmethod
-    @db_api.placement_context_manager.reader
-    def _get_all_by_filters_from_db(context, filters):
-        # Eg. filters can be:
-        #  filters = {
-        #      'name': <name>,
-        #      'uuid': <uuid>,
-        #      'member_of': [[<aggregate_uuid>, <aggregate_uuid>],
-        #                    [<aggregate_uuid>]],
-        #      'resources': {
-        #          'VCPU': 1,
-        #          'MEMORY_MB': 1024
-        #      },
-        #      'in_tree': <uuid>,
-        #      'required': [<trait_name>, ...]
-        #  }
-        if not filters:
-            filters = {}
-        else:
-            # Since we modify the filters, copy them so that we don't
-            # modify them in the calling program.
-            filters = copy.deepcopy(filters)
-        name = filters.pop('name', None)
-        uuid = filters.pop('uuid', None)
-        member_of = filters.pop('member_of', [])
-        required = set(filters.pop('required', []))
-        forbidden = set([trait for trait in required
-                         if trait.startswith('!')])
-        required = required - forbidden
-        forbidden = set([trait.lstrip('!') for trait in forbidden])
-
-        resources = filters.pop('resources', {})
-        # NOTE(sbauza): We want to key the dict by the resource class IDs
-        # and we want to make sure those class names aren't incorrect.
-        resources = {_RC_CACHE.id_from_string(r_name): amount
-                     for r_name, amount in resources.items()}
-        rp = sa.alias(_RP_TBL, name="rp")
-        root_rp = sa.alias(_RP_TBL, name="root_rp")
-        parent_rp = sa.alias(_RP_TBL, name="parent_rp")
-
-        cols = [
-            rp.c.id,
-            rp.c.uuid,
-            rp.c.name,
-            rp.c.generation,
-            rp.c.updated_at,
-            rp.c.created_at,
-            root_rp.c.uuid.label("root_provider_uuid"),
-            parent_rp.c.uuid.label("parent_provider_uuid"),
-        ]
-
-        # TODO(jaypipes): Convert this to an inner join once all
-        # root_provider_id values are NOT NULL
-        rp_to_root = sa.outerjoin(rp, root_rp,
-                                  rp.c.root_provider_id == root_rp.c.id)
-        rp_to_parent = sa.outerjoin(rp_to_root, parent_rp,
-                                    rp.c.parent_provider_id == parent_rp.c.id)
-
-        query = sa.select(cols).select_from(rp_to_parent)
-
-        if name:
-            query = query.where(rp.c.name == name)
-        if uuid:
-            query = query.where(rp.c.uuid == uuid)
-        if 'in_tree' in filters:
-            # The 'in_tree' parameter is the UUID of a resource provider
-            # that the caller wants to limit the returned providers to
-            # only those within its "provider tree". So, we look up the
-            # resource provider having the UUID specified by the 'in_tree'
-            # parameter and grab the root_provider_id value of that record.
-            # We can then ask for only those resource providers having a
-            # root_provider_id of that value.
-            tree_uuid = filters.pop('in_tree')
-            tree_ids = _provider_ids_from_uuid(context, tree_uuid)
-            if tree_ids is None:
-                # List operations should simply return an empty list when
-                # a non-existing resource provider UUID is given.
-                return []
-            root_id = tree_ids.root_id
-            # TODO(jaypipes): Remove this OR condition when
-            # root_provider_id is not nullable in the database and all
-            # resource provider records have populated the root provider ID.
-            where_cond = sa.or_(rp.c.id == root_id,
-                                rp.c.root_provider_id == root_id)
-            query = query.where(where_cond)
-
-        # If 'member_of' has values, do a separate lookup to identify the
-        # resource providers that meet the member_of constraints.
-        if member_of:
-            rps_in_aggs = _provider_ids_matching_aggregates(context, member_of)
-            if not rps_in_aggs:
-                # Short-circuit. The user either asked for a non-existing
-                # aggregate or there were no resource providers that
-                # matched the requirements...
-                return []
-            query = query.where(rp.c.id.in_(rps_in_aggs))
-
-        # If 'required' has values, add a filter to limit results to
-        # providers possessing *all* of the listed traits.
-        if required:
-            trait_map = _trait_ids_from_names(context, required)
-            if len(trait_map) != len(required):
-                missing = required - set(trait_map)
-                raise exception.TraitNotFound(names=', '.join(missing))
-            rp_ids = _get_provider_ids_having_all_traits(context, trait_map)
-            if not rp_ids:
-                # If no providers have the required traits, we're done
-                return []
-            query = query.where(rp.c.id.in_(rp_ids))
-
-        # If 'forbidden' has values, filter out those providers that have
-        # any of those traits.
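As a concrete illustration of the required/forbidden split computed earlier in this method (the `!`-prefix convention), here is a self-contained sketch using real os-traits names; the clause just below then excludes providers holding any forbidden trait:

```python
# '!' marks a forbidden trait; everything else is required.
required = {'HW_CPU_X86_AVX2', '!STORAGE_DISK_SSD'}
forbidden = {t for t in required if t.startswith('!')}
required = required - forbidden
forbidden = {t.lstrip('!') for t in forbidden}
assert required == {'HW_CPU_X86_AVX2'}
assert forbidden == {'STORAGE_DISK_SSD'}
```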
-        if forbidden:
-            trait_map = _trait_ids_from_names(context, forbidden)
-            if len(trait_map) != len(forbidden):
-                missing = forbidden - set(trait_map)
-                raise exception.TraitNotFound(names=', '.join(missing))
-            rp_ids = _get_provider_ids_having_any_trait(context, trait_map)
-            if rp_ids:
-                query = query.where(~rp.c.id.in_(rp_ids))
-
-        if not resources:
-            # Return the list right away when there is no resource usage
-            # to check
-            res = context.session.execute(query).fetchall()
-            return [dict(r) for r in res]
-
-        # NOTE(sbauza): When we need to look at the resource criteria, the
-        # SQL generated for this case looks something like:
-        # SELECT
-        #   rp.*
-        # FROM resource_providers AS rp
-        # JOIN inventories AS inv
-        #  ON rp.id = inv.resource_provider_id
-        # LEFT JOIN (
-        #   SELECT resource_provider_id, resource_class_id, SUM(used) AS used
-        #   FROM allocations
-        #   WHERE resource_class_id IN ($RESOURCE_CLASSES)
-        #   GROUP BY resource_provider_id, resource_class_id
-        # ) AS usage
-        #   ON inv.resource_provider_id = usage.resource_provider_id
-        #   AND inv.resource_class_id = usage.resource_class_id
-        # AND (inv.resource_class_id = $X AND (used + $AMOUNT_X <= (
-        #   total - reserved) * inv.allocation_ratio) AND
-        #   inv.min_unit <= $AMOUNT_X AND inv.max_unit >= $AMOUNT_X AND
-        #   $AMOUNT_X % inv.step_size == 0)
-        # OR (inv.resource_class_id = $Y AND (used + $AMOUNT_Y <= (
-        #   total - reserved) * inv.allocation_ratio) AND
-        #   inv.min_unit <= $AMOUNT_Y AND inv.max_unit >= $AMOUNT_Y AND
-        #   $AMOUNT_Y % inv.step_size == 0)
-        # OR (inv.resource_class_id = $Z AND (used + $AMOUNT_Z <= (
-        #   total - reserved) * inv.allocation_ratio) AND
-        #   inv.min_unit <= $AMOUNT_Z AND inv.max_unit >= $AMOUNT_Z AND
-        #   $AMOUNT_Z % inv.step_size == 0))
-        # GROUP BY rp.id
-        # HAVING
-        #   COUNT(DISTINCT(inv.resource_class_id)) == len($RESOURCE_CLASSES)
-        #
-        # with a possible additional WHERE clause for the name and uuid
-        # that comes from the above filters
-
-        # First JOIN between inventories and RPs is here
-        inv_join = sa.join(rp_to_parent, _INV_TBL,
-                           rp.c.id == _INV_TBL.c.resource_provider_id)
-
-        # Now, below is the LEFT JOIN for getting the allocations usage
-        usage = sa.select([_ALLOC_TBL.c.resource_provider_id,
-                           _ALLOC_TBL.c.resource_class_id,
-                           sql.func.sum(_ALLOC_TBL.c.used).label('used')])
-        usage = usage.where(_ALLOC_TBL.c.resource_class_id.in_(resources))
-        usage = usage.group_by(_ALLOC_TBL.c.resource_provider_id,
-                               _ALLOC_TBL.c.resource_class_id)
-        usage = sa.alias(usage, name='usage')
-        usage_join = sa.outerjoin(inv_join, usage,
-            sa.and_(
-                usage.c.resource_provider_id == (
-                    _INV_TBL.c.resource_provider_id),
-                usage.c.resource_class_id == _INV_TBL.c.resource_class_id))
-
-        # And finally, we verify for each resource class that the requested
-        # amount isn't more than the remaining space (considering the
-        # allocation ratio, the reserved space and the min and max unit
-        # sizes)
-        where_clauses = [
-            sa.and_(
-                _INV_TBL.c.resource_class_id == r_idx,
-                (func.coalesce(usage.c.used, 0) + amount <= (
-                    _INV_TBL.c.total - _INV_TBL.c.reserved
-                ) * _INV_TBL.c.allocation_ratio),
-                _INV_TBL.c.min_unit <= amount,
-                _INV_TBL.c.max_unit >= amount,
-                amount % _INV_TBL.c.step_size == 0
-            )
-            for (r_idx, amount) in resources.items()]
-        query = query.select_from(usage_join)
-        query = query.where(sa.or_(*where_clauses))
-        query = query.group_by(rp.c.id, root_rp.c.uuid, parent_rp.c.uuid)
-        # NOTE(sbauza): Only RPs having all the requested resources can be
-        # returned
-        query = query.having(sql.func.count(
-            sa.distinct(_INV_TBL.c.resource_class_id)) == len(resources))
-
-        res = context.session.execute(query).fetchall()
-        return [dict(r) for r in res]
-
-    @classmethod
-    def get_all_by_filters(cls, context, filters=None):
-        """Returns a list of `ResourceProvider` objects that have sufficient
-        resources in their inventories to satisfy the amounts specified in
-        the `filters` parameter.
-
-        If no resource providers can be found, the function will return an
-        empty list.
-
-        :param context: `nova.context.RequestContext` that may be used to
-                        grab a DB connection.
-        :param filters: Can be `name`, `uuid`, `member_of`, `in_tree`,
-                        `resources` or `required` where `member_of` is a
-                        list of lists of aggregate UUIDs, `in_tree` is a
-                        UUID of a resource provider that we can use to find
-                        the root provider ID of the tree of providers to
-                        filter results by, `resources` is a dict of amounts
-                        keyed by resource classes and `required` is a list
-                        of trait names (a name prefixed with `!` forbids
-                        the trait instead of requiring it).
-        :type filters: dict
-        """
-        resource_providers = cls._get_all_by_filters_from_db(context, filters)
-        return base.obj_make_list(context, cls(context),
-                                  ResourceProvider, resource_providers)
-
-
-@base.VersionedObjectRegistry.register_if(False)
-class Inventory(base.VersionedObject, base.TimestampedObject):
-
-    fields = {
-        'id': fields.IntegerField(read_only=True),
-        'resource_provider': fields.ObjectField('ResourceProvider'),
-        'resource_class': rc_fields.ResourceClassField(read_only=True),
-        'total': fields.NonNegativeIntegerField(),
-        'reserved': fields.NonNegativeIntegerField(default=0),
-        'min_unit': fields.NonNegativeIntegerField(default=1),
-        'max_unit': fields.NonNegativeIntegerField(default=1),
-        'step_size': fields.NonNegativeIntegerField(default=1),
-        'allocation_ratio': fields.NonNegativeFloatField(default=1.0),
-    }
-
-    @property
-    def capacity(self):
-        """Inventory capacity, adjusted by allocation_ratio."""
-        return int((self.total - self.reserved) * self.allocation_ratio)
-
-
-@db_api.placement_context_manager.reader
-def _get_inventory_by_provider_id(ctx, rp_id):
-    inv = sa.alias(_INV_TBL, name="i")
-    cols = [
-        inv.c.resource_class_id,
-        inv.c.total,
-        inv.c.reserved,
-        inv.c.min_unit,
-        inv.c.max_unit,
-        inv.c.step_size,
-        inv.c.allocation_ratio,
-        inv.c.updated_at,
-        inv.c.created_at,
-    ]
-    sel = sa.select(cols)
-    sel = sel.where(inv.c.resource_provider_id == rp_id)
-
-    return [dict(r) for r in ctx.session.execute(sel)]
-
-
-@base.VersionedObjectRegistry.register_if(False)
-class InventoryList(base.ObjectListBase, base.VersionedObject):
-
-    fields = {
-        'objects': fields.ListOfObjectsField('Inventory'),
-    }
-
-    def find(self, res_class):
-        """Return the inventory record from the list of Inventory records
-        that matches the supplied resource class, or None.
-
-        :param res_class: A string name of the resource class. A ValueError
-                          is raised if a non-string value is supplied.
-        """
-        if not isinstance(res_class, six.string_types):
-            raise ValueError
-
-        for inv_rec in self.objects:
-            if inv_rec.resource_class == res_class:
-                return inv_rec
-
-    @classmethod
-    def get_all_by_resource_provider(cls, context, rp):
-        db_inv = _get_inventory_by_provider_id(context, rp.id)
-        # Build up a list of Inventory objects, setting the Inventory object
-        # fields to the same-named database record field we got from
-        # _get_inventory_by_provider_id(). We already have the
-        # ResourceProvider object so we just pass that object to the
-        # Inventory object constructor as-is
-        objs = [
-            Inventory(
-                context, resource_provider=rp,
-                resource_class=_RC_CACHE.string_from_id(
-                    rec['resource_class_id']),
-                **rec)
-            for rec in db_inv
-        ]
-        inv_list = cls(context, objects=objs)
-        return inv_list
-
-
-@base.VersionedObjectRegistry.register_if(False)
-class Allocation(base.VersionedObject, base.TimestampedObject):
-
-    fields = {
-        'id': fields.IntegerField(),
-        'resource_provider': fields.ObjectField('ResourceProvider'),
-        'consumer': fields.ObjectField('Consumer', nullable=False),
-        'resource_class': rc_fields.ResourceClassField(),
-        'used': fields.IntegerField(),
-    }
-
-
-@db_api.placement_context_manager.writer
-def _delete_allocations_for_consumer(ctx, consumer_id):
-    """Deletes any existing allocations that correspond to the allocations
-    to be written. This is wrapped in a transaction, so if the write
-    subsequently fails, the deletion will also be rolled back.
-    """
-    del_sql = _ALLOC_TBL.delete().where(
-        _ALLOC_TBL.c.consumer_id == consumer_id)
-    ctx.session.execute(del_sql)
-
-
-@db_api.placement_context_manager.writer
-def _delete_allocations_by_ids(ctx, alloc_ids):
-    """Deletes allocations having an internal id value in the set of
-    supplied IDs
-    """
-    del_sql = _ALLOC_TBL.delete().where(_ALLOC_TBL.c.id.in_(alloc_ids))
-    ctx.session.execute(del_sql)
-
-
-def _check_capacity_exceeded(ctx, allocs):
-    """Checks to see if the supplied allocation records would result in any
-    of the inventories involved having their capacity exceeded.
-
-    Raises an InvalidAllocationCapacityExceeded exception if any inventory
-    would be exhausted by the allocation. Raises an
-    InvalidAllocationConstraintsViolated exception if any of the
-    `step_size`, `min_unit` or `max_unit` constraints in an inventory will
-    be violated by any one of the allocations.
-
-    If no inventories would be exceeded or violated by the allocations, the
-    function returns a dict, keyed by resource provider UUID, of
-    `ResourceProvider` objects that contain the generation at the time of
-    the check.
-
-    :param ctx: `nova.context.RequestContext` that has an oslo_db Session
-    :param allocs: List of `Allocation` objects to check
-    """
-    # The SQL generated below looks like this:
-    # SELECT
-    #   rp.id,
-    #   rp.uuid,
-    #   rp.generation,
-    #   inv.resource_class_id,
-    #   inv.total,
-    #   inv.reserved,
-    #   inv.allocation_ratio,
-    #   allocs.used
-    # FROM resource_providers AS rp
-    # JOIN inventories AS inv
-    #  ON rp.id = inv.resource_provider_id
-    # LEFT JOIN (
-    #   SELECT resource_provider_id, resource_class_id, SUM(used) AS used
-    #   FROM allocations
-    #   WHERE resource_class_id IN ($RESOURCE_CLASSES)
-    #   AND resource_provider_id IN ($RESOURCE_PROVIDERS)
-    #   GROUP BY resource_provider_id, resource_class_id
-    # ) AS allocs
-    #  ON inv.resource_provider_id = allocs.resource_provider_id
-    #  AND inv.resource_class_id = allocs.resource_class_id
-    # WHERE rp.id IN ($RESOURCE_PROVIDERS)
-    # AND inv.resource_class_id IN ($RESOURCE_CLASSES)
-    #
-    # We then take the results of the above and determine if any of the
-    # inventory will have its capacity exceeded.
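Reduced to pure Python, the per-(provider, resource class) constraint that both the SQL above and the implementation that follows enforce looks like this (a sketch, not part of the module):

```python
def fits(total, reserved, allocation_ratio, used, amount,
         min_unit, max_unit, step_size):
    """Unit and capacity constraints applied to one inventory row."""
    capacity = (total - reserved) * allocation_ratio
    return (min_unit <= amount <= max_unit
            and amount % step_size == 0
            and used + amount <= capacity)

# e.g. 8 VCPU, none reserved, allocation_ratio 16.0, 100 already used:
assert fits(8, 0, 16.0, 100, 4, 1, 8, 1)       # 104 <= 128
assert not fits(8, 0, 16.0, 100, 32, 1, 8, 1)  # 32 exceeds max_unit
```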
- rc_ids = set([_RC_CACHE.id_from_string(a.resource_class) - for a in allocs]) - provider_uuids = set([a.resource_provider.uuid for a in allocs]) - provider_ids = set([a.resource_provider.id for a in allocs]) - usage = sa.select([_ALLOC_TBL.c.resource_provider_id, - _ALLOC_TBL.c.resource_class_id, - sql.func.sum(_ALLOC_TBL.c.used).label('used')]) - usage = usage.where( - sa.and_(_ALLOC_TBL.c.resource_class_id.in_(rc_ids), - _ALLOC_TBL.c.resource_provider_id.in_(provider_ids))) - usage = usage.group_by(_ALLOC_TBL.c.resource_provider_id, - _ALLOC_TBL.c.resource_class_id) - usage = sa.alias(usage, name='usage') - - inv_join = sql.join(_RP_TBL, _INV_TBL, - sql.and_(_RP_TBL.c.id == _INV_TBL.c.resource_provider_id, - _INV_TBL.c.resource_class_id.in_(rc_ids))) - primary_join = sql.outerjoin(inv_join, usage, - sql.and_( - _INV_TBL.c.resource_provider_id == usage.c.resource_provider_id, - _INV_TBL.c.resource_class_id == usage.c.resource_class_id) - ) - cols_in_output = [ - _RP_TBL.c.id.label('resource_provider_id'), - _RP_TBL.c.uuid, - _RP_TBL.c.generation, - _INV_TBL.c.resource_class_id, - _INV_TBL.c.total, - _INV_TBL.c.reserved, - _INV_TBL.c.allocation_ratio, - _INV_TBL.c.min_unit, - _INV_TBL.c.max_unit, - _INV_TBL.c.step_size, - usage.c.used, - ] - - sel = sa.select(cols_in_output).select_from(primary_join) - sel = sel.where( - sa.and_(_RP_TBL.c.id.in_(provider_ids), - _INV_TBL.c.resource_class_id.in_(rc_ids))) - records = ctx.session.execute(sel) - # Create a map keyed by (rp_uuid, res_class) for the records in the DB - usage_map = {} - provs_with_inv = set() - for record in records: - map_key = (record['uuid'], record['resource_class_id']) - if map_key in usage_map: - raise KeyError("%s already in usage_map, bad query" % str(map_key)) - usage_map[map_key] = record - provs_with_inv.add(record["uuid"]) - # Ensure that all providers have existing inventory - missing_provs = provider_uuids - provs_with_inv - if missing_provs: - class_str = ', '.join([_RC_CACHE.string_from_id(rc_id) - for rc_id in rc_ids]) - provider_str = ', '.join(missing_provs) - raise exception.InvalidInventory(resource_class=class_str, - resource_provider=provider_str) - - res_providers = {} - rp_resource_class_sum = collections.defaultdict( - lambda: collections.defaultdict(int)) - for alloc in allocs: - rc_id = _RC_CACHE.id_from_string(alloc.resource_class) - rp_uuid = alloc.resource_provider.uuid - if rp_uuid not in res_providers: - res_providers[rp_uuid] = alloc.resource_provider - amount_needed = alloc.used - rp_resource_class_sum[rp_uuid][rc_id] += amount_needed - # No use checking usage if we're not asking for anything - if amount_needed == 0: - continue - key = (rp_uuid, rc_id) - try: - usage = usage_map[key] - except KeyError: - # The resource class at rc_id is not in the usage map. - raise exception.InvalidInventory( - resource_class=alloc.resource_class, - resource_provider=rp_uuid) - allocation_ratio = usage['allocation_ratio'] - min_unit = usage['min_unit'] - max_unit = usage['max_unit'] - step_size = usage['step_size'] - - # check min_unit, max_unit, step_size - if (amount_needed < min_unit or amount_needed > max_unit or - amount_needed % step_size != 0): - LOG.warning( - "Allocation for %(rc)s on resource provider %(rp)s " - "violates min_unit, max_unit, or step_size. 
" - "Requested: %(requested)s, min_unit: %(min_unit)s, " - "max_unit: %(max_unit)s, step_size: %(step_size)s", - {'rc': alloc.resource_class, - 'rp': rp_uuid, - 'requested': amount_needed, - 'min_unit': min_unit, - 'max_unit': max_unit, - 'step_size': step_size}) - raise exception.InvalidAllocationConstraintsViolated( - resource_class=alloc.resource_class, - resource_provider=rp_uuid) - - # usage["used"] can be returned as None - used = usage['used'] or 0 - capacity = (usage['total'] - usage['reserved']) * allocation_ratio - if (capacity < (used + amount_needed) or - capacity < (used + rp_resource_class_sum[rp_uuid][rc_id])): - LOG.warning( - "Over capacity for %(rc)s on resource provider %(rp)s. " - "Needed: %(needed)s, Used: %(used)s, Capacity: %(cap)s", - {'rc': alloc.resource_class, - 'rp': rp_uuid, - 'needed': amount_needed, - 'used': used, - 'cap': capacity}) - raise exception.InvalidAllocationCapacityExceeded( - resource_class=alloc.resource_class, - resource_provider=rp_uuid) - return res_providers - - -@db_api.placement_context_manager.reader -def _get_allocations_by_provider_id(ctx, rp_id): - allocs = sa.alias(_ALLOC_TBL, name="a") - consumers = sa.alias(_CONSUMER_TBL, name="c") - projects = sa.alias(_PROJECT_TBL, name="p") - users = sa.alias(_USER_TBL, name="u") - cols = [ - allocs.c.id, - allocs.c.resource_class_id, - allocs.c.used, - allocs.c.updated_at, - allocs.c.created_at, - consumers.c.id.label("consumer_id"), - consumers.c.generation.label("consumer_generation"), - sql.func.coalesce( - consumers.c.uuid, allocs.c.consumer_id).label("consumer_uuid"), - projects.c.id.label("project_id"), - projects.c.external_id.label("project_external_id"), - users.c.id.label("user_id"), - users.c.external_id.label("user_external_id"), - ] - # TODO(jaypipes): change this join to be on ID not UUID - consumers_join = sa.join( - allocs, consumers, allocs.c.consumer_id == consumers.c.uuid) - projects_join = sa.join( - consumers_join, projects, consumers.c.project_id == projects.c.id) - users_join = sa.join( - projects_join, users, consumers.c.user_id == users.c.id) - sel = sa.select(cols).select_from(users_join) - sel = sel.where(allocs.c.resource_provider_id == rp_id) - - return [dict(r) for r in ctx.session.execute(sel)] - - -@db_api.placement_context_manager.reader -def _get_allocations_by_consumer_uuid(ctx, consumer_uuid): - allocs = sa.alias(_ALLOC_TBL, name="a") - rp = sa.alias(_RP_TBL, name="rp") - consumer = sa.alias(_CONSUMER_TBL, name="c") - project = sa.alias(_PROJECT_TBL, name="p") - user = sa.alias(_USER_TBL, name="u") - cols = [ - allocs.c.id, - allocs.c.resource_provider_id, - rp.c.name.label("resource_provider_name"), - rp.c.uuid.label("resource_provider_uuid"), - rp.c.generation.label("resource_provider_generation"), - allocs.c.resource_class_id, - allocs.c.used, - consumer.c.id.label("consumer_id"), - consumer.c.generation.label("consumer_generation"), - sql.func.coalesce( - consumer.c.uuid, allocs.c.consumer_id).label("consumer_uuid"), - project.c.id.label("project_id"), - project.c.external_id.label("project_external_id"), - user.c.id.label("user_id"), - user.c.external_id.label("user_external_id"), - ] - # Build up the joins of the five tables we need to interact with. 
- rp_join = sa.join(allocs, rp, allocs.c.resource_provider_id == rp.c.id) - consumer_join = sa.join(rp_join, consumer, - allocs.c.consumer_id == consumer.c.uuid) - project_join = sa.join(consumer_join, project, - consumer.c.project_id == project.c.id) - user_join = sa.join(project_join, user, - consumer.c.user_id == user.c.id) - - sel = sa.select(cols).select_from(user_join) - sel = sel.where(allocs.c.consumer_id == consumer_uuid) - - return [dict(r) for r in ctx.session.execute(sel)] - - -@db_api.placement_context_manager.writer.independent -def _create_incomplete_consumers_for_provider(ctx, rp_id): - # TODO(jaypipes): Remove in Stein after a blocker migration is added. - """Creates consumer record if consumer relationship between allocations -> - consumers table is missing for any allocation on the supplied provider - internal ID, using the "incomplete consumer" project and user CONF options. - """ - alloc_to_consumer = sa.outerjoin( - _ALLOC_TBL, consumer_obj.CONSUMER_TBL, - _ALLOC_TBL.c.consumer_id == consumer_obj.CONSUMER_TBL.c.uuid) - sel = sa.select([_ALLOC_TBL.c.consumer_id]) - sel = sel.select_from(alloc_to_consumer) - sel = sel.where( - sa.and_( - _ALLOC_TBL.c.resource_provider_id == rp_id, - consumer_obj.CONSUMER_TBL.c.id.is_(None))) - missing = ctx.session.execute(sel).fetchall() - if missing: - # Do a single INSERT for all missing consumer relationships for the - # provider - incomplete_proj_id = project_obj.ensure_incomplete_project(ctx) - incomplete_user_id = user_obj.ensure_incomplete_user(ctx) - - cols = [ - _ALLOC_TBL.c.consumer_id, - incomplete_proj_id, - incomplete_user_id, - ] - sel = sa.select(cols) - sel = sel.select_from(alloc_to_consumer) - sel = sel.where( - sa.and_( - _ALLOC_TBL.c.resource_provider_id == rp_id, - consumer_obj.CONSUMER_TBL.c.id.is_(None))) - target_cols = ['uuid', 'project_id', 'user_id'] - ins_stmt = consumer_obj.CONSUMER_TBL.insert().from_select( - target_cols, sel) - res = ctx.session.execute(ins_stmt) - if res.rowcount > 0: - LOG.info("Online data migration to fix incomplete consumers " - "for resource provider %s has been run. Migrated %d " - "incomplete consumer records on the fly.", rp_id, - res.rowcount) - - -@db_api.placement_context_manager.writer.independent -def _create_incomplete_consumer(ctx, consumer_id): - # TODO(jaypipes): Remove in Stein after a blocker migration is added. - """Creates consumer record if consumer relationship between allocations -> - consumers table is missing for the supplied consumer UUID, using the - "incomplete consumer" project and user CONF options. - """ - alloc_to_consumer = sa.outerjoin( - _ALLOC_TBL, consumer_obj.CONSUMER_TBL, - _ALLOC_TBL.c.consumer_id == consumer_obj.CONSUMER_TBL.c.uuid) - sel = sa.select([_ALLOC_TBL.c.consumer_id]) - sel = sel.select_from(alloc_to_consumer) - sel = sel.where( - sa.and_( - _ALLOC_TBL.c.consumer_id == consumer_id, - consumer_obj.CONSUMER_TBL.c.id.is_(None))) - missing = ctx.session.execute(sel).fetchall() - if missing: - incomplete_proj_id = project_obj.ensure_incomplete_project(ctx) - incomplete_user_id = user_obj.ensure_incomplete_user(ctx) - - ins_stmt = consumer_obj.CONSUMER_TBL.insert().values( - uuid=consumer_id, project_id=incomplete_proj_id, - user_id=incomplete_user_id) - res = ctx.session.execute(ins_stmt) - if res.rowcount > 0: - LOG.info("Online data migration to fix incomplete consumers " - "for consumer %s has been run. 
Migrated %d incomplete "
-                 "consumer records on the fly.", consumer_id, res.rowcount)
-
-
-@base.VersionedObjectRegistry.register_if(False)
-class AllocationList(base.ObjectListBase, base.VersionedObject):
-
-    # The number of times to retry set_allocations if there has
-    # been a resource provider (not consumer) generation conflict.
-    RP_CONFLICT_RETRY_COUNT = 10
-
-    fields = {
-        'objects': fields.ListOfObjectsField('Allocation'),
-    }
-
-    @oslo_db_api.wrap_db_retry(max_retries=5, retry_on_deadlock=True)
-    @db_api.placement_context_manager.writer
-    def _set_allocations(self, context, allocs):
-        """Write a set of allocations.
-
-        We must check that there is capacity for each allocation.
-        If there is not we roll back the entire set.
-
-        :raises `exception.ResourceClassNotFound` if any resource class in
-                any allocation in allocs cannot be found in either the
-                standard classes or the DB.
-        :raises `exception.InvalidAllocationCapacityExceeded` if any
-                inventory would be exhausted by the allocation.
-        :raises `InvalidAllocationConstraintsViolated` if any of the
-                `step_size`, `min_unit` or `max_unit` constraints in an
-                inventory will be violated by any one of the allocations.
-        :raises `ConcurrentUpdateDetected` if a generation for a resource
-                provider or consumer failed its increment check.
-        """
-        # First delete any existing allocations for any consumers. This
-        # provides a clean slate for the consumers mentioned in the list of
-        # allocations being manipulated.
-        consumer_ids = set(alloc.consumer.uuid for alloc in allocs)
-        for consumer_id in consumer_ids:
-            _delete_allocations_for_consumer(context, consumer_id)
-
-        # Before writing any allocation records, we check that the
-        # submitted allocations do not cause any inventory capacity to be
-        # exceeded for any resource provider and resource class involved in
-        # the allocation transaction. _check_capacity_exceeded() raises an
-        # exception if any inventory capacity is exceeded. If capacity is
-        # not exceeded, the function returns a dict, keyed by provider
-        # UUID, of ResourceProvider objects containing the generation of
-        # the resource provider at the time of the check. These objects are
-        # used at the end of the allocation transaction as a guard against
-        # concurrent updates.
-        #
-        # Don't check capacity when alloc.used is zero. Zero is not a valid
-        # amount when making an allocation (the minimum consumption of a
-        # resource is one) but is used in this method to indicate a need
-        # for removal. Providing 0 is controlled at the HTTP API layer
-        # where PUT /allocations does not allow empty allocations. When
-        # POST /allocations is implemented it will allow zero used values
-        # for the special case of atomically setting and removing different
-        # allocations in the same request.
-        # _check_capacity_exceeded will raise a ResourceClassNotFound if
-        # any allocation is using a resource class that does not exist.
-        visited_consumers = {}
-        visited_rps = _check_capacity_exceeded(context, allocs)
-        for alloc in allocs:
-            if alloc.consumer.id not in visited_consumers:
-                visited_consumers[alloc.consumer.id] = alloc.consumer
-
-            # If alloc.used is set to zero that is a signal that we don't
-            # want to (re-)create any allocations for this resource class.
-            # _delete_allocations_for_consumer has already wiped out the
-            # allocations, so just continue
-            if alloc.used == 0:
-                continue
-            consumer_id = alloc.consumer.uuid
-            rp = alloc.resource_provider
-            rc_id = _RC_CACHE.id_from_string(alloc.resource_class)
-            ins_stmt = _ALLOC_TBL.insert().values(
-                resource_provider_id=rp.id,
-                resource_class_id=rc_id,
-                consumer_id=consumer_id,
-                used=alloc.used)
-            res = context.session.execute(ins_stmt)
-            alloc.id = res.lastrowid
-            alloc.obj_reset_changes()
-
-        # Generation checking happens here. If the inventory for this
-        # resource provider changed out from under us, this will raise a
-        # ConcurrentUpdateDetected which can be caught by the caller to
-        # choose to try again. It will also roll back the transaction so
-        # that these changes always happen atomically.
-        for rp in visited_rps.values():
-            rp.generation = _increment_provider_generation(context, rp)
-        for consumer in visited_consumers.values():
-            consumer.increment_generation()
-        # If any consumers involved in this transaction ended up having no
-        # allocations, delete the consumer records. Exclude consumers that
-        # had *some resource* in the allocation list with a total > 0 since
-        # clearly those consumers have allocations...
-        cons_with_allocs = set(a.consumer.uuid for a in allocs if a.used > 0)
-        all_cons = set(c.uuid for c in visited_consumers.values())
-        consumers_to_check = all_cons - cons_with_allocs
-        consumer_obj.delete_consumers_if_no_allocations(
-            context, consumers_to_check)
-
-    @classmethod
-    def get_all_by_resource_provider(cls, context, rp):
-        _create_incomplete_consumers_for_provider(context, rp.id)
-        db_allocs = _get_allocations_by_provider_id(context, rp.id)
-        # Build up a list of Allocation objects, setting the Allocation
-        # object fields to the same-named database record field we got from
-        # _get_allocations_by_provider_id(). We already have the
-        # ResourceProvider object so we just pass that object to the
-        # Allocation object constructor as-is
-        objs = []
-        for rec in db_allocs:
-            consumer = consumer_obj.Consumer(
-                context, id=rec['consumer_id'],
-                uuid=rec['consumer_uuid'],
-                generation=rec['consumer_generation'],
-                project=project_obj.Project(
-                    context, id=rec['project_id'],
-                    external_id=rec['project_external_id']),
-                user=user_obj.User(
-                    context, id=rec['user_id'],
-                    external_id=rec['user_external_id']))
-            objs.append(
-                Allocation(
-                    context, id=rec['id'], resource_provider=rp,
-                    resource_class=_RC_CACHE.string_from_id(
-                        rec['resource_class_id']),
-                    consumer=consumer,
-                    used=rec['used']))
-        alloc_list = cls(context, objects=objs)
-        return alloc_list
-
-    @classmethod
-    def get_all_by_consumer_id(cls, context, consumer_id):
-        _create_incomplete_consumer(context, consumer_id)
-        db_allocs = _get_allocations_by_consumer_uuid(context, consumer_id)
-
-        if db_allocs:
-            # Build up the Consumer object (it's the same for all
-            # allocations since we looked up by consumer ID)
-            db_first = db_allocs[0]
-            consumer = consumer_obj.Consumer(
-                context, id=db_first['consumer_id'],
-                uuid=db_first['consumer_uuid'],
-                generation=db_first['consumer_generation'],
-                project=project_obj.Project(
-                    context, id=db_first['project_id'],
-                    external_id=db_first['project_external_id']),
-                user=user_obj.User(
-                    context, id=db_first['user_id'],
-                    external_id=db_first['user_external_id']))
-
-        # Build up a list of Allocation objects, setting the Allocation
-        # object fields to the same-named database record field we got from
-        # _get_allocations_by_consumer_uuid().
-        #
-        # NOTE(jaypipes): Unlike with get_all_by_resource_provider(), we do
-        # NOT already have the ResourceProvider object so we construct a
-        # new ResourceProvider object below by looking at the resource
-        # provider fields returned by _get_allocations_by_consumer_uuid().
-        objs = [
-            Allocation(
-                context, id=rec['id'],
-                resource_provider=ResourceProvider(
-                    context,
-                    id=rec['resource_provider_id'],
-                    uuid=rec['resource_provider_uuid'],
-                    name=rec['resource_provider_name'],
-                    generation=rec['resource_provider_generation']),
-                resource_class=_RC_CACHE.string_from_id(
-                    rec['resource_class_id']),
-                consumer=consumer,
-                used=rec['used'])
-            for rec in db_allocs
-        ]
-        alloc_list = cls(context, objects=objs)
-        return alloc_list
-
-    def replace_all(self):
-        """Replace the supplied allocations.
-
-        :note: This method always deletes all allocations for all consumers
-               referenced in the list of Allocation objects and then
-               replaces the consumers' allocations with the Allocation
-               objects. In doing so, it will end up setting the
-               Allocation.id attribute of each Allocation object.
-        """
-        # Retry _set_allocations server side if there is a
-        # ResourceProviderConcurrentUpdateDetected. We don't care about
-        # sleeping, we simply want to reset the resource provider objects
-        # and try again. For the sake of simplicity (and because we don't
-        # have easy access to the information) we reload all the resource
-        # providers that may be present.
-        retries = self.RP_CONFLICT_RETRY_COUNT
-        while retries:
-            retries -= 1
-            try:
-                self._set_allocations(self._context, self.objects)
-                break
-            except exception.ResourceProviderConcurrentUpdateDetected:
-                LOG.debug('Retrying allocations write on resource provider '
-                          'generation conflict')
-                # We only want to reload each unique resource provider once.
-                alloc_rp_uuids = set(
-                    alloc.resource_provider.uuid for alloc in self.objects)
-                seen_rps = {}
-                for rp_uuid in alloc_rp_uuids:
-                    seen_rps[rp_uuid] = ResourceProvider.get_by_uuid(
-                        self._context, rp_uuid)
-                for alloc in self.objects:
-                    rp_uuid = alloc.resource_provider.uuid
-                    alloc.resource_provider = seen_rps[rp_uuid]
-        else:
-            # We ran out of retries so we need to raise again.
-            # The log will automatically have request id info associated
-            # with it that will allow tracing back to specific allocations.
-            # Attempting to extract specific consumer or resource provider
-            # information from the allocations is not practical, as there
-            # could be multiple consumers and providers.
-            LOG.warning('Exceeded retry limit of %d on allocations write',
-                        self.RP_CONFLICT_RETRY_COUNT)
-            raise exception.ResourceProviderConcurrentUpdateDetected()
-
-    def delete_all(self):
-        consumer_uuids = set(alloc.consumer.uuid for alloc in self.objects)
-        alloc_ids = [alloc.id for alloc in self.objects]
-        _delete_allocations_by_ids(self._context, alloc_ids)
-        consumer_obj.delete_consumers_if_no_allocations(
-            self._context, consumer_uuids)
-
-    def __repr__(self):
-        strings = [repr(x) for x in self.objects]
-        return "AllocationList[" + ", ".join(strings) + "]"
-
-
-@base.VersionedObjectRegistry.register_if(False)
-class Usage(base.VersionedObject):
-
-    fields = {
-        'resource_class': rc_fields.ResourceClassField(read_only=True),
-        'usage': fields.NonNegativeIntegerField(),
-    }
-
-    @staticmethod
-    def _from_db_object(context, target, source):
-        for field in target.fields:
-            # NOTE: the trailing comma matters here; ('resource_class')
-            # without it is just a string and would turn this into a
-            # substring check.
-            if field not in ('resource_class',):
-                setattr(target, field, source[field])
-
-        if 'resource_class' not in target:
-            rc_str = _RC_CACHE.string_from_id(source['resource_class_id'])
-            target.resource_class = rc_str
-
-        target._context = context
-        target.obj_reset_changes()
-        return target
-
-
-@base.VersionedObjectRegistry.register_if(False)
-class UsageList(base.ObjectListBase, base.VersionedObject):
-
-    fields = {
-        'objects': fields.ListOfObjectsField('Usage'),
-    }
-
-    @staticmethod
-    @db_api.placement_context_manager.reader
-    def _get_all_by_resource_provider_uuid(context, rp_uuid):
-        query = (context.session.query(models.Inventory.resource_class_id,
-                 func.coalesce(func.sum(models.Allocation.used), 0))
-                 .join(models.ResourceProvider,
-                       models.Inventory.resource_provider_id ==
-                       models.ResourceProvider.id)
-                 .outerjoin(models.Allocation,
-                            sql.and_(models.Inventory.resource_provider_id ==
-                                     models.Allocation.resource_provider_id,
-                                     models.Inventory.resource_class_id ==
-                                     models.Allocation.resource_class_id))
-                 .filter(models.ResourceProvider.uuid == rp_uuid)
-                 .group_by(models.Inventory.resource_class_id))
-        result = [dict(resource_class_id=item[0], usage=item[1])
-                  for item in query.all()]
-        return result
-
-    @staticmethod
-    @db_api.placement_context_manager.reader
-    def _get_all_by_project_user(context, project_id, user_id=None):
-        query = (context.session.query(models.Allocation.resource_class_id,
-                 func.coalesce(func.sum(models.Allocation.used), 0))
-                 .join(models.Consumer,
-                       models.Allocation.consumer_id == models.Consumer.uuid)
-                 .join(models.Project,
-                       models.Consumer.project_id == models.Project.id)
-                 .filter(models.Project.external_id == project_id))
-        if user_id:
-            query = query.join(models.User,
-                               models.Consumer.user_id == models.User.id)
-            query = query.filter(models.User.external_id == user_id)
-        query = query.group_by(models.Allocation.resource_class_id)
-        result = [dict(resource_class_id=item[0], usage=item[1])
-                  for item in query.all()]
-        return result
-
-    @classmethod
-    def get_all_by_resource_provider_uuid(cls, context, rp_uuid):
-        usage_list = cls._get_all_by_resource_provider_uuid(context, rp_uuid)
-        return base.obj_make_list(context, cls(context), Usage, usage_list)
-
-    @classmethod
-    def get_all_by_project_user(cls, context, project_id, user_id=None):
-        usage_list = cls._get_all_by_project_user(context, project_id,
-                                                  user_id=user_id)
-        return base.obj_make_list(context, cls(context), Usage, usage_list)
-
-    def __repr__(self):
-        strings = [repr(x) for x in self.objects]
-        return "UsageList[" + ", ".join(strings) + "]"
-
-
-@base.VersionedObjectRegistry.register_if(False)
-class ResourceClass(base.VersionedObject, base.TimestampedObject):
-
-    MIN_CUSTOM_RESOURCE_CLASS_ID = 10000
-    """Any user-defined resource classes must have an identifier greater
-    than or equal to this number.
-    """
-
-    # Retry count for handling possible race condition in creating resource
-    # class. We don't ever want to hit this, as it is simply a race when
-    # creating these classes, but this is just a stopgap to prevent a
-    # potential infinite loop.
-    RESOURCE_CREATE_RETRY_COUNT = 100
-
-    fields = {
-        'id': fields.IntegerField(read_only=True),
-        'name': rc_fields.ResourceClassField(nullable=False),
-    }
-
-    @staticmethod
-    def _from_db_object(context, target, source):
-        for field in target.fields:
-            setattr(target, field, source[field])
-
-        target._context = context
-        target.obj_reset_changes()
-        return target
-
-    @classmethod
-    def get_by_name(cls, context, name):
-        """Return a ResourceClass object with the given string name.
-
-        :param name: String name of the resource class to find
-
-        :raises: ResourceClassNotFound if no such resource class was found
-        """
-        rc = _RC_CACHE.all_from_string(name)
-        obj = cls(context, id=rc['id'], name=rc['name'],
-                  updated_at=rc['updated_at'], created_at=rc['created_at'])
-        obj.obj_reset_changes()
-        return obj
-
-    @staticmethod
-    @db_api.placement_context_manager.reader
-    def _get_next_id(context):
-        """Utility method to grab the next resource class identifier to use
-        for user-defined resource classes.
-        """
-        query = context.session.query(func.max(models.ResourceClass.id))
-        max_id = query.one()[0]
-        if not max_id:
-            return ResourceClass.MIN_CUSTOM_RESOURCE_CLASS_ID
-        else:
-            return max_id + 1
-
-    def create(self):
-        if 'id' in self:
-            raise exception.ObjectActionError(action='create',
-                                              reason='already created')
-        if 'name' not in self:
-            raise exception.ObjectActionError(action='create',
-                                              reason='name is required')
-        if self.name in rc_fields.ResourceClass.STANDARD:
-            raise exception.ResourceClassExists(resource_class=self.name)
-
-        if not self.name.startswith(rc_fields.ResourceClass.CUSTOM_NAMESPACE):
-            raise exception.ObjectActionError(
-                action='create',
-                reason='name must start with ' +
-                       rc_fields.ResourceClass.CUSTOM_NAMESPACE)
-
-        updates = self.obj_get_changes()
-        # There is the possibility of a race when adding resource classes,
-        # as the ID is generated locally. This loop catches that exception,
-        # and retries until either it succeeds, or a different exception is
-        # encountered.
-        retries = self.RESOURCE_CREATE_RETRY_COUNT
-        while retries:
-            retries -= 1
-            try:
-                rc = self._create_in_db(self._context, updates)
-                self._from_db_object(self._context, self, rc)
-                break
-            except db_exc.DBDuplicateEntry as e:
-                if 'id' in e.columns:
-                    # Race condition for ID creation; try again
-                    continue
-                # The duplication is on the other unique column, 'name'. So
-                # do not retry; raise the exception immediately.
-                raise exception.ResourceClassExists(resource_class=self.name)
-        else:
-            # We have no idea how common it will be in practice for the
-            # retry limit to be exceeded. We set it high in the hope that
-            # we never hit this point, but added this log message so we
-            # know that this specific situation occurred.
- LOG.warning("Exceeded retry limit on ID generation while " - "creating ResourceClass %(name)s", - {'name': self.name}) - msg = _("creating resource class %s") % self.name - raise exception.MaxDBRetriesExceeded(action=msg) - - @staticmethod - @db_api.placement_context_manager.writer - def _create_in_db(context, updates): - next_id = ResourceClass._get_next_id(context) - rc = models.ResourceClass() - rc.update(updates) - rc.id = next_id - context.session.add(rc) - return rc - - def destroy(self): - if 'id' not in self: - raise exception.ObjectActionError(action='destroy', - reason='ID attribute not found') - # Never delete any standard resource class, since the standard resource - # classes don't even exist in the database table anyway. - if self.id in (rc['id'] for rc in _RC_CACHE.STANDARDS): - raise exception.ResourceClassCannotDeleteStandard( - resource_class=self.name) - - self._destroy(self._context, self.id, self.name) - _RC_CACHE.clear() - - @staticmethod - @db_api.placement_context_manager.writer - def _destroy(context, _id, name): - # Don't delete the resource class if it is referred to in the - # inventories table. - num_inv = context.session.query(models.Inventory).filter( - models.Inventory.resource_class_id == _id).count() - if num_inv: - raise exception.ResourceClassInUse(resource_class=name) - - res = context.session.query(models.ResourceClass).filter( - models.ResourceClass.id == _id).delete() - if not res: - raise exception.NotFound() - - def save(self): - if 'id' not in self: - raise exception.ObjectActionError(action='save', - reason='ID attribute not found') - updates = self.obj_get_changes() - # Never update any standard resource class, since the standard resource - # classes don't even exist in the database table anyway. - if self.id in (rc['id'] for rc in _RC_CACHE.STANDARDS): - raise exception.ResourceClassCannotUpdateStandard( - resource_class=self.name) - self._save(self._context, self.id, self.name, updates) - _RC_CACHE.clear() - - @staticmethod - @db_api.placement_context_manager.writer - def _save(context, id, name, updates): - db_rc = context.session.query(models.ResourceClass).filter_by( - id=id).first() - db_rc.update(updates) - try: - db_rc.save(context.session) - except db_exc.DBDuplicateEntry: - raise exception.ResourceClassExists(resource_class=name) - - -@base.VersionedObjectRegistry.register_if(False) -class ResourceClassList(base.ObjectListBase, base.VersionedObject): - - fields = { - 'objects': fields.ListOfObjectsField('ResourceClass'), - } - - @staticmethod - @db_api.placement_context_manager.reader - def _get_all(context): - customs = list(context.session.query(models.ResourceClass).all()) - return _RC_CACHE.STANDARDS + customs - - @classmethod - def get_all(cls, context): - resource_classes = cls._get_all(context) - return base.obj_make_list(context, cls(context), - ResourceClass, resource_classes) - - def __repr__(self): - strings = [repr(x) for x in self.objects] - return "ResourceClassList[" + ", ".join(strings) + "]" - - -@base.VersionedObjectRegistry.register_if(False) -class Trait(base.VersionedObject, base.TimestampedObject): - - # All the user-defined traits must begin with this prefix. 
- CUSTOM_NAMESPACE = 'CUSTOM_' - - fields = { - 'id': fields.IntegerField(read_only=True), - 'name': fields.StringField(nullable=False) - } - - @staticmethod - def _from_db_object(context, trait, db_trait): - for key in trait.fields: - setattr(trait, key, db_trait[key]) - trait.obj_reset_changes() - trait._context = context - return trait - - @staticmethod - @db_api.placement_context_manager.writer - def _create_in_db(context, updates): - trait = models.Trait() - trait.update(updates) - context.session.add(trait) - return trait - - def create(self): - if 'id' in self: - raise exception.ObjectActionError(action='create', - reason='already created') - if 'name' not in self: - raise exception.ObjectActionError(action='create', - reason='name is required') - - updates = self.obj_get_changes() - - try: - db_trait = self._create_in_db(self._context, updates) - except db_exc.DBDuplicateEntry: - raise exception.TraitExists(name=self.name) - - self._from_db_object(self._context, self, db_trait) - - @staticmethod - @db_api.placement_context_manager.writer # trait sync can cause a write - def _get_by_name_from_db(context, name): - result = context.session.query(models.Trait).filter_by( - name=name).first() - if not result: - raise exception.TraitNotFound(names=name) - return result - - @classmethod - def get_by_name(cls, context, name): - db_trait = cls._get_by_name_from_db(context, six.text_type(name)) - return cls._from_db_object(context, cls(), db_trait) - - @staticmethod - @db_api.placement_context_manager.writer - def _destroy_in_db(context, _id, name): - num = context.session.query(models.ResourceProviderTrait).filter( - models.ResourceProviderTrait.trait_id == _id).count() - if num: - raise exception.TraitInUse(name=name) - - res = context.session.query(models.Trait).filter_by( - name=name).delete() - if not res: - raise exception.TraitNotFound(names=name) - - def destroy(self): - if 'name' not in self: - raise exception.ObjectActionError(action='destroy', - reason='name is required') - - if not self.name.startswith(self.CUSTOM_NAMESPACE): - raise exception.TraitCannotDeleteStandard(name=self.name) - - if 'id' not in self: - raise exception.ObjectActionError(action='destroy', - reason='ID attribute not found') - - self._destroy_in_db(self._context, self.id, self.name) - - -@base.VersionedObjectRegistry.register_if(False) -class TraitList(base.ObjectListBase, base.VersionedObject): - - fields = { - 'objects': fields.ListOfObjectsField('Trait') - } - - @staticmethod - @db_api.placement_context_manager.writer # trait sync can cause a write - def _get_all_from_db(context, filters): - if not filters: - filters = {} - - query = context.session.query(models.Trait) - if 'name_in' in filters: - query = query.filter(models.Trait.name.in_( - [six.text_type(n) for n in filters['name_in']] - )) - if 'prefix' in filters: - query = query.filter( - models.Trait.name.like(six.text_type(filters['prefix'] + '%'))) - if 'associated' in filters: - if filters['associated']: - query = query.join(models.ResourceProviderTrait, - models.Trait.id == models.ResourceProviderTrait.trait_id - ).distinct() - else: - query = query.outerjoin(models.ResourceProviderTrait, - models.Trait.id == models.ResourceProviderTrait.trait_id - ).filter(models.ResourceProviderTrait.trait_id == null()) - - return query.all() - - @base.remotable_classmethod - def get_all(cls, context, filters=None): - db_traits = cls._get_all_from_db(context, filters) - return base.obj_make_list(context, cls(context), Trait, db_traits) - - @classmethod - def 
get_all_by_resource_provider(cls, context, rp): - """Returns a TraitList containing Trait objects for any trait - associated with the supplied resource provider. - """ - db_traits = _get_traits_by_provider_id(context, rp.id) - return base.obj_make_list(context, cls(context), Trait, db_traits) - - -@base.VersionedObjectRegistry.register_if(False) -class AllocationRequestResource(base.VersionedObject): - - fields = { - 'resource_provider': fields.ObjectField('ResourceProvider'), - 'resource_class': rc_fields.ResourceClassField(read_only=True), - 'amount': fields.NonNegativeIntegerField(), - } - - -@base.VersionedObjectRegistry.register_if(False) -class AllocationRequest(base.VersionedObject): - - fields = { - # UUID of (the root of the tree including) the non-sharing resource - # provider associated with this AllocationRequest. Internal use only, - # not included when the object is serialized for output. - 'anchor_root_provider_uuid': fields.UUIDField(), - # Whether all AllocationRequestResources in this AllocationRequest are - # required to be satisfied by the same provider (based on the - # corresponding RequestGroup's use_same_provider attribute). Internal - # use only, not included when the object is serialized for output. - 'use_same_provider': fields.BooleanField(), - 'resource_requests': fields.ListOfObjectsField( - 'AllocationRequestResource' - ), - } - - def __repr__(self): - anchor = (self.anchor_root_provider_uuid[-8:] - if 'anchor_root_provider_uuid' in self else '') - usp = self.use_same_provider if 'use_same_provider' in self else '' - repr_str = ('%s(anchor=...%s, same_provider=%s, ' - 'resource_requests=[%s])' % - (self.obj_name(), anchor, usp, - ', '.join([str(arr) for arr in self.resource_requests]))) - if six.PY2: - repr_str = encodeutils.safe_encode(repr_str, incoming='utf-8') - return repr_str - - -@base.VersionedObjectRegistry.register_if(False) -class ProviderSummaryResource(base.VersionedObject): - - fields = { - 'resource_class': rc_fields.ResourceClassField(read_only=True), - 'capacity': fields.NonNegativeIntegerField(), - 'used': fields.NonNegativeIntegerField(), - # Internal use only; not included when the object is serialized for - # output. - 'max_unit': fields.NonNegativeIntegerField(), - } - - -@base.VersionedObjectRegistry.register_if(False) -class ProviderSummary(base.VersionedObject): - - fields = { - 'resource_provider': fields.ObjectField('ResourceProvider'), - 'resources': fields.ListOfObjectsField('ProviderSummaryResource'), - 'traits': fields.ListOfObjectsField('Trait'), - } - - @property - def resource_class_names(self): - """Helper property that returns a set() of resource class string names - that are included in the provider summary. - """ - return set(res.resource_class for res in self.resources) - - -@db_api.placement_context_manager.reader -def _get_usages_by_provider_tree(ctx, root_ids): - """Returns a row iterator of usage records grouped by provider ID - for all resource providers in all trees indicated in the ``root_ids``. 
- """ - # We build up a SQL expression that looks like this: - # SELECT - # rp.id as resource_provider_id - # , rp.uuid as resource_provider_uuid - # , inv.resource_class_id - # , inv.total - # , inv.reserved - # , inv.allocation_ratio - # , inv.max_unit - # , usage.used - # FROM resource_providers AS rp - # LEFT JOIN inventories AS inv - # ON rp.id = inv.resource_provider_id - # LEFT JOIN ( - # SELECT resource_provider_id, resource_class_id, SUM(used) as used - # FROM allocations - # JOIN resource_providers - # ON allocations.resource_provider_id = resource_providers.id - # AND resource_providers.root_provider_id IN($root_ids) - # GROUP BY resource_provider_id, resource_class_id - # ) - # AS usages - # ON inv.resource_provider_id = usage.resource_provider_id - # AND inv.resource_class_id = usage.resource_class_id - # WHERE rp.root_provider_id IN ($root_ids) - rpt = sa.alias(_RP_TBL, name="rp") - inv = sa.alias(_INV_TBL, name="inv") - # Build our derived table (subquery in the FROM clause) that sums used - # amounts for resource provider and resource class - derived_alloc_to_rp = sa.join( - _ALLOC_TBL, _RP_TBL, - sa.and_(_ALLOC_TBL.c.resource_provider_id == _RP_TBL.c.id, - _RP_TBL.c.root_provider_id.in_(root_ids))) - usage = sa.alias( - sa.select([ - _ALLOC_TBL.c.resource_provider_id, - _ALLOC_TBL.c.resource_class_id, - sql.func.sum(_ALLOC_TBL.c.used).label('used'), - ]).select_from(derived_alloc_to_rp).group_by( - _ALLOC_TBL.c.resource_provider_id, - _ALLOC_TBL.c.resource_class_id - ), - name='usage') - # Build a join between the resource providers and inventories table - rpt_inv_join = sa.outerjoin(rpt, inv, - rpt.c.id == inv.c.resource_provider_id) - # And then join to the derived table of usages - usage_join = sa.outerjoin( - rpt_inv_join, - usage, - sa.and_( - usage.c.resource_provider_id == inv.c.resource_provider_id, - usage.c.resource_class_id == inv.c.resource_class_id, - ), - ) - query = sa.select([ - rpt.c.id.label("resource_provider_id"), - rpt.c.uuid.label("resource_provider_uuid"), - inv.c.resource_class_id, - inv.c.total, - inv.c.reserved, - inv.c.allocation_ratio, - inv.c.max_unit, - usage.c.used, - ]).select_from(usage_join).where(rpt.c.root_provider_id.in_(root_ids)) - return ctx.session.execute(query).fetchall() - - -@db_api.placement_context_manager.reader -def _get_provider_ids_having_any_trait(ctx, traits): - """Returns a list of resource provider internal IDs that have ANY of the - supplied traits. - - :param ctx: Session context to use - :param traits: A map, keyed by trait string name, of trait internal IDs, at - least one of which each provider must have associated with - it. - :raise ValueError: If traits is empty or None. - """ - if not traits: - raise ValueError(_('traits must not be empty')) - - rptt = sa.alias(_RP_TRAIT_TBL, name="rpt") - sel = sa.select([rptt.c.resource_provider_id]) - sel = sel.where(rptt.c.trait_id.in_(traits.values())) - sel = sel.group_by(rptt.c.resource_provider_id) - return [r[0] for r in ctx.session.execute(sel)] - - -@db_api.placement_context_manager.reader -def _get_provider_ids_having_all_traits(ctx, required_traits): - """Returns a list of resource provider internal IDs that have ALL of the - required traits. - - NOTE: Don't call this method with no required_traits. - - :param ctx: Session context to use - :param required_traits: A map, keyed by trait string name, of required - trait internal IDs that each provider must have - associated with it - :raise ValueError: If required_traits is empty or None. 
- """ - if not required_traits: - raise ValueError(_('required_traits must not be empty')) - - rptt = sa.alias(_RP_TRAIT_TBL, name="rpt") - sel = sa.select([rptt.c.resource_provider_id]) - sel = sel.where(rptt.c.trait_id.in_(required_traits.values())) - sel = sel.group_by(rptt.c.resource_provider_id) - # Only get the resource providers that have ALL the required traits, so we - # need to GROUP BY the resource provider and ensure that the - # COUNT(trait_id) is equal to the number of traits we are requiring - num_traits = len(required_traits) - cond = sa.func.count(rptt.c.trait_id) == num_traits - sel = sel.having(cond) - return [r[0] for r in ctx.session.execute(sel)] - - -@db_api.placement_context_manager.reader -def _has_provider_trees(ctx): - """Simple method that returns whether provider trees (i.e. nested resource - providers) are in use in the deployment at all. This information is used to - switch code paths when attempting to retrieve allocation candidate - information. The code paths are eminently easier to execute and follow for - non-nested scenarios... - - NOTE(jaypipes): The result of this function can be cached extensively. - """ - sel = sa.select([_RP_TBL.c.id]) - sel = sel.where(_RP_TBL.c.parent_provider_id.isnot(None)) - sel = sel.limit(1) - res = ctx.session.execute(sel).fetchall() - return len(res) > 0 - - -@db_api.placement_context_manager.reader -def _get_provider_ids_matching(ctx, resources, required_traits, - forbidden_traits, member_of=None): - """Returns a list of tuples of (internal provider ID, root provider ID) - that have available inventory to satisfy all the supplied requests for - resources. - - :note: This function is used for scenarios that do NOT involve sharing - providers. - - :param ctx: Session context to use - :param resources: A dict, keyed by resource class ID, of the amount - requested of that resource class. - :param required_traits: A map, keyed by trait string name, of required - trait internal IDs that each provider must have - associated with it - :param forbidden_traits: A map, keyed by trait string name, of forbidden - trait internal IDs that each provider must not - have associated with it - :param member_of: An optional list of list of aggregate UUIDs. If provided, - the allocation_candidates returned will only be for - resource providers that are members of one or more of the - supplied aggregates of each aggregate UUID list. - """ - trait_rps = None - forbidden_rp_ids = None - if required_traits: - trait_rps = _get_provider_ids_having_all_traits(ctx, required_traits) - if not trait_rps: - return [] - if forbidden_traits: - forbidden_rp_ids = _get_provider_ids_having_any_trait( - ctx, forbidden_traits) - - rpt = sa.alias(_RP_TBL, name="rp") - - rc_name_map = { - rc_id: _RC_CACHE.string_from_id(rc_id).lower() for rc_id in resources - } - - # Dict, keyed by resource class ID, of an aliased table object for the - # inventories table winnowed to only that resource class. - inv_tables = { - rc_id: sa.alias(_INV_TBL, name='inv_%s' % rc_name_map[rc_id]) - for rc_id in resources - } - - # Dict, keyed by resource class ID, of a derived table (subquery in the - # FROM clause or JOIN) against the allocations table winnowed to only that - # resource class, grouped by resource provider. 
- usage_tables = { - rc_id: sa.alias( - sa.select([ - _ALLOC_TBL.c.resource_provider_id, - sql.func.sum(_ALLOC_TBL.c.used).label('used'), - ]).where( - _ALLOC_TBL.c.resource_class_id == rc_id - ).group_by( - _ALLOC_TBL.c.resource_provider_id - ), - name='usage_%s' % rc_name_map[rc_id], - ) - for rc_id in resources - } - - sel = sa.select([rpt.c.id, rpt.c.root_provider_id]) - - # List of the WHERE conditions we build up by iterating over the requested - # resources - where_conds = [] - - # First filter by the resource providers that had all the required traits - if trait_rps: - where_conds.append(rpt.c.id.in_(trait_rps)) - # or have any forbidden trait - if forbidden_rp_ids: - where_conds.append(~rpt.c.id.in_(forbidden_rp_ids)) - - # The chain of joins that we eventually pass to select_from() - join_chain = rpt - - for rc_id, amount in resources.items(): - inv_by_rc = inv_tables[rc_id] - usage_by_rc = usage_tables[rc_id] - - # We can do a more efficient INNER JOIN because we don't have shared - # resource providers to deal with - rp_inv_join = sa.join( - join_chain, inv_by_rc, - sa.and_( - inv_by_rc.c.resource_provider_id == rpt.c.id, - # Add a join condition winnowing this copy of inventories table - # to only the resource class being analyzed in this loop... - inv_by_rc.c.resource_class_id == rc_id, - ), - ) - rp_inv_usage_join = sa.outerjoin( - rp_inv_join, usage_by_rc, - inv_by_rc.c.resource_provider_id == - usage_by_rc.c.resource_provider_id, - ) - join_chain = rp_inv_usage_join - - usage_cond = sa.and_( - ( - (sql.func.coalesce(usage_by_rc.c.used, 0) + amount) <= - (inv_by_rc.c.total - inv_by_rc.c.reserved) * - inv_by_rc.c.allocation_ratio - ), - inv_by_rc.c.min_unit <= amount, - inv_by_rc.c.max_unit >= amount, - amount % inv_by_rc.c.step_size == 0, - ) - where_conds.append(usage_cond) - - # If 'member_of' has values, do a separate lookup to identify the - # resource providers that meet the member_of constraints. - if member_of: - rps_in_aggs = _provider_ids_matching_aggregates(ctx, member_of) - if not rps_in_aggs: - # Short-circuit. The user either asked for a non-existing - # aggregate or there were no resource providers that matched - # the requirements... - return [] - where_conds.append(rpt.c.id.in_(rps_in_aggs)) - - sel = sel.select_from(join_chain) - sel = sel.where(sa.and_(*where_conds)) - - return [(r[0], r[1]) for r in ctx.session.execute(sel)] - - -@db_api.placement_context_manager.reader -def _provider_aggregates(ctx, rp_ids): - """Given a list of resource provider internal IDs, returns a dict, - keyed by those provider IDs, of sets of aggregate ids associated - with that provider. - - :raises: ValueError when rp_ids is empty. - - :param ctx: nova.context.RequestContext object - :param rp_ids: list of resource provider IDs - """ - if not rp_ids: - raise ValueError(_("Expected rp_ids to be a list of resource provider " - "internal IDs, but got an empty list.")) - - rpat = sa.alias(_RP_AGG_TBL, name='rpat') - sel = sa.select([rpat.c.resource_provider_id, - rpat.c.aggregate_id]) - sel = sel.where(rpat.c.resource_provider_id.in_(rp_ids)) - res = collections.defaultdict(set) - for r in ctx.session.execute(sel): - res[r[0]].add(r[1]) - return res - - -@db_api.placement_context_manager.reader -def _get_providers_with_resource(ctx, rc_id, amount): - """Returns a set of tuples of (provider ID, root provider ID) of providers - that satisfy the request for a single resource class. 
-
-    :param ctx: Session context to use
-    :param rc_id: Internal ID of resource class to check inventory for
-    :param amount: Amount of resource being requested
-    """
-    # SELECT rp.id, rp.root_provider_id
-    # FROM resource_providers AS rp
-    # JOIN inventories AS inv
-    #  ON rp.id = inv.resource_provider_id
-    #  AND inv.resource_class_id = $RC_ID
-    # LEFT JOIN (
-    #  SELECT
-    #    alloc.resource_provider_id,
-    #    SUM(alloc.used) AS used
-    #  FROM allocations AS alloc
-    #  WHERE alloc.resource_class_id = $RC_ID
-    #  GROUP BY alloc.resource_provider_id
-    # ) AS usage
-    #  ON inv.resource_provider_id = usage.resource_provider_id
-    # WHERE
-    #  used + $AMOUNT <= ((total - reserved) * inv.allocation_ratio)
-    #  AND inv.min_unit <= $AMOUNT
-    #  AND inv.max_unit >= $AMOUNT
-    #  AND $AMOUNT % inv.step_size == 0
-    rpt = sa.alias(_RP_TBL, name="rp")
-    inv = sa.alias(_INV_TBL, name="inv")
-    allocs = sa.alias(_ALLOC_TBL, name="alloc")
-    usage = sa.select([
-        allocs.c.resource_provider_id,
-        sql.func.sum(allocs.c.used).label('used')])
-    usage = usage.where(allocs.c.resource_class_id == rc_id)
-    usage = usage.group_by(allocs.c.resource_provider_id)
-    usage = sa.alias(usage, name="usage")
-    where_conds = [
-        sql.func.coalesce(usage.c.used, 0) + amount <= (
-            (inv.c.total - inv.c.reserved) * inv.c.allocation_ratio),
-        inv.c.min_unit <= amount,
-        inv.c.max_unit >= amount,
-        amount % inv.c.step_size == 0,
-    ]
-    rp_to_inv = sa.join(
-        rpt, inv, sa.and_(
-            rpt.c.id == inv.c.resource_provider_id,
-            inv.c.resource_class_id == rc_id))
-    inv_to_usage = sa.outerjoin(
-        rp_to_inv, usage,
-        inv.c.resource_provider_id == usage.c.resource_provider_id)
-    sel = sa.select([rpt.c.id, rpt.c.root_provider_id])
-    sel = sel.select_from(inv_to_usage)
-    sel = sel.where(sa.and_(*where_conds))
-    res = ctx.session.execute(sel).fetchall()
-    res = set((r[0], r[1]) for r in res)
-    return res
-
-
-@db_api.placement_context_manager.reader
-def _get_trees_with_traits(ctx, rp_ids, required_traits, forbidden_traits):
-    """Given a list of provider IDs, filter them to return a set of tuples of
-    (provider ID, root provider ID) of providers which belong to a tree that
-    can satisfy trait requirements.
-
-    :param ctx: Session context to use
-    :param rp_ids: a set of resource provider IDs
-    :param required_traits: A map, keyed by trait string name, of required
-                            trait internal IDs that each provider TREE must
-                            COLLECTIVELY have associated with it
-    :param forbidden_traits: A map, keyed by trait string name, of trait
-                             internal IDs that a resource provider must
-                             not have.
-    """
-    # We now want to restrict the returned providers to only those provider
-    # trees that have all our required traits.
-    #
-    # The SQL we want looks like this:
-    #
-    # SELECT outer_rp.id, outer_rp.root_provider_id
-    # FROM resource_providers AS outer_rp
-    # JOIN (
-    #   SELECT rp.root_provider_id
-    #   FROM resource_providers AS rp
-    #   # Only if we have required traits...
-    #   INNER JOIN resource_provider_traits AS rptt
-    #   ON rp.id = rptt.resource_provider_id
-    #   AND rptt.trait_id IN ($REQUIRED_TRAIT_IDS)
-    #   # Only if we have forbidden_traits...
-    #   LEFT JOIN resource_provider_traits AS rptt_forbid
-    #   ON rp.id = rptt_forbid.resource_provider_id
-    #   AND rptt_forbid.trait_id IN ($FORBIDDEN_TRAIT_IDS)
-    #   WHERE rp.id IN ($RP_IDS)
-    #   # Only if we have forbidden traits...
-    #   AND rptt_forbid.resource_provider_id IS NULL
-    #   GROUP BY rp.root_provider_id
-    #   # Only if we have required traits...
-    #   HAVING COUNT(DISTINCT rptt.trait_id) = $NUM_REQUIRED_TRAITS
-    # ) AS trees_with_traits
-    # ON outer_rp.root_provider_id = trees_with_traits.root_provider_id
-    rpt = sa.alias(_RP_TBL, name="rp")
-    cond = [rpt.c.id.in_(rp_ids)]
-    subq = sa.select([rpt.c.root_provider_id])
-    subq_join = None
-    if required_traits:
-        rptt = sa.alias(_RP_TRAIT_TBL, name="rptt")
-        rpt_to_rptt = sa.join(
-            rpt, rptt, sa.and_(
-                rpt.c.id == rptt.c.resource_provider_id,
-                rptt.c.trait_id.in_(required_traits.values())))
-        subq_join = rpt_to_rptt
-        # Only get the resource providers that have ALL the required traits,
-        # so we need to GROUP BY the root provider and ensure that the
-        # COUNT(trait_id) is equal to the number of traits we are requiring
-        num_traits = len(required_traits)
-        having_cond = sa.func.count(sa.distinct(rptt.c.trait_id)) == num_traits
-        subq = subq.having(having_cond)
-
-    # Tack on an additional LEFT JOIN clause inside the derived table if we've
-    # got forbidden traits in the mix.
-    if forbidden_traits:
-        rptt_forbid = sa.alias(_RP_TRAIT_TBL, name="rptt_forbid")
-        join_to = rpt
-        if subq_join is not None:
-            join_to = subq_join
-        rpt_to_rptt_forbid = sa.outerjoin(
-            join_to, rptt_forbid, sa.and_(
-                rpt.c.id == rptt_forbid.c.resource_provider_id,
-                rptt_forbid.c.trait_id.in_(forbidden_traits.values())))
-        cond.append(rptt_forbid.c.resource_provider_id == sa.null())
-        subq_join = rpt_to_rptt_forbid
-
-    subq = subq.select_from(subq_join)
-    subq = subq.where(sa.and_(*cond))
-    subq = subq.group_by(rpt.c.root_provider_id)
-    trees_with_traits = sa.alias(subq, name="trees_with_traits")
-
-    outer_rps = sa.alias(_RP_TBL, name="outer_rps")
-    outer_to_subq = sa.join(
-        outer_rps, trees_with_traits,
-        outer_rps.c.root_provider_id == trees_with_traits.c.root_provider_id)
-    sel = sa.select([outer_rps.c.id, outer_rps.c.root_provider_id])
-    sel = sel.select_from(outer_to_subq)
-    res = ctx.session.execute(sel).fetchall()
-
-    return [(rp_id, root_id) for rp_id, root_id in res]
-
-
-@db_api.placement_context_manager.reader
-def _get_trees_matching_all(ctx, resources, required_traits, forbidden_traits,
-                            sharing, member_of):
-    """Returns a list of two-tuples (provider internal ID, root provider
-    internal ID) for providers that satisfy the request for resources.
-
-    If traits are also required, this function only returns results where the
-    set of providers within a tree that satisfy the resource request
-    collectively have all the required traits associated with them. This means
-    that given the following provider tree:
-
-    cn1
-     |
-     --> pf1 (SRIOV_NET_VF:2)
-     |
-     --> pf2 (SRIOV_NET_VF:1, HW_NIC_OFFLOAD_GENEVE)
-
-    If a user requests 1 SRIOV_NET_VF resource and no required traits, this
-    function will return both pf1 and pf2. However, a request for 2
-    SRIOV_NET_VF and a required trait of HW_NIC_OFFLOAD_GENEVE will return no
-    results (since pf1 is the only provider with enough inventory of
-    SRIOV_NET_VF, but it does not have the required HW_NIC_OFFLOAD_GENEVE
-    trait).
-
-    :note: This function is used for scenarios to get results for a
-    RequestGroup with use_same_provider=False. In this scenario, we are able
-    to use multiple providers within the same provider tree including sharing
-    providers to satisfy different resources involved in a single RequestGroup.
-
-    :param ctx: Session context to use
-    :param resources: A dict, keyed by resource class ID, of the amount
-                      requested of that resource class.
-    :param required_traits: A map, keyed by trait string name, of required
-                            trait internal IDs that each provider TREE must
-                            COLLECTIVELY have associated with it
-    :param forbidden_traits: A map, keyed by trait string name, of trait
-                             internal IDs that a resource provider must
-                             not have.
-    :param sharing: dict, keyed by resource class ID, of lists of resource
-                    provider IDs that share that resource class and can
-                    contribute to the overall allocation request
-    :param member_of: An optional list of lists of aggregate UUIDs. If
-                      provided, the allocation_candidates returned will only be
-                      for resource providers that are members of one or more of
-                      the supplied aggregates in each aggregate UUID list.
-    """
-    # We first grab the provider trees that have nodes that meet the request
-    # for each resource class. Once we have this information, we'll then do a
-    # followup query to winnow the set of resource providers to only those
-    # provider *trees* that have all of the required traits.
-    provs_with_inv = set()
-    # provs_with_inv is a set of three-tuples with the second element being
-    # the root provider ID and the third being resource class ID. Get the set
-    # of root provider IDs and get all trees that collectively have all
-    # required traits.
-    trees_with_inv = set()
-
-    for rc_id, amount in resources.items():
-        rc_provs_with_inv = _get_providers_with_resource(ctx, rc_id, amount)
-        if not rc_provs_with_inv:
-            # If there are no providers that have one of the resource classes,
-            # then we can short-circuit
-            return []
-        rc_trees = set(p[1] for p in rc_provs_with_inv)
-        provs_with_inv |= set((p[0], p[1], rc_id) for p in rc_provs_with_inv)
-
-        sharing_providers = sharing.get(rc_id)
-        if sharing_providers:
-            # There are sharing providers for this resource class, so we
-            # should also get combinations of (sharing provider, anchor root)
-            # in addition to (non-sharing provider, anchor root) we already
-            # have.
-            rc_provs_with_inv = _anchors_for_sharing_providers(
-                ctx, sharing_providers, get_id=True)
-            rc_provs_with_inv = set(
-                (p[0], p[1], rc_id) for p in rc_provs_with_inv)
-            rc_trees |= set(p[1] for p in rc_provs_with_inv)
-            provs_with_inv |= rc_provs_with_inv
-
-        # Filter trees_with_inv to have only trees with enough inventories
-        # for this resource class. Here "tree" includes sharing providers
-        # in its terminology
-        if trees_with_inv:
-            trees_with_inv &= rc_trees
-        else:
-            trees_with_inv = rc_trees
-
-        if not trees_with_inv:
-            return []
-
-    # Select only those tuples where there are providers for all requested
-    # resource classes (trees_with_inv contains the root provider IDs of those
-    # trees that contain all our requested resources)
-    provs_with_inv = set(p for p in provs_with_inv if p[1] in trees_with_inv)
-
-    if not provs_with_inv:
-        return []
-
-    # If 'member_of' has values, do a separate lookup to identify the
-    # resource providers that meet the member_of constraints.
-    if member_of:
-        rps_in_aggs = _provider_ids_matching_aggregates(ctx, member_of,
-                                                        rp_ids=trees_with_inv)
-        if not rps_in_aggs:
-            # Short-circuit. The user either asked for a non-existing
-            # aggregate or there were no resource providers that matched
-            # the requirements...
- return [] - provs_with_inv = set(p for p in provs_with_inv if p[1] in rps_in_aggs) - - if (not required_traits and not forbidden_traits) or ( - any(sharing.values())): - # If there were no traits required, there's no difference in how we - # calculate allocation requests between nested and non-nested - # environments, so just short-circuit and return. Or if sharing - # providers are in play, we check the trait constraints later - # in _alloc_candidates_multiple_providers(), so skip. - return list(provs_with_inv) - - # Return the providers where the providers have the available inventory - # capacity and that set of providers (grouped by their tree) have all - # of the required traits and none of the forbidden traits - rp_ids_with_inv = set(p[0] for p in provs_with_inv) - rp_tuples_with_trait = _get_trees_with_traits( - ctx, rp_ids_with_inv, required_traits, forbidden_traits) - - ret = [rp_tuple for rp_tuple in provs_with_inv if ( - rp_tuple[0], rp_tuple[1]) in rp_tuples_with_trait] - - return ret - - -def _build_provider_summaries(context, usages, prov_traits): - """Given a list of dicts of usage information and a map of providers to - their associated string traits, returns a dict, keyed by resource provider - ID, of ProviderSummary objects. - - :param context: nova.context.RequestContext object - :param usages: A list of dicts with the following format: - - { - 'resource_provider_id': , - 'resource_provider_uuid': , - 'resource_class_id': , - 'total': integer, - 'reserved': integer, - 'allocation_ratio': float, - } - :param prov_traits: A dict, keyed by internal resource provider ID, of - string trait names associated with that provider - """ - # Before we go creating provider summary objects, first grab all the - # provider information (including root, parent and UUID information) for - # all providers involved in our operation - rp_ids = set(usage['resource_provider_id'] for usage in usages) - provider_ids = _provider_ids_from_rp_ids(context, rp_ids) - - # Build up a dict, keyed by internal resource provider ID, of - # ProviderSummary objects containing one or more ProviderSummaryResource - # objects representing the resources the provider has inventory for. - summaries = {} - for usage in usages: - rp_id = usage['resource_provider_id'] - summary = summaries.get(rp_id) - if not summary: - pids = provider_ids[rp_id] - summary = ProviderSummary( - context, - resource_provider=ResourceProvider( - context, id=pids.id, uuid=pids.uuid, - root_provider_uuid=pids.root_uuid, - parent_provider_uuid=pids.parent_uuid), - resources=[], - ) - summaries[rp_id] = summary - - traits = prov_traits[rp_id] - summary.traits = [Trait(context, name=tname) for tname in traits] - - rc_id = usage['resource_class_id'] - if rc_id is None: - # NOTE(tetsuro): This provider doesn't have any inventory itself. - # But we include this provider in summaries since another - # provider in the same tree will be in the "allocation_request". - # Let's skip the following and leave "ProviderSummary.resources" - # field empty. - continue - # NOTE(jaypipes): usage['used'] may be None due to the LEFT JOIN of - # the usages subquery, so we coerce NULL values to 0 here. 
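Editor's note: before the loop body resumes below, here is a tiny, runnable illustration of the NULL coercion and capacity arithmetic the next few lines apply to each usage row (the row values are made up):

```python
# Sketch of placement's effective-capacity arithmetic; 'row' mimics one
# result of the usages LEFT JOIN, where 'used' can be None (NULL).
def capacity(total, reserved, allocation_ratio):
    """Effective capacity as the code below computes it."""
    return int((total - reserved) * allocation_ratio)

row = {'total': 8, 'reserved': 2, 'allocation_ratio': 16.0, 'used': None}
used = row['used'] or 0            # coerce NULL from the LEFT JOIN to 0
cap = capacity(row['total'], row['reserved'], row['allocation_ratio'])
assert (used, cap) == (0, 96)      # (8 - 2) * 16.0 == 96
```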
- used = usage['used'] or 0 - allocation_ratio = usage['allocation_ratio'] - cap = int((usage['total'] - usage['reserved']) * allocation_ratio) - rc_name = _RC_CACHE.string_from_id(rc_id) - rpsr = ProviderSummaryResource( - context, - resource_class=rc_name, - capacity=cap, - used=used, - max_unit=usage['max_unit'], - ) - summary.resources.append(rpsr) - return summaries - - -def _aggregates_associated_with_providers(a, b, prov_aggs): - """quickly check if the two rps are in the same aggregates - - :param a: resource provider ID for first provider - :param b: resource provider ID for second provider - :param prov_aggs: a dict keyed by resource provider IDs, of sets - of aggregate ids associated with that provider - """ - a_aggs = prov_aggs[a] - b_aggs = prov_aggs[b] - return a_aggs & b_aggs - - -def _shared_allocation_request_resources(ctx, ns_rp_id, requested_resources, - sharing, summaries, prov_aggs): - """Returns a dict, keyed by resource class ID, of lists of - AllocationRequestResource objects that represent resources that are - provided by a sharing provider. - - :param ctx: nova.context.RequestContext object - :param ns_rp_id: an internal ID of a non-sharing resource provider - :param requested_resources: dict, keyed by resource class ID, of amounts - being requested for that resource class - :param sharing: dict, keyed by resource class ID, of lists of resource - provider IDs that share that resource class and can - contribute to the overall allocation request - :param summaries: dict, keyed by resource provider ID, of ProviderSummary - objects containing usage and trait information for - resource providers involved in the overall request - :param prov_aggs: dict, keyed by resource provider ID, of sets of - aggregate ids associated with that provider. - """ - res_requests = collections.defaultdict(list) - for rc_id in sharing: - for rp_id in sharing[rc_id]: - aggs_in_both = _aggregates_associated_with_providers( - ns_rp_id, rp_id, prov_aggs) - if not aggs_in_both: - continue - summary = summaries[rp_id] - rp_uuid = summary.resource_provider.uuid - res_req = AllocationRequestResource( - ctx, - resource_provider=ResourceProvider(ctx, uuid=rp_uuid), - resource_class=_RC_CACHE.string_from_id(rc_id), - amount=requested_resources[rc_id], - ) - res_requests[rc_id].append(res_req) - return res_requests - - -def _allocation_request_for_provider(ctx, requested_resources, provider): - """Returns an AllocationRequest object containing AllocationRequestResource - objects for each resource class in the supplied requested resources dict. - - :param ctx: nova.context.RequestContext object - :param requested_resources: dict, keyed by resource class ID, of amounts - being requested for that resource class - :param provider: ResourceProvider object representing the provider of the - resources. - """ - resource_requests = [ - AllocationRequestResource( - ctx, resource_provider=provider, - resource_class=_RC_CACHE.string_from_id(rc_id), - amount=amount, - ) for rc_id, amount in requested_resources.items() - ] - # NOTE(efried): This method only produces an AllocationRequest with its - # anchor in its own tree. If the provider is a sharing provider, the - # caller needs to identify the other anchors with which it might be - # associated. 
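Editor's note: the function's closing `return` follows immediately. As a rough sketch of the shape of what `_allocation_request_for_provider` assembles, with plain dicts standing in for the versioned objects and a hypothetical `RC_NAMES` map standing in for `_RC_CACHE`:

```python
# Toy rendering of the AllocationRequest structure; dicts stand in for
# the oslo versioned objects, and RC_NAMES is an illustrative stand-in
# for the resource class ID -> name cache.
RC_NAMES = {0: 'VCPU', 1: 'MEMORY_MB'}

def allocation_request_for_provider(requested, provider_uuid, root_uuid):
    return {
        'anchor_root_provider_uuid': root_uuid,
        'resource_requests': [
            {'resource_provider': provider_uuid,
             'resource_class': RC_NAMES[rc_id],
             'amount': amount}
            for rc_id, amount in requested.items()
        ],
    }

req = allocation_request_for_provider({0: 4, 1: 2048}, 'pf1-uuid', 'cn1-uuid')
assert len(req['resource_requests']) == 2
```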
-    return AllocationRequest(
-            ctx, resource_requests=resource_requests,
-            anchor_root_provider_uuid=provider.root_provider_uuid)
-
-
-def _check_traits_for_alloc_request(res_requests, summaries, prov_traits,
-                                    required_traits, forbidden_traits):
-    """Given a list of AllocationRequestResource objects, check if that
-    combination can satisfy trait constraints. If it can, returns all
-    resource provider internal IDs in play, else return an empty list.
-
-    TODO(tetsuro): For optimization, we should move this logic to SQL in
-    _get_trees_matching_all().
-
-    :param res_requests: a list of AllocationRequestResource objects that have
-                         resource providers to be checked if they collectively
-                         satisfy trait constraints in the required_traits and
-                         forbidden_traits parameters.
-    :param summaries: dict, keyed by resource provider ID, of ProviderSummary
-                      objects containing usage and trait information for
-                      resource providers involved in the overall request
-    :param prov_traits: A dict, keyed by internal resource provider ID, of
-                        string trait names associated with that provider
-    :param required_traits: A map, keyed by trait string name, of required
-                            trait internal IDs that each *allocation request's
-                            set of providers* must *collectively* have
-                            associated with them
-    :param forbidden_traits: A map, keyed by trait string name, of trait
-                             internal IDs that a resource provider must
-                             not have.
-    """
-    all_prov_ids = []
-    all_traits = set()
-    for res_req in res_requests:
-        rp_uuid = res_req.resource_provider.uuid
-        for rp_id, summary in summaries.items():
-            if summary.resource_provider.uuid == rp_uuid:
-                break
-        rp_traits = set(prov_traits.get(rp_id, []))
-
-        # Check if there are forbidden_traits
-        conflict_traits = set(forbidden_traits) & set(rp_traits)
-        if conflict_traits:
-            LOG.debug('Excluding resource provider %s, it has '
-                      'forbidden traits: (%s).',
-                      rp_id, ', '.join(conflict_traits))
-            return []
-
-        all_prov_ids.append(rp_id)
-        all_traits |= rp_traits
-
-    # Check if there are missing traits
-    missing_traits = set(required_traits) - all_traits
-    if missing_traits:
-        LOG.debug('Excluding a set of allocation candidates %s: '
-                  'missing traits %s are not satisfied.',
-                  all_prov_ids, ','.join(missing_traits))
-        return []
-
-    return all_prov_ids
-
-
-def _alloc_candidates_single_provider(ctx, requested_resources, rp_tuples):
-    """Returns a tuple of (allocation requests, provider summaries) for a
-    supplied set of requested resource amounts and resource providers. The
-    supplied resource providers have capacity to satisfy ALL of the resources
-    in the requested resources as well as ALL required traits that were
-    requested by the user.
-
-    This is used in two circumstances:
-    - To get results for a RequestGroup with use_same_provider=True.
-    - As an optimization when no sharing providers satisfy any of the requested
-      resources, and nested providers are not in play.
-    In these scenarios, we can more efficiently build the list of
-    AllocationRequest and ProviderSummary objects due to not having to
-    determine requests across multiple providers.
-
-    :param ctx: nova.context.RequestContext object
-    :param requested_resources: dict, keyed by resource class ID, of amounts
-                                being requested for that resource class
-    :param rp_tuples: List of two-tuples of (provider ID, root provider ID)s
-                      for providers that matched the requested resources
-    """
-    if not rp_tuples:
-        return [], []
-
-    # Get all root resource provider IDs.
- root_ids = set(p[1] for p in rp_tuples) - - # Grab usage summaries for each provider - usages = _get_usages_by_provider_tree(ctx, root_ids) - - # Get a dict, keyed by resource provider internal ID, of trait string names - # that provider has associated with it - prov_traits = _get_traits_by_provider_tree(ctx, root_ids) - - # Get a dict, keyed by resource provider internal ID, of ProviderSummary - # objects for all providers - summaries = _build_provider_summaries(ctx, usages, prov_traits) - - # Next, build up a list of allocation requests. These allocation requests - # are AllocationRequest objects, containing resource provider UUIDs, - # resource class names and amounts to consume from that resource provider - alloc_requests = [] - for rp_id, root_id in rp_tuples: - rp_summary = summaries[rp_id] - req_obj = _allocation_request_for_provider( - ctx, requested_resources, rp_summary.resource_provider) - alloc_requests.append(req_obj) - # If this is a sharing provider, we have to include an extra - # AllocationRequest for every possible anchor. - traits = [trait.name for trait in rp_summary.traits] - if os_traits.MISC_SHARES_VIA_AGGREGATE in traits: - anchors = set([p[1] for p in _anchors_for_sharing_providers( - ctx, [rp_summary.resource_provider.id])]) - for anchor in anchors: - # We already added self - if anchor == rp_summary.resource_provider.root_provider_uuid: - continue - req_obj = copy.deepcopy(req_obj) - req_obj.anchor_root_provider_uuid = anchor - alloc_requests.append(req_obj) - return alloc_requests, list(summaries.values()) - - -def _alloc_candidates_multiple_providers(ctx, requested_resources, - required_traits, forbidden_traits, rp_tuples): - """Returns a tuple of (allocation requests, provider summaries) for a - supplied set of requested resource amounts and tuples of - (rp_id, root_id, rc_id). The supplied resource provider trees have - capacity to satisfy ALL of the resources in the requested resources as - well as ALL required traits that were requested by the user. - - This is a code path to get results for a RequestGroup with - use_same_provider=False. In this scenario, we are able to use multiple - providers within the same provider tree including sharing providers to - satisfy different resources involved in a single request group. - - :param ctx: nova.context.RequestContext object - :param requested_resources: dict, keyed by resource class ID, of amounts - being requested for that resource class - :param required_traits: A map, keyed by trait string name, of required - trait internal IDs that each *allocation request's - set of providers* must *collectively* have - associated with them - :param forbidden_traits: A map, keyed by trait string name, of trait - internal IDs that a resource provider must - not have. - :param rp_tuples: List of tuples of (provider ID, anchor root provider ID, - resource class ID)s for providers that matched the - requested resources - """ - if not rp_tuples: - return [], [] - - # Get all the root resource provider IDs. We should include the first - # values of rp_tuples because while sharing providers are root providers, - # they have their "anchor" providers for the second value. 
-    root_ids = set(p[0] for p in rp_tuples) | set(p[1] for p in rp_tuples)
-
-    # Grab usage summaries for each provider in the trees
-    usages = _get_usages_by_provider_tree(ctx, root_ids)
-
-    # Get a dict, keyed by resource provider internal ID, of trait string names
-    # that provider has associated with it
-    prov_traits = _get_traits_by_provider_tree(ctx, root_ids)
-
-    # Get a dict, keyed by resource provider internal ID, of ProviderSummary
-    # objects for all providers
-    summaries = _build_provider_summaries(ctx, usages, prov_traits)
-
-    # Get a dict, keyed by root provider internal ID, of a dict, keyed by
-    # resource class internal ID, of lists of AllocationRequestResource objects
-    tree_dict = collections.defaultdict(lambda: collections.defaultdict(list))
-
-    for rp_id, root_id, rc_id in rp_tuples:
-        rp_summary = summaries[rp_id]
-        tree_dict[root_id][rc_id].append(
-            AllocationRequestResource(
-                ctx, resource_provider=rp_summary.resource_provider,
-                resource_class=_RC_CACHE.string_from_id(rc_id),
-                amount=requested_resources[rc_id]))
-
-    # Next, build up a list of allocation requests. These allocation requests
-    # are AllocationRequest objects, containing resource provider UUIDs,
-    # resource class names and amounts to consume from that resource provider
-    alloc_requests = []
-
-    # Build a list of lists of provider internal IDs that end up in
-    # allocation request objects. This is used to ensure we don't end up
-    # having allocation requests with duplicate sets of resource providers.
-    alloc_prov_ids = []
-
-    # Let's look into each tree
-    for root_id, alloc_dict in tree_dict.items():
-        # Get request_groups, which is a list of lists of
-        # AllocationRequestResource(ARR) per requested resource class(rc).
-        # For example, if we have the alloc_dict:
-        # {rc1_id: [ARR(rc1, rp1), ARR(rc1, rp2)],
-        #  rc2_id: [ARR(rc2, rp1), ARR(rc2, rp2)],
-        #  rc3_id: [ARR(rc3, rp1)]}
-        # then the request_groups would be something like
-        # [[ARR(rc1, rp1), ARR(rc1, rp2)],
-        #  [ARR(rc2, rp1), ARR(rc2, rp2)],
-        #  [ARR(rc3, rp1)]]
-        # , which should be ordered by the resource class id.
-        request_groups = [val for key, val in sorted(alloc_dict.items())]
-
-        root_summary = summaries[root_id]
-        root_uuid = root_summary.resource_provider.uuid
-
-        # Using itertools.product, we get all the combinations of resource
-        # providers in a tree.
-        # For example, the sample in the comment above becomes:
-        # [(ARR(rc1, rp1), ARR(rc2, rp1), ARR(rc3, rp1)),
-        #  (ARR(rc1, rp1), ARR(rc2, rp2), ARR(rc3, rp1)),
-        #  (ARR(rc1, rp2), ARR(rc2, rp1), ARR(rc3, rp1)),
-        #  (ARR(rc1, rp2), ARR(rc2, rp2), ARR(rc3, rp1))]
-        for res_requests in itertools.product(*request_groups):
-            all_prov_ids = _check_traits_for_alloc_request(res_requests,
-                summaries, prov_traits, required_traits, forbidden_traits)
-            if (not all_prov_ids) or (all_prov_ids in alloc_prov_ids):
-                # This combination doesn't satisfy trait constraints,
-                # ...or we already have this permutation, which happens
-                # when multiple sharing providers with different resource
-                # classes are in one request.
-                continue
-            alloc_prov_ids.append(all_prov_ids)
-            alloc_requests.append(
-                AllocationRequest(ctx, resource_requests=list(res_requests),
-                                  anchor_root_provider_uuid=root_uuid)
-            )
-    return alloc_requests, list(summaries.values())
-
-
-@db_api.placement_context_manager.reader
-def _get_traits_by_provider_tree(ctx, root_ids):
-    """Returns a dict, keyed by provider IDs for all resource providers
-    in all trees indicated in the ``root_ids``, of string trait names
-    associated with that provider.
-
-    :raises: ValueError when root_ids is empty.
-
-    :param ctx: nova.context.RequestContext object
-    :param root_ids: list of root resource provider IDs
-    """
-    if not root_ids:
-        raise ValueError(_("Expected root_ids to be a list of root resource "
-                           "provider internal IDs, but got an empty list."))
-
-    rpt = sa.alias(_RP_TBL, name='rpt')
-    rptt = sa.alias(_RP_TRAIT_TBL, name='rptt')
-    tt = sa.alias(_TRAIT_TBL, name='t')
-    rpt_rptt = sa.join(rpt, rptt, rpt.c.id == rptt.c.resource_provider_id)
-    j = sa.join(rpt_rptt, tt, rptt.c.trait_id == tt.c.id)
-    sel = sa.select([rptt.c.resource_provider_id, tt.c.name]).select_from(j)
-    sel = sel.where(rpt.c.root_provider_id.in_(root_ids))
-    res = collections.defaultdict(list)
-    for r in ctx.session.execute(sel):
-        res[r[0]].append(r[1])
-    return res
-
-
-@db_api.placement_context_manager.reader
-def _trait_ids_from_names(ctx, names):
-    """Given a list of string trait names, returns a dict, keyed by those
-    string names, of the corresponding internal integer trait ID.
-
-    :raises: ValueError when names is empty.
-
-    :param ctx: nova.context.RequestContext object
-    :param names: list of string trait names
-    """
-    if not names:
-        raise ValueError(_("Expected names to be a list of string trait "
-                           "names, but got an empty list."))
-
-    # Avoid SAWarnings about unicode types...
-    unames = map(six.text_type, names)
-    tt = sa.alias(_TRAIT_TBL, name='t')
-    sel = sa.select([tt.c.name, tt.c.id]).where(tt.c.name.in_(unames))
-    return {r[0]: r[1] for r in ctx.session.execute(sel)}
-
-
-def _rp_rc_key(rp, rc):
-    """Creates hashable key unique to a provider + resource class."""
-    return rp.uuid, rc
-
-
-def _consolidate_allocation_requests(areqs):
-    """Consolidates a list of AllocationRequest into one.
-
-    :param areqs: A list containing one AllocationRequest for each input
-            RequestGroup. This may mean that multiple resource_requests
-            contain resource amounts of the same class from the same provider.
-    :return: A single consolidated AllocationRequest, containing no
-            resource_requests with duplicated (resource_provider,
-            resource_class).
-    """
-    # Construct a dict, keyed by resource provider UUID + resource class, of
-    # AllocationRequestResource, consolidating as we go.
-    arrs_by_rp_rc = {}
-    # areqs must have at least one element. Save the anchor to populate the
-    # returned AllocationRequest.
-    anchor_rp_uuid = areqs[0].anchor_root_provider_uuid
-    for areq in areqs:
-        # Sanity check: the anchor should be the same for every areq
-        if anchor_rp_uuid != areq.anchor_root_provider_uuid:
-            # This should never happen. If it does, it's a dev bug.
- raise ValueError( - _("Expected every AllocationRequest in " - "`_consolidate_allocation_requests` to have the same " - "anchor!")) - for arr in areq.resource_requests: - key = _rp_rc_key(arr.resource_provider, arr.resource_class) - if key not in arrs_by_rp_rc: - arrs_by_rp_rc[key] = copy.deepcopy(arr) - else: - arrs_by_rp_rc[key].amount += arr.amount - return AllocationRequest( - resource_requests=list(arrs_by_rp_rc.values()), - anchor_root_provider_uuid=anchor_rp_uuid) - - -def _satisfies_group_policy(areqs, group_policy, num_granular_groups): - """Applies group_policy to a list of AllocationRequest. - - Returns True or False, indicating whether this list of - AllocationRequest satisfies group_policy, as follows: - - * "isolate": Each AllocationRequest with use_same_provider=True - is satisfied by a single resource provider. If the "isolate" - policy is in effect, each such AllocationRequest must be - satisfied by a *unique* resource provider. - * "none" or None: Always returns True. - - :param areqs: A list containing one AllocationRequest for each input - RequestGroup. - :param group_policy: String indicating how RequestGroups should interact - with each other. If the value is "isolate", we will return False - if AllocationRequests that came from RequestGroups keyed by - nonempty suffixes are satisfied by the same provider. - :param num_granular_groups: The number of granular (use_same_provider=True) - RequestGroups in the request. - :return: True if areqs satisfies group_policy; False otherwise. - """ - if group_policy != 'isolate': - # group_policy="none" means no filtering - return True - - # The number of unique resource providers referenced in the request groups - # having use_same_provider=True must be equal to the number of granular - # groups. - num_granular_groups_in_areqs = len(set( - # We can reliably use the first resource_request's provider: all the - # resource_requests are satisfied by the same provider by definition - # because use_same_provider is True. - areq.resource_requests[0].resource_provider.uuid - for areq in areqs - if areq.use_same_provider)) - if num_granular_groups == num_granular_groups_in_areqs: - return True - LOG.debug('Excluding the following set of AllocationRequest because ' - 'group_policy=isolate and the number of granular groups in the ' - 'set (%d) does not match the number of granular groups in the ' - 'request (%d): %s', - num_granular_groups_in_areqs, num_granular_groups, str(areqs)) - return False - - -def _exceeds_capacity(areq, psum_res_by_rp_rc): - """Checks a (consolidated) AllocationRequest against the provider summaries - to ensure that it does not exceed capacity. - - Exceeding capacity can mean the total amount (already used plus this - allocation) exceeds the total inventory amount; or this allocation exceeds - the max_unit in the inventory record. - - :param areq: An AllocationRequest produced by the - `_consolidate_allocation_requests` method. - :param psum_res_by_rp_rc: A dict, keyed by provider + resource class via - _rp_rc_key, of ProviderSummaryResource. - :return: True if areq exceeds capacity; False otherwise. 
- """ - for arr in areq.resource_requests: - key = _rp_rc_key(arr.resource_provider, arr.resource_class) - psum_res = psum_res_by_rp_rc[key] - if psum_res.used + arr.amount > psum_res.capacity: - LOG.debug('Excluding the following AllocationRequest because used ' - '(%d) + amount (%d) > capacity (%d) for resource class ' - '%s: %s', - psum_res.used, arr.amount, psum_res.capacity, - arr.resource_class, str(areq)) - return True - if arr.amount > psum_res.max_unit: - LOG.debug('Excluding the following AllocationRequest because ' - 'amount (%d) > max_unit (%d) for resource class %s: %s', - arr.amount, psum_res.max_unit, arr.resource_class, - str(areq)) - return True - return False - - -def _merge_candidates(candidates, group_policy=None): - """Given a dict, keyed by RequestGroup suffix, of tuples of - (allocation_requests, provider_summaries), produce a single tuple of - (allocation_requests, provider_summaries) that appropriately incorporates - the elements from each. - - Each (alloc_reqs, prov_sums) in `candidates` satisfies one RequestGroup. - This method creates a list of alloc_reqs, *each* of which satisfies *all* - of the RequestGroups. - - For that merged list of alloc_reqs, a corresponding provider_summaries is - produced. - - :param candidates: A dict, keyed by integer suffix or '', of tuples of - (allocation_requests, provider_summaries) to be merged. - :param group_policy: String indicating how RequestGroups should interact - with each other. If the value is "isolate", we will filter out - candidates where AllocationRequests that came from RequestGroups - keyed by nonempty suffixes are satisfied by the same provider. - :return: A tuple of (allocation_requests, provider_summaries). - """ - # Build a dict, keyed by anchor root provider UUID, of dicts, keyed by - # suffix, of nonempty lists of AllocationRequest. Each inner dict must - # possess all of the suffix keys to be viable (i.e. contains at least - # one AllocationRequest per RequestGroup). - # - # areq_lists_by_anchor = - # { anchor_root_provider_uuid: { - # '': [AllocationRequest, ...], \ This dict must contain - # '1': [AllocationRequest, ...], \ exactly one nonempty list per - # ... / suffix to be viable. That - # '42': [AllocationRequest, ...], / filtering is done later. - # }, - # ... - # } - areq_lists_by_anchor = collections.defaultdict( - lambda: collections.defaultdict(list)) - # Save off all the provider summaries lists - we'll use 'em later. - all_psums = [] - # Construct a dict, keyed by resource provider + resource class, of - # ProviderSummaryResource. This will be used to do a final capacity - # check/filter on each merged AllocationRequest. - psum_res_by_rp_rc = {} - for suffix, (areqs, psums) in candidates.items(): - for areq in areqs: - anchor = areq.anchor_root_provider_uuid - areq_lists_by_anchor[anchor][suffix].append(areq) - for psum in psums: - all_psums.append(psum) - for psum_res in psum.resources: - key = _rp_rc_key( - psum.resource_provider, psum_res.resource_class) - psum_res_by_rp_rc[key] = psum_res - - # Create all combinations picking one AllocationRequest from each list - # for each anchor. - areqs = [] - all_suffixes = set(candidates) - num_granular_groups = len(all_suffixes - set([''])) - for areq_lists_by_suffix in areq_lists_by_anchor.values(): - # Filter out any entries that don't have allocation requests for - # *all* suffixes (i.e. 
all RequestGroups) - if set(areq_lists_by_suffix) != all_suffixes: - continue - # We're using itertools.product to go from this: - # areq_lists_by_suffix = { - # '': [areq__A, areq__B, ...], - # '1': [areq_1_A, areq_1_B, ...], - # ... - # '42': [areq_42_A, areq_42_B, ...], - # } - # to this: - # [ [areq__A, areq_1_A, ..., areq_42_A], Each of these lists is one - # [areq__A, areq_1_A, ..., areq_42_B], areq_list in the loop below. - # [areq__A, areq_1_B, ..., areq_42_A], each areq_list contains one - # [areq__A, areq_1_B, ..., areq_42_B], AllocationRequest from each - # [areq__B, areq_1_A, ..., areq_42_A], RequestGroup. So taken as a - # [areq__B, areq_1_A, ..., areq_42_B], whole, each list is a viable - # [areq__B, areq_1_B, ..., areq_42_A], (preliminary) candidate to - # [areq__B, areq_1_B, ..., areq_42_B], return. - # ..., - # ] - for areq_list in itertools.product( - *list(areq_lists_by_suffix.values())): - # At this point, each AllocationRequest in areq_list is still - # marked as use_same_provider. This is necessary to filter by group - # policy, which enforces how these interact with each other. - if not _satisfies_group_policy( - areq_list, group_policy, num_granular_groups): - continue - # Now we go from this (where 'arr' is AllocationRequestResource): - # [ areq__B(arrX, arrY, arrZ), - # areq_1_A(arrM, arrN), - # ..., - # areq_42_B(arrQ) - # ] - # to this: - # areq_combined(arrX, arrY, arrZ, arrM, arrN, arrQ) - # Note that this discards the information telling us which - # RequestGroup led to which piece of the final AllocationRequest. - # We needed that to be present for the previous filter; we need it - # to be *absent* for the next one (and for the final output). - areq = _consolidate_allocation_requests(areq_list) - # Since we sourced this AllocationRequest from multiple - # *independent* queries, it's possible that the combined result - # now exceeds capacity where amounts of the same RP+RC were - # folded together. So do a final capacity check/filter. - if _exceeds_capacity(areq, psum_res_by_rp_rc): - continue - areqs.append(areq) - - # It's possible we've filtered out everything. If so, short out. - if not areqs: - return [], [] - - # Now we have to produce provider summaries. The provider summaries in - # the candidates input contain all the information; we just need to - # filter it down to only the providers in trees represented by our merged - # list of allocation requests. - tree_uuids = set() - for areq in areqs: - for arr in areq.resource_requests: - tree_uuids.add(arr.resource_provider.root_provider_uuid) - psums = [psum for psum in all_psums if - psum.resource_provider.root_provider_uuid in tree_uuids] - - return areqs, psums - - -@base.VersionedObjectRegistry.register_if(False) -class AllocationCandidates(base.VersionedObject): - """The AllocationCandidates object is a collection of possible allocations - that match some request for resources, along with some summary information - about the resource providers involved in these allocation candidates. 
- """ - - fields = { - # A collection of allocation possibilities that can be attempted by the - # caller that would, at the time of calling, meet the requested - # resource constraints - 'allocation_requests': fields.ListOfObjectsField('AllocationRequest'), - # Information about usage and inventory that relate to any provider - # contained in any of the AllocationRequest objects in the - # allocation_requests field - 'provider_summaries': fields.ListOfObjectsField('ProviderSummary'), - } - - @classmethod - def get_by_requests(cls, context, requests, limit=None, group_policy=None): - """Returns an AllocationCandidates object containing all resource - providers matching a set of supplied resource constraints, with a set - of allocation requests constructed from that list of resource - providers. If CONF.placement.randomize_allocation_candidates is True - (default is False) then the order of the allocation requests will - be randomized. - - :param context: Nova RequestContext. - :param requests: Dict, keyed by suffix, of - nova.api.openstack.placement.util.RequestGroup - :param limit: An integer, N, representing the maximum number of - allocation candidates to return. If - CONF.placement.randomize_allocation_candidates is True - this will be a random sampling of N of the available - results. If False then the first N results, in whatever - order the database picked them, will be returned. In - either case if there are fewer than N total results, - all the results will be returned. - :param group_policy: String indicating how RequestGroups with - use_same_provider=True should interact with each - other. If the value is "isolate", we will filter - out allocation requests where any such - RequestGroups are satisfied by the same RP. - :return: An instance of AllocationCandidates with allocation_requests - and provider_summaries satisfying `requests`, limited - according to `limit`. - """ - alloc_reqs, provider_summaries = cls._get_by_requests( - context, requests, limit=limit, group_policy=group_policy) - return cls( - context, - allocation_requests=alloc_reqs, - provider_summaries=provider_summaries, - ) - - @staticmethod - def _get_by_one_request(context, request): - """Get allocation candidates for one RequestGroup. - - Must be called from within an placement_context_manager.reader - (or writer) context. - - :param context: Nova RequestContext. - :param request: One nova.api.openstack.placement.util.RequestGroup - :return: A tuple of (allocation_requests, provider_summaries) - satisfying `request`. - """ - # Transform resource string names to internal integer IDs - resources = { - _RC_CACHE.id_from_string(key): value - for key, value in request.resources.items() - } - - # maps the trait name to the trait internal ID - required_trait_map = {} - forbidden_trait_map = {} - for trait_map, traits in ( - (required_trait_map, request.required_traits), - (forbidden_trait_map, request.forbidden_traits)): - if traits: - trait_map.update(_trait_ids_from_names(context, traits)) - # Double-check that we found a trait ID for each requested name - if len(trait_map) != len(traits): - missing = traits - set(trait_map) - raise exception.TraitNotFound(names=', '.join(missing)) - - # Microversions prior to 1.21 will not have 'member_of' in the groups. - # This allows earlier microversions to continue to work. - member_of = getattr(request, "member_of", None) - - if not request.use_same_provider: - # TODO(jaypipes): The check/callout to handle trees goes here. 
- # Build a dict, keyed by resource class internal ID, of lists of - # internal IDs of resource providers that share some inventory for - # each resource class requested. - # TODO(jaypipes): Consider caching this for some amount of time - # since sharing providers generally don't change often and here we - # aren't concerned with how *much* inventory/capacity the sharing - # provider has, only that it is sharing *some* inventory of a - # particular resource class. - sharing_providers = { - rc_id: _get_providers_with_shared_capacity(context, rc_id, - amount, member_of) - for rc_id, amount in resources.items() - } - - # If there aren't any providers that have any of the - # required traits, just exit early... - if required_trait_map: - # TODO(cdent): Now that there is also a forbidden_trait_map - # it should be possible to further optimize this attempt at - # a quick return, but we leave that to future patches for - # now. - trait_rps = _get_provider_ids_having_any_trait( - context, required_trait_map) - if not trait_rps: - return [], [] - rp_tuples = _get_trees_matching_all(context, resources, - required_trait_map, forbidden_trait_map, - sharing_providers, member_of) - return _alloc_candidates_multiple_providers(context, resources, - required_trait_map, forbidden_trait_map, rp_tuples) - - # Either we are processing a single-RP request group, or there are no - # sharing providers that (help) satisfy the request. Get a list of - # resource provider IDs that have ALL the requested resources and more - # efficiently construct the allocation requests. - # NOTE(jaypipes): When we start handling nested providers, we may - # add new code paths or modify this code path to return root - # provider IDs of provider trees instead of the resource provider - # IDs. - rp_ids = _get_provider_ids_matching(context, resources, - required_trait_map, - forbidden_trait_map, member_of) - return _alloc_candidates_single_provider(context, resources, rp_ids) - - @classmethod - # TODO(efried): This is only a writer context because it accesses the - # resource_providers table via ResourceProvider.get_by_uuid, which does - # data migration to populate the root_provider_uuid. Change this back to a - # reader when that migration is no longer happening. - @db_api.placement_context_manager.writer - def _get_by_requests(cls, context, requests, limit=None, - group_policy=None): - candidates = {} - for suffix, request in requests.items(): - alloc_reqs, summaries = cls._get_by_one_request(context, request) - LOG.debug("%s (suffix '%s') returned %d matches", - str(request), str(suffix), len(alloc_reqs)) - if not alloc_reqs: - # Shortcut: If any one request resulted in no candidates, the - # whole operation is shot. - return [], [] - # Mark each allocation request according to whether its - # corresponding RequestGroup required it to be restricted to a - # single provider. We'll need this later to evaluate group_policy. - for areq in alloc_reqs: - areq.use_same_provider = request.use_same_provider - candidates[suffix] = alloc_reqs, summaries - - # At this point, each (alloc_requests, summary_obj) in `candidates` is - # independent of the others. We need to fold them together such that - # each allocation request satisfies *all* the incoming `requests`. The - # `candidates` dict is guaranteed to contain entries for all suffixes, - # or we would have short-circuited above. - alloc_request_objs, summary_objs = _merge_candidates( - candidates, group_policy=group_policy) - - # Limit the number of allocation request objects. 
We do this after - # creating all of them so that we can do a random slice without - # needing to mess with the complex sql above or add additional - # columns to the DB. - if limit and limit <= len(alloc_request_objs): - if CONF.placement.randomize_allocation_candidates: - alloc_request_objs = random.sample(alloc_request_objs, limit) - else: - alloc_request_objs = alloc_request_objs[:limit] - elif CONF.placement.randomize_allocation_candidates: - random.shuffle(alloc_request_objs) - - # Limit summaries to only those mentioned in the allocation requests. - if limit and limit <= len(alloc_request_objs): - kept_summary_objs = [] - alloc_req_rp_uuids = set() - # Extract resource provider uuids from the resource requests. - for aro in alloc_request_objs: - for arr in aro.resource_requests: - alloc_req_rp_uuids.add(arr.resource_provider.uuid) - for summary in summary_objs: - rp_uuid = summary.resource_provider.uuid - # Skip a summary if we are limiting and haven't selected an - # allocation request that uses the resource provider. - if rp_uuid not in alloc_req_rp_uuids: - continue - kept_summary_objs.append(summary) - else: - kept_summary_objs = summary_objs - - return alloc_request_objs, kept_summary_objs - - -@db_api.placement_context_manager.writer -def reshape(ctx, inventories, allocations): - """The 'replace the world' strategy that is executed when we want to - completely replace a set of provider inventory, allocation and consumer - information in a single transaction. - - :note: The reason this has to be done in a single monolithic function is so - we have a single top-level function on which to decorate with the - @db_api.placement_context_manager.writer transaction context - manager. Each time a top-level function that is decorated with this - exits, the transaction is either COMMIT'd or ROLLBACK'd. We need to - avoid calling two functions that are already decorated with a - transaction context manager from a function that *isn't* decorated - with the transaction context manager if we want all changes involved - in the sub-functions to operate within a single DB transaction. - - :param ctx: `nova.api.openstack.placement.context.RequestContext` object - containing the DB transaction context. - :param inventories: dict, keyed by resource provider UUID, of - `InventoryList` objects representing the replaced - inventory information for the provider. - :param allocations: `AllocationList` object containing all allocations for - all consumers being modified by the reshape operation. - :raises: `exception.ConcurrentUpdateDetected` when any resource provider or - consumer generation increment fails due to concurrent changes to - the same objects. - """ - # The resource provider objects, keyed by provider UUID, that are involved - # in this transaction. We keep a cache of these because as we perform the - # various operations on the providers, their generations increment and we - # want to "inject" the changed resource provider objects into the - # AllocationList's objects before calling AllocationList.replace_all() - affected_providers = {} - # We have to do the inventory changes in two steps because: - # - we can't delete inventories with allocations; and - # - we can't create allocations on nonexistent inventories. - # So in the first step we create a kind of "union" inventory for each - # provider. It contains all the inventories that the request wishes to - # exist in the end, PLUS any inventories that the request wished to remove - # (in their original form). 
- # Note that this can cause us to end up with an interim situation where we - # have modified an inventory to have less capacity than is currently - # allocated, but that's allowed by the code. If the final picture is - # overcommitted, we'll get an appropriate exception when we replace the - # allocations at the end. - for rp_uuid, new_inv_list in inventories.items(): - LOG.debug("reshaping: *interim* inventory replacement for provider %s", - rp_uuid) - rp = new_inv_list[0].resource_provider - # A dict, keyed by resource class, of the Inventory objects. We start - # with the original inventory list. - inv_by_rc = {inv.resource_class: inv for inv in - InventoryList.get_all_by_resource_provider(ctx, rp)} - # Now add each inventory in the new inventory list. If an inventory for - # that resource class existed in the original inventory list, it is - # overwritten. - for inv in new_inv_list: - inv_by_rc[inv.resource_class] = inv - # Set the interim inventory structure. - rp.set_inventory(InventoryList(objects=list(inv_by_rc.values()))) - affected_providers[rp_uuid] = rp - - # NOTE(jaypipes): The above inventory replacements will have - # incremented the resource provider generations, so we need to look in - # the AllocationList and swap the resource provider object with the one we - # saved above that has the updated provider generation in it. - for alloc in allocations: - rp_uuid = alloc.resource_provider.uuid - if rp_uuid in affected_providers: - alloc.resource_provider = affected_providers[rp_uuid] - - # Now we can replace all the allocations - LOG.debug("reshaping: attempting allocation replacement") - allocations.replace_all() - - # And finally, we can set the inventories to their actual desired state. - for rp_uuid, new_inv_list in inventories.items(): - LOG.debug("reshaping: *final* inventory replacement for provider %s", - rp_uuid) - # TODO(efried): If we wanted this to be more efficient, we could keep - # track of providers for which all inventories are being deleted in the - # above loop and just do those and skip the rest, since they're already - # in their final form. - new_inv_list[0].resource_provider.set_inventory(new_inv_list) diff --git a/nova/api/openstack/placement/objects/user.py b/nova/api/openstack/placement/objects/user.py deleted file mode 100644 index 8d5af8473d6..00000000000 --- a/nova/api/openstack/placement/objects/user.py +++ /dev/null @@ -1,92 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from oslo_config import cfg -from oslo_db import exception as db_exc -from oslo_versionedobjects import base -from oslo_versionedobjects import fields -import sqlalchemy as sa - -from nova.api.openstack.placement import db_api -from nova.api.openstack.placement import exception -from nova.db.sqlalchemy import api_models as models - -CONF = cfg.CONF -USER_TBL = models.User.__table__ - - -@db_api.placement_context_manager.writer -def ensure_incomplete_user(ctx): - """Ensures that a user record is created for the "incomplete consumer - user". 
Returns the internal ID of that record. - """ - incomplete_id = CONF.placement.incomplete_consumer_user_id - sel = sa.select([USER_TBL.c.id]).where( - USER_TBL.c.external_id == incomplete_id) - res = ctx.session.execute(sel).fetchone() - if res: - return res[0] - ins = USER_TBL.insert().values(external_id=incomplete_id) - res = ctx.session.execute(ins) - return res.inserted_primary_key[0] - - -@db_api.placement_context_manager.reader -def _get_user_by_external_id(ctx, external_id): - users = sa.alias(USER_TBL, name="u") - cols = [ - users.c.id, - users.c.external_id, - users.c.updated_at, - users.c.created_at - ] - sel = sa.select(cols) - sel = sel.where(users.c.external_id == external_id) - res = ctx.session.execute(sel).fetchone() - if not res: - raise exception.UserNotFound(external_id=external_id) - - return dict(res) - - -@base.VersionedObjectRegistry.register_if(False) -class User(base.VersionedObject): - - fields = { - 'id': fields.IntegerField(read_only=True), - 'external_id': fields.StringField(nullable=False), - } - - @staticmethod - def _from_db_object(ctx, target, source): - for field in target.fields: - setattr(target, field, source[field]) - - target._context = ctx - target.obj_reset_changes() - return target - - @classmethod - def get_by_external_id(cls, ctx, external_id): - res = _get_user_by_external_id(ctx, external_id) - return cls._from_db_object(ctx, cls(ctx), res) - - def create(self): - @db_api.placement_context_manager.writer - def _create_in_db(ctx): - db_obj = models.User(external_id=self.external_id) - try: - db_obj.save(ctx.session) - except db_exc.DBDuplicateEntry: - raise exception.UserExists(external_id=self.external_id) - self._from_db_object(ctx, self, db_obj) - _create_in_db(self._context) diff --git a/nova/api/openstack/placement/policies/__init__.py b/nova/api/openstack/placement/policies/__init__.py deleted file mode 100644 index cd65514d39a..00000000000 --- a/nova/api/openstack/placement/policies/__init__.py +++ /dev/null @@ -1,37 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
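Before the policy modules that follow, note that the ``User`` object deleted above supports a common get-or-create idiom; a condensed sketch of that usage (the ``ensure_user`` helper is hypothetical, while the import path and exception names are those of the deleted module)::

    from nova.api.openstack.placement import exception
    from nova.api.openstack.placement.objects.user import User

    def ensure_user(ctx, external_id):
        """Fetch a User, creating it if missing; safe against create races."""
        try:
            return User.get_by_external_id(ctx, external_id)
        except exception.UserNotFound:
            user = User(ctx, external_id=external_id)
            try:
                user.create()
            except exception.UserExists:
                # Lost a race with a concurrent creator; re-read instead.
                return User.get_by_external_id(ctx, external_id)
            return user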
- -import itertools - -from nova.api.openstack.placement.policies import aggregate -from nova.api.openstack.placement.policies import allocation -from nova.api.openstack.placement.policies import allocation_candidate -from nova.api.openstack.placement.policies import base -from nova.api.openstack.placement.policies import inventory -from nova.api.openstack.placement.policies import resource_class -from nova.api.openstack.placement.policies import resource_provider -from nova.api.openstack.placement.policies import trait -from nova.api.openstack.placement.policies import usage - - -def list_rules(): - return itertools.chain( - base.list_rules(), - resource_provider.list_rules(), - resource_class.list_rules(), - inventory.list_rules(), - aggregate.list_rules(), - usage.list_rules(), - trait.list_rules(), - allocation.list_rules(), - allocation_candidate.list_rules() - ) diff --git a/nova/api/openstack/placement/policies/aggregate.py b/nova/api/openstack/placement/policies/aggregate.py deleted file mode 100644 index 8e2bd8c3ab7..00000000000 --- a/nova/api/openstack/placement/policies/aggregate.py +++ /dev/null @@ -1,53 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - - -from oslo_policy import policy - -from nova.api.openstack.placement.policies import base - - -PREFIX = 'placement:resource_providers:aggregates:%s' -LIST = PREFIX % 'list' -UPDATE = PREFIX % 'update' -BASE_PATH = '/resource_providers/{uuid}/aggregates' - -rules = [ - policy.DocumentedRuleDefault( - LIST, - base.RULE_ADMIN_API, - "List resource provider aggregates.", - [ - { - 'method': 'GET', - 'path': BASE_PATH - } - ], - scope_types=['system'] - ), - policy.DocumentedRuleDefault( - UPDATE, - base.RULE_ADMIN_API, - "Update resource provider aggregates.", - [ - { - 'method': 'PUT', - 'path': BASE_PATH - } - ], - scope_types=['system'] - ), -] - - -def list_rules(): - return rules diff --git a/nova/api/openstack/placement/policies/allocation.py b/nova/api/openstack/placement/policies/allocation.py deleted file mode 100644 index a5f1c2e0017..00000000000 --- a/nova/api/openstack/placement/policies/allocation.py +++ /dev/null @@ -1,92 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- - -from oslo_policy import policy - -from nova.api.openstack.placement.policies import base - - -RP_ALLOC_LIST = 'placement:resource_providers:allocations:list' - -ALLOC_PREFIX = 'placement:allocations:%s' -ALLOC_LIST = ALLOC_PREFIX % 'list' -ALLOC_MANAGE = ALLOC_PREFIX % 'manage' -ALLOC_UPDATE = ALLOC_PREFIX % 'update' -ALLOC_DELETE = ALLOC_PREFIX % 'delete' - -rules = [ - policy.DocumentedRuleDefault( - ALLOC_MANAGE, - base.RULE_ADMIN_API, - "Manage allocations.", - [ - { - 'method': 'POST', - 'path': '/allocations' - } - ], - scope_types=['system'], - ), - policy.DocumentedRuleDefault( - ALLOC_LIST, - base.RULE_ADMIN_API, - "List allocations.", - [ - { - 'method': 'GET', - 'path': '/allocations/{consumer_uuid}' - } - ], - scope_types=['system'] - ), - policy.DocumentedRuleDefault( - ALLOC_UPDATE, - base.RULE_ADMIN_API, - "Update allocations.", - [ - { - 'method': 'PUT', - 'path': '/allocations/{consumer_uuid}' - } - ], - scope_types=['system'], - ), - policy.DocumentedRuleDefault( - ALLOC_DELETE, - base.RULE_ADMIN_API, - "Delete allocations.", - [ - { - 'method': 'DELETE', - 'path': '/allocations/{consumer_uuid}' - } - ], - scope_types=['system'], - ), - policy.DocumentedRuleDefault( - RP_ALLOC_LIST, - base.RULE_ADMIN_API, - "List resource provider allocations.", - [ - { - 'method': 'GET', - 'path': '/resource_providers/{uuid}/allocations' - } - ], - scope_types=['system'], - ), -] - - -def list_rules(): - return rules diff --git a/nova/api/openstack/placement/policies/allocation_candidate.py b/nova/api/openstack/placement/policies/allocation_candidate.py deleted file mode 100644 index e2ae655370d..00000000000 --- a/nova/api/openstack/placement/policies/allocation_candidate.py +++ /dev/null @@ -1,38 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - - -from oslo_policy import policy - -from nova.api.openstack.placement.policies import base - - -LIST = 'placement:allocation_candidates:list' - -rules = [ - policy.DocumentedRuleDefault( - LIST, - base.RULE_ADMIN_API, - "List allocation candidates.", - [ - { - 'method': 'GET', - 'path': '/allocation_candidates' - } - ], - scope_types=['system'], - ) -] - - -def list_rules(): - return rules diff --git a/nova/api/openstack/placement/policies/base.py b/nova/api/openstack/placement/policies/base.py deleted file mode 100644 index 1e728a37fad..00000000000 --- a/nova/api/openstack/placement/policies/base.py +++ /dev/null @@ -1,42 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
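All of these rule definitions resolve through the ``admin_api`` rule defined in the base module below. A minimal, self-contained sketch of how oslo.policy evaluates such a reference (the configuration setup and credential dict are illustrative, not placement's actual wiring)::

    from oslo_config import cfg
    from oslo_policy import policy

    conf = cfg.ConfigOpts()
    conf([], project='policy-sketch')  # hypothetical project name

    enforcer = policy.Enforcer(conf)
    enforcer.register_defaults([
        policy.RuleDefault('admin_api', 'role:admin'),
        policy.RuleDefault('placement:allocations:list', 'rule:admin_api'),
    ])

    creds = {'roles': ['admin'], 'project_id': 'demo'}  # made-up token data
    assert enforcer.enforce('placement:allocations:list', {}, creds)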
- -from oslo_policy import policy - -RULE_ADMIN_API = 'rule:admin_api' - -rules = [ - # "placement" is the default rule (action) used for all routes that do - # not yet have granular policy rules. It is used in - # PlacementHandler.__call__ and can be dropped once all routes have - # granular policy handling. - policy.RuleDefault( - "placement", - "role:admin", - description="This rule is used for all routes that do not yet " - "have granular policy rules. It will be replaced " - "with rule:admin_api.", - deprecated_for_removal=True, - deprecated_reason="This was a catch-all rule hard-coded into " - "the placement service and has been superseded by " - "granular policy rules per operation.", - deprecated_since="18.0.0"), - policy.RuleDefault( - "admin_api", - "role:admin", - description="Default rule for most placement APIs.", - scope_types=['system']), -] - - -def list_rules(): - return rules diff --git a/nova/api/openstack/placement/policies/inventory.py b/nova/api/openstack/placement/policies/inventory.py deleted file mode 100644 index 1f3d38f413f..00000000000 --- a/nova/api/openstack/placement/policies/inventory.py +++ /dev/null @@ -1,95 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - - -from oslo_policy import policy - -from nova.api.openstack.placement.policies import base - - -PREFIX = 'placement:resource_providers:inventories:%s' -LIST = PREFIX % 'list' -CREATE = PREFIX % 'create' -SHOW = PREFIX % 'show' -UPDATE = PREFIX % 'update' -DELETE = PREFIX % 'delete' -BASE_PATH = '/resource_providers/{uuid}/inventories' - -rules = [ - policy.DocumentedRuleDefault( - LIST, - base.RULE_ADMIN_API, - "List resource provider inventories.", - [ - { - 'method': 'GET', - 'path': BASE_PATH - } - ], - scope_types=['system']), - policy.DocumentedRuleDefault( - CREATE, - base.RULE_ADMIN_API, - "Create one resource provider inventory.", - [ - { - 'method': 'POST', - 'path': BASE_PATH - } - ], - scope_types=['system']), - policy.DocumentedRuleDefault( - SHOW, - base.RULE_ADMIN_API, - "Show resource provider inventory.", - [ - { - 'method': 'GET', - 'path': BASE_PATH + '/{resource_class}' - } - ], - scope_types=['system']), - policy.DocumentedRuleDefault( - UPDATE, - base.RULE_ADMIN_API, - "Update resource provider inventory.", - [ - { - 'method': 'PUT', - 'path': BASE_PATH - }, - { - 'method': 'PUT', - 'path': BASE_PATH + '/{resource_class}' - } - ], - scope_types=['system']), - policy.DocumentedRuleDefault( - DELETE, - base.RULE_ADMIN_API, - "Delete resource provider inventory.", - [ - { - 'method': 'DELETE', - 'path': BASE_PATH - }, - { - 'method': 'DELETE', - 'path': BASE_PATH + '/{resource_class}' - } - ], - scope_types=['system']), -] - - -def list_rules(): - return rules diff --git a/nova/api/openstack/placement/policies/resource_class.py b/nova/api/openstack/placement/policies/resource_class.py deleted file mode 100644 index 75acab9d3b7..00000000000 --- a/nova/api/openstack/placement/policies/resource_class.py +++ /dev/null @@ -1,86 +0,0 @@ -# Licensed under the Apache License, Version 2.0 
(the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - - -from oslo_policy import policy - -from nova.api.openstack.placement.policies import base - - -PREFIX = 'placement:resource_classes:%s' -LIST = PREFIX % 'list' -CREATE = PREFIX % 'create' -SHOW = PREFIX % 'show' -UPDATE = PREFIX % 'update' -DELETE = PREFIX % 'delete' - -rules = [ - policy.DocumentedRuleDefault( - LIST, - base.RULE_ADMIN_API, - "List resource classes.", - [ - { - 'method': 'GET', - 'path': '/resource_classes' - } - ], - scope_types=['system']), - policy.DocumentedRuleDefault( - CREATE, - base.RULE_ADMIN_API, - "Create resource class.", - [ - { - 'method': 'POST', - 'path': '/resource_classes' - } - ], - scope_types=['system']), - policy.DocumentedRuleDefault( - SHOW, - base.RULE_ADMIN_API, - "Show resource class.", - [ - { - 'method': 'GET', - 'path': '/resource_classes/{name}' - } - ], - scope_types=['system']), - policy.DocumentedRuleDefault( - UPDATE, - base.RULE_ADMIN_API, - "Update resource class.", - [ - { - 'method': 'PUT', - 'path': '/resource_classes/{name}' - } - ], - scope_types=['system']), - policy.DocumentedRuleDefault( - DELETE, - base.RULE_ADMIN_API, - "Delete resource class.", - [ - { - 'method': 'DELETE', - 'path': '/resource_classes/{name}' - } - ], - scope_types=['system']), -] - - -def list_rules(): - return rules diff --git a/nova/api/openstack/placement/policies/resource_provider.py b/nova/api/openstack/placement/policies/resource_provider.py deleted file mode 100644 index 7c4826bd705..00000000000 --- a/nova/api/openstack/placement/policies/resource_provider.py +++ /dev/null @@ -1,86 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- - -from oslo_policy import policy - -from nova.api.openstack.placement.policies import base - - -PREFIX = 'placement:resource_providers:%s' -LIST = PREFIX % 'list' -CREATE = PREFIX % 'create' -SHOW = PREFIX % 'show' -UPDATE = PREFIX % 'update' -DELETE = PREFIX % 'delete' - -rules = [ - policy.DocumentedRuleDefault( - LIST, - base.RULE_ADMIN_API, - "List resource providers.", - [ - { - 'method': 'GET', - 'path': '/resource_providers' - } - ], - scope_types=['system']), - policy.DocumentedRuleDefault( - CREATE, - base.RULE_ADMIN_API, - "Create resource provider.", - [ - { - 'method': 'POST', - 'path': '/resource_providers' - } - ], - scope_types=['system']), - policy.DocumentedRuleDefault( - SHOW, - base.RULE_ADMIN_API, - "Show resource provider.", - [ - { - 'method': 'GET', - 'path': '/resource_providers/{uuid}' - } - ], - scope_types=['system']), - policy.DocumentedRuleDefault( - UPDATE, - base.RULE_ADMIN_API, - "Update resource provider.", - [ - { - 'method': 'PUT', - 'path': '/resource_providers/{uuid}' - } - ], - scope_types=['system']), - policy.DocumentedRuleDefault( - DELETE, - base.RULE_ADMIN_API, - "Delete resource provider.", - [ - { - 'method': 'DELETE', - 'path': '/resource_providers/{uuid}' - } - ], - scope_types=['system']), -] - - -def list_rules(): - return rules diff --git a/nova/api/openstack/placement/policies/trait.py b/nova/api/openstack/placement/policies/trait.py deleted file mode 100644 index 6b35a703de5..00000000000 --- a/nova/api/openstack/placement/policies/trait.py +++ /dev/null @@ -1,120 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- - -from oslo_policy import policy - -from nova.api.openstack.placement.policies import base - - -RP_TRAIT_PREFIX = 'placement:resource_providers:traits:%s' -RP_TRAIT_LIST = RP_TRAIT_PREFIX % 'list' -RP_TRAIT_UPDATE = RP_TRAIT_PREFIX % 'update' -RP_TRAIT_DELETE = RP_TRAIT_PREFIX % 'delete' - -TRAITS_PREFIX = 'placement:traits:%s' -TRAITS_LIST = TRAITS_PREFIX % 'list' -TRAITS_SHOW = TRAITS_PREFIX % 'show' -TRAITS_UPDATE = TRAITS_PREFIX % 'update' -TRAITS_DELETE = TRAITS_PREFIX % 'delete' - - -rules = [ - policy.DocumentedRuleDefault( - TRAITS_LIST, - base.RULE_ADMIN_API, - "List traits.", - [ - { - 'method': 'GET', - 'path': '/traits' - } - ], - scope_types=['system'] - ), - policy.DocumentedRuleDefault( - TRAITS_SHOW, - base.RULE_ADMIN_API, - "Show trait.", - [ - { - 'method': 'GET', - 'path': '/traits/{name}' - } - ], - scope_types=['system'], - ), - policy.DocumentedRuleDefault( - TRAITS_UPDATE, - base.RULE_ADMIN_API, - "Update trait.", - [ - { - 'method': 'PUT', - 'path': '/traits/{name}' - } - ], - scope_types=['system'], - ), - policy.DocumentedRuleDefault( - TRAITS_DELETE, - base.RULE_ADMIN_API, - "Delete trait.", - [ - { - 'method': 'DELETE', - 'path': '/traits/{name}' - } - ], - scope_types=['system'], - ), - policy.DocumentedRuleDefault( - RP_TRAIT_LIST, - base.RULE_ADMIN_API, - "List resource provider traits.", - [ - { - 'method': 'GET', - 'path': '/resource_providers/{uuid}/traits' - } - ], - scope_types=['system'], - ), - policy.DocumentedRuleDefault( - RP_TRAIT_UPDATE, - base.RULE_ADMIN_API, - "Update resource provider traits.", - [ - { - 'method': 'PUT', - 'path': '/resource_providers/{uuid}/traits' - } - ], - scope_types=['system'], - ), - policy.DocumentedRuleDefault( - RP_TRAIT_DELETE, - base.RULE_ADMIN_API, - "Delete resource provider traits.", - [ - { - 'method': 'DELETE', - 'path': '/resource_providers/{uuid}/traits' - } - ], - scope_types=['system'], - ), -] - - -def list_rules(): - return rules diff --git a/nova/api/openstack/placement/policies/usage.py b/nova/api/openstack/placement/policies/usage.py deleted file mode 100644 index 6543fa4359d..00000000000 --- a/nova/api/openstack/placement/policies/usage.py +++ /dev/null @@ -1,54 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - - -from oslo_policy import policy - -from nova.api.openstack.placement.policies import base - - -PROVIDER_USAGES = 'placement:resource_providers:usages' -TOTAL_USAGES = 'placement:usages' - - -rules = [ - policy.DocumentedRuleDefault( - PROVIDER_USAGES, - base.RULE_ADMIN_API, - "List resource provider usages.", - [ - { - 'method': 'GET', - 'path': '/resource_providers/{uuid}/usages' - } - ], - scope_types=['system']), - policy.DocumentedRuleDefault( - # TODO(mriedem): At some point we might set scope_types=['project'] - # so that non-admin project-scoped token users can query usages for - # their project. The context.can() target will need to change as well - # in the actual policy enforcement check in the handler code. 
- TOTAL_USAGES, - base.RULE_ADMIN_API, - "List total resource usages for a given project.", - [ - { - 'method': 'GET', - 'path': '/usages' - } - ], - scope_types=['system']) -] - - -def list_rules(): - return rules diff --git a/nova/api/openstack/placement/policy.py b/nova/api/openstack/placement/policy.py deleted file mode 100644 index cad6fdf8388..00000000000 --- a/nova/api/openstack/placement/policy.py +++ /dev/null @@ -1,94 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -"""Policy Enforcement for placement API.""" - -from oslo_config import cfg -from oslo_log import log as logging -from oslo_policy import policy -from oslo_utils import excutils - -from nova.api.openstack.placement import exception -from nova.api.openstack.placement import policies - - -CONF = cfg.CONF -LOG = logging.getLogger(__name__) -_ENFORCER_PLACEMENT = None - - -def reset(): - """Used to reset the global _ENFORCER_PLACEMENT between test runs.""" - global _ENFORCER_PLACEMENT - if _ENFORCER_PLACEMENT: - _ENFORCER_PLACEMENT.clear() - _ENFORCER_PLACEMENT = None - - -def init(): - """Init an Enforcer class. Sets the _ENFORCER_PLACEMENT global.""" - global _ENFORCER_PLACEMENT - if not _ENFORCER_PLACEMENT: - # NOTE(mriedem): We have to explicitly pass in the - # [placement]/policy_file path because otherwise oslo_policy defaults - # to read the policy file from config option [oslo_policy]/policy_file - # which is used by nova. In other words, to have separate policy files - # for placement and nova, we have to use separate policy_file options. - _ENFORCER_PLACEMENT = policy.Enforcer( - CONF, policy_file=CONF.placement.policy_file) - _ENFORCER_PLACEMENT.register_defaults(policies.list_rules()) - _ENFORCER_PLACEMENT.load_rules() - - -def get_enforcer(): - # This method is used by oslopolicy CLI scripts in order to generate policy - # files from overrides on disk and defaults in code. We can just pass an - # empty list and let oslo do the config lifting for us. - # TODO(mriedem): Change the project kwarg value to "placement" once - # this code is extracted from nova. - cfg.CONF([], project='nova') - init() - return _ENFORCER_PLACEMENT - - -def authorize(context, action, target, do_raise=True): - """Verifies that the action is valid on the target in this context. - - :param context: instance of - nova.api.openstack.placement.context.RequestContext - :param action: string representing the action to be checked - this should be colon separated for clarity, i.e. - ``placement:resource_providers:list`` - :param target: dictionary representing the object of the action; - for object creation this should be a dictionary representing the - owner of the object e.g. ``{'project_id': context.project_id}``. - :param do_raise: if True (the default), raises PolicyNotAuthorized; - if False, returns False - :raises nova.api.openstack.placement.exception.PolicyNotAuthorized: if - verification fails and do_raise is True. 
- :returns: non-False value (not necessarily "True") if authorized, and the - exact value False if not authorized and do_raise is False. - """ - init() - credentials = context.to_policy_values() - try: - # NOTE(mriedem): The "action" kwarg is for the PolicyNotAuthorized exc. - return _ENFORCER_PLACEMENT.authorize( - action, target, credentials, do_raise=do_raise, - exc=exception.PolicyNotAuthorized, action=action) - except policy.PolicyNotRegistered: - with excutils.save_and_reraise_exception(): - LOG.exception('Policy not registered') - except Exception: - with excutils.save_and_reraise_exception(): - LOG.debug('Policy check for %(action)s failed with credentials ' - '%(credentials)s', - {'action': action, 'credentials': credentials}) diff --git a/nova/api/openstack/placement/requestlog.py b/nova/api/openstack/placement/requestlog.py deleted file mode 100644 index da7be6a37f4..00000000000 --- a/nova/api/openstack/placement/requestlog.py +++ /dev/null @@ -1,87 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. -"""Simple middleware for request logging.""" - -from oslo_log import log as logging - -from nova.api.openstack.placement import microversion - -LOG = logging.getLogger(__name__) - - -class RequestLog(object): - """WSGI Middleware to write a simple request log to. - - Borrowed from Paste Translogger - """ - - format = ('%(REMOTE_ADDR)s "%(REQUEST_METHOD)s %(REQUEST_URI)s" ' - 'status: %(status)s len: %(bytes)s ' - 'microversion: %(microversion)s') - - def __init__(self, application): - self.application = application - - def __call__(self, environ, start_response): - LOG.debug('Starting request: %s "%s %s"', - environ['REMOTE_ADDR'], environ['REQUEST_METHOD'], - self._get_uri(environ)) - # Set the accept header if it is not otherwise set or is '*/*'. This - # ensures that error responses will be in JSON. - accept = environ.get('HTTP_ACCEPT') - if not accept or accept == '*/*': - environ['HTTP_ACCEPT'] = 'application/json' - if LOG.isEnabledFor(logging.INFO): - return self._log_app(environ, start_response) - else: - return self.application(environ, start_response) - - @staticmethod - def _get_uri(environ): - req_uri = (environ.get('SCRIPT_NAME', '') - + environ.get('PATH_INFO', '')) - if environ.get('QUERY_STRING'): - req_uri += '?' + environ['QUERY_STRING'] - return req_uri - - def _log_app(self, environ, start_response): - req_uri = self._get_uri(environ) - - def replacement_start_response(status, headers, exc_info=None): - """We need to gaze at the content-length, if set, to - write log info. - """ - size = None - for name, value in headers: - if name.lower() == 'content-length': - size = value - self.write_log(environ, req_uri, status, size) - return start_response(status, headers, exc_info) - - return self.application(environ, replacement_start_response) - - def write_log(self, environ, req_uri, status, size): - """Write the log info out in a formatted form to ``LOG.info``. 
- """ - if size is None: - size = '-' - log_format = { - 'REMOTE_ADDR': environ.get('REMOTE_ADDR', '-'), - 'REQUEST_METHOD': environ['REQUEST_METHOD'], - 'REQUEST_URI': req_uri, - 'status': status.split(None, 1)[0], - 'bytes': size, - 'microversion': environ.get( - microversion.MICROVERSION_ENVIRON, '-'), - } - LOG.info(self.format, log_format) diff --git a/nova/api/openstack/placement/resource_class_cache.py b/nova/api/openstack/placement/resource_class_cache.py deleted file mode 100644 index a72b4177ea6..00000000000 --- a/nova/api/openstack/placement/resource_class_cache.py +++ /dev/null @@ -1,154 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from oslo_concurrency import lockutils -import sqlalchemy as sa - -from nova.api.openstack.placement import db_api -from nova.api.openstack.placement import exception -from nova.db.sqlalchemy import api_models as models -from nova import rc_fields as fields - -_RC_TBL = models.ResourceClass.__table__ -_LOCKNAME = 'rc_cache' - - -@db_api.placement_context_manager.reader -def _refresh_from_db(ctx, cache): - """Grabs all custom resource classes from the DB table and populates the - supplied cache object's internal integer and string identifier dicts. - - :param cache: ResourceClassCache object to refresh. - """ - with db_api.placement_context_manager.reader.connection.using(ctx) as conn: - sel = sa.select([_RC_TBL.c.id, _RC_TBL.c.name, _RC_TBL.c.updated_at, - _RC_TBL.c.created_at]) - res = conn.execute(sel).fetchall() - cache.id_cache = {r[1]: r[0] for r in res} - cache.str_cache = {r[0]: r[1] for r in res} - cache.all_cache = {r[1]: r for r in res} - - -class ResourceClassCache(object): - """A cache of integer and string lookup values for resource classes.""" - - # List of dict of all standard resource classes, where every list item - # have a form {'id': , 'name': } - STANDARDS = [{'id': fields.ResourceClass.STANDARD.index(s), 'name': s, - 'updated_at': None, 'created_at': None} - for s in fields.ResourceClass.STANDARD] - - def __init__(self, ctx): - """Initialize the cache of resource class identifiers. - - :param ctx: `nova.context.RequestContext` from which we can grab a - `SQLAlchemy.Connection` object to use for any DB lookups. - """ - self.ctx = ctx - self.id_cache = {} - self.str_cache = {} - self.all_cache = {} - - def clear(self): - with lockutils.lock(_LOCKNAME): - self.id_cache = {} - self.str_cache = {} - self.all_cache = {} - - def id_from_string(self, rc_str): - """Given a string representation of a resource class -- e.g. "DISK_GB" - or "IRON_SILVER" -- return the integer code for the resource class. For - standard resource classes, this integer code will match the list of - resource classes on the fields.ResourceClass field type. Other custom - resource classes will cause a DB lookup into the resource_classes - table, however the results of these DB lookups are cached since the - lookups are so frequent. - - :param rc_str: The string representation of the resource class to look - up a numeric identifier for. 
- :returns integer identifier for the resource class, or None, if no such - resource class was found in the list of standard resource - classes or the resource_classes database table. - :raises `exception.ResourceClassNotFound` if rc_str cannot be found in - either the standard classes or the DB. - """ - # First check the standard resource classes - if rc_str in fields.ResourceClass.STANDARD: - return fields.ResourceClass.STANDARD.index(rc_str) - - with lockutils.lock(_LOCKNAME): - if rc_str in self.id_cache: - return self.id_cache[rc_str] - # Otherwise, check the database table - _refresh_from_db(self.ctx, self) - if rc_str in self.id_cache: - return self.id_cache[rc_str] - raise exception.ResourceClassNotFound(resource_class=rc_str) - - def all_from_string(self, rc_str): - """Given a string representation of a resource class -- e.g. "DISK_GB" - or "CUSTOM_IRON_SILVER" -- return all the resource class info. - - :param rc_str: The string representation of the resource class for - which to look up a resource_class. - :returns: dict representing the resource class fields, if the - resource class was found in the list of standard - resource classes or the resource_classes database table. - :raises: `exception.ResourceClassNotFound` if rc_str cannot be found in - either the standard classes or the DB. - """ - # First check the standard resource classes - if rc_str in fields.ResourceClass.STANDARD: - return {'id': fields.ResourceClass.STANDARD.index(rc_str), - 'name': rc_str, - 'updated_at': None, - 'created_at': None} - - with lockutils.lock(_LOCKNAME): - if rc_str in self.all_cache: - return self.all_cache[rc_str] - # Otherwise, check the database table - _refresh_from_db(self.ctx, self) - if rc_str in self.all_cache: - return self.all_cache[rc_str] - raise exception.ResourceClassNotFound(resource_class=rc_str) - - def string_from_id(self, rc_id): - """The reverse of the id_from_string() method. Given a supplied numeric - identifier for a resource class, we look up the corresponding string - representation, either in the list of standard resource classes or via - a DB lookup. The results of these DB lookups are cached since the - lookups are so frequent. - - :param rc_id: The numeric representation of the resource class to look - up a string identifier for. - :returns: string identifier for the resource class, or None, if no such - resource class was found in the list of standard resource - classes or the resource_classes database table. - :raises `exception.ResourceClassNotFound` if rc_id cannot be found in - either the standard classes or the DB. - """ - # First check the fields.ResourceClass.STANDARD values - try: - return fields.ResourceClass.STANDARD[rc_id] - except IndexError: - pass - - with lockutils.lock(_LOCKNAME): - if rc_id in self.str_cache: - return self.str_cache[rc_id] - - # Otherwise, check the database table - _refresh_from_db(self.ctx, self) - if rc_id in self.str_cache: - return self.str_cache[rc_id] - raise exception.ResourceClassNotFound(resource_class=rc_id) diff --git a/nova/api/openstack/placement/rest_api_version_history.rst b/nova/api/openstack/placement/rest_api_version_history.rst deleted file mode 100644 index 47c6c8f31e6..00000000000 --- a/nova/api/openstack/placement/rest_api_version_history.rst +++ /dev/null @@ -1,506 +0,0 @@ -REST API Version History -~~~~~~~~~~~~~~~~~~~~~~~~ - -This documents the changes made to the REST API with every microversion change. 
-The description for each version should be a verbose one which has enough -information to be suitable for use in user documentation. - -.. _1.0 (Maximum in Newton): - -1.0 Initial Version (Maximum in Newton) ---------------------------------------- - -.. versionadded:: Newton - -This is the initial version of the placement REST API that was released in -Nova 14.0.0 (Newton). This contains the following routes: - -* ``/resource_providers`` -* ``/resource_providers/allocations`` -* ``/resource_providers/inventories`` -* ``/resource_providers/usages`` -* ``/allocations`` - -1.1 Resource provider aggregates --------------------------------- - -.. versionadded:: Ocata - -The 1.1 version adds support for associating aggregates with resource -providers. - -The following new operations are added: - -``GET /resource_providers/{uuid}/aggregates`` - Return all aggregates associated with a resource provider - -``PUT /resource_providers/{uuid}/aggregates`` - Update the aggregates associated with a resource provider - -1.2 Add custom resource classes -------------------------------- - -.. versionadded:: Ocata - -Placement API version 1.2 adds basic operations allowing an admin to create, -list and delete custom resource classes. - -The following new routes are added: - -``GET /resource_classes`` - Return all resource classes - -``POST /resource_classes`` - Create a new custom resource class - -``PUT /resource_classes/{name}`` - Update the name of a custom resource class - -``DELETE /resource_classes/{name}`` - Delete a custom resource class - -``GET /resource_classes/{name}`` - Get a single resource class - -Custom resource classes must begin with the prefix ``CUSTOM_`` and contain only -the letters A through Z, the numbers 0 through 9 and the underscore ``_`` -character. - -1.3 member_of query parameter ------------------------------ - -.. versionadded:: Ocata - -Version 1.3 adds support for listing resource providers that are members of any -of the list of aggregates provided using a ``member_of`` query parameter:: - - ?member_of=in:{agg1_uuid},{agg2_uuid},{agg3_uuid} - -1.4 Filter resource providers by requested resource capacity (Maximum in Ocata) -------------------------------------------------------------------------------- - -.. versionadded:: Ocata - -The 1.4 version adds support for querying resource providers that have the -ability to serve a requested set of resources. A new "resources" query string -parameter is now accepted to the ``GET /resource_providers`` API call. This -parameter indicates the requested amounts of various resources that a provider -must have the capacity to serve. The "resources" query string parameter takes -the form:: - - ?resources=$RESOURCE_CLASS_NAME:$AMOUNT,$RESOURCE_CLASS_NAME:$AMOUNT - -For instance, if the user wishes to see resource providers that can service a -request for 2 vCPUs, 1024 MB of RAM and 50 GB of disk space, the user can issue -a request to:: - - GET /resource_providers?resources=VCPU:2,MEMORY_MB:1024,DISK_GB:50 - -If the resource class does not exist, then it will return a HTTP 400. - -.. note:: The resources filtering is also based on the `min_unit`, `max_unit` - and `step_size` of the inventory record. For example, if the `max_unit` is - 512 for the DISK_GB inventory for a particular resource provider and a - GET request is made for `DISK_GB:1024`, that resource provider will not be - returned. The `min_unit` is the minimum amount of resource that can be - requested for a given inventory and resource provider. 
The `step_size` is
-   the increment of resource that can be requested for a given resource on a
-   given provider.
-
-1.5 DELETE all inventory for a resource provider
-------------------------------------------------
-
-.. versionadded:: Pike
-
-Placement API version 1.5 adds a DELETE method for deleting all inventory for
-a resource provider. The following new method is supported:
-
-``DELETE /resource_providers/{uuid}/inventories``
-
-  Delete all inventories for a given resource provider
-
-1.6 Traits API
---------------
-
-.. versionadded:: Pike
-
-The 1.6 version adds basic operations allowing an admin to create, list, and
-delete custom traits, and also adds basic operations allowing an admin to
-attach traits to a resource provider.
-
-The following new routes are added:
-
-``GET /traits``
-  Return all traits.
-
-``PUT /traits/{name}``
-  Insert a single custom trait.
-
-``GET /traits/{name}``
-  Check if a trait name exists.
-
-``DELETE /traits/{name}``
-  Delete the specified trait.
-
-``GET /resource_providers/{uuid}/traits``
-  Return all traits associated with a specific resource provider.
-
-``PUT /resource_providers/{uuid}/traits``
-  Update all traits for a specific resource provider.
-
-``DELETE /resource_providers/{uuid}/traits``
-  Remove any existing trait associations for a specific resource provider.
-
-Custom traits must begin with the prefix ``CUSTOM_`` and contain only the
-letters A through Z, the numbers 0 through 9 and the underscore ``_``
-character.
-
-1.7 Idempotent PUT /resource_classes/{name}
--------------------------------------------
-
-.. versionadded:: Pike
-
-The 1.7 version changes handling of ``PUT /resource_classes/{name}`` to be a
-create or verification of the resource class with ``{name}``. If the resource
-class is a custom resource class and does not already exist it will be created
-and a ``201`` response code returned. If the class already exists the response
-code will be ``204``. This makes it possible to check or create a resource
-class in one request.
-
-1.8 Require placement 'project_id', 'user_id' in PUT /allocations
------------------------------------------------------------------
-
-.. versionadded:: Pike
-
-The 1.8 version adds ``project_id`` and ``user_id`` required request parameters
-to ``PUT /allocations``.
-
-1.9 Add GET /usages
--------------------
-
-.. versionadded:: Pike
-
-The 1.9 version adds usages that can be queried by a project or project/user.
-
-The following new routes are added:
-
-``GET /usages?project_id=<project_id>``
-  Return all usages for a given project.
-
-``GET /usages?project_id=<project_id>&user_id=<user_id>``
-  Return all usages for a given project and user.
-
-1.10 Allocation candidates (Maximum in Pike)
---------------------------------------------
-
-.. versionadded:: Pike
-
-The 1.10 version brings a new REST resource endpoint for getting a list of
-allocation candidates. Allocation candidates are collections of possible
-allocations against resource providers that can satisfy a particular request
-for resources.
-
-1.11 Add 'allocations' link to the ``GET /resource_providers`` response
------------------------------------------------------------------------
-
-.. versionadded:: Queens
-
-The ``/resource_providers/{rp_uuid}/allocations`` endpoint has been available
-since version 1.0, but was not listed in the ``links`` section of the
-``GET /resource_providers`` response. The link is included as of version 1.11.
-
-1.12 PUT dict format to /allocations/{consumer_uuid}
-----------------------------------------------------
-
-.. versionadded:: Queens
-
-In version 1.12 the request body of a ``PUT /allocations/{consumer_uuid}``
-is expected to have an ``object`` for the ``allocations`` property, not an
-``array`` as with earlier microversions. This puts the request body more in
-alignment with the structure of the ``GET /allocations/{consumer_uuid}``
-response body. Because the ``PUT`` request requires ``user_id`` and
-``project_id`` in the request body, these fields are added to the ``GET``
-response. In addition, the response body for ``GET /allocation_candidates``
-is updated so the allocations in the ``allocation_requests`` object work
-with the new ``PUT`` format.
-
-1.13 POST multiple allocations to /allocations
-----------------------------------------------
-
-.. versionadded:: Queens
-
-Version 1.13 gives the ability to set or clear allocations for more than
-one consumer UUID with a request to ``POST /allocations``.
-
-1.14 Add nested resource providers
-----------------------------------
-
-.. versionadded:: Queens
-
-The 1.14 version introduces the concept of nested resource providers. The
-resource provider resource now contains two new attributes:
-
-* ``parent_provider_uuid`` indicates the provider's direct parent, or null if
-  there is no parent. This attribute can be set in the call to ``POST
-  /resource_providers`` and ``PUT /resource_providers/{uuid}`` if the attribute
-  has not already been set to a non-NULL value (i.e. we do not support
-  "reparenting" a provider).
-* ``root_provider_uuid`` indicates the UUID of the root resource provider in
-  the provider's tree. This is a read-only attribute.
-
-A new ``in_tree=<rp_uuid>`` parameter is now available in the ``GET
-/resource_providers`` API call. Supplying a UUID value for the ``in_tree``
-parameter will cause all resource providers within the "provider tree" of the
-provider matching ``<rp_uuid>`` to be returned.
-
-1.15 Add 'last-modified' and 'cache-control' headers
-----------------------------------------------------
-
-.. versionadded:: Queens
-
-Throughout the API, 'last-modified' headers have been added to GET responses
-and those PUT and POST responses that have bodies. The value is either the
-actual last modified time of the most recently modified associated database
-entity or the current time if there is no direct mapping to the database. In
-addition, 'cache-control: no-cache' headers are added where the 'last-modified'
-header has been added to prevent inadvertent caching of resources.
-
-1.16 Limit allocation candidates
---------------------------------
-
-.. versionadded:: Queens
-
-Add support for a ``limit`` query parameter when making a
-``GET /allocation_candidates`` request. The parameter accepts an integer
-value, ``N``, which limits the maximum number of candidates returned.
-
-1.17 Add 'required' parameter to the allocation candidates (Maximum in Queens)
-------------------------------------------------------------------------------
-
-.. versionadded:: Queens
-
-Add the ``required`` parameter to the ``GET /allocation_candidates`` API. It
-accepts a list of traits separated by ``,``. The provider summary in the
-response will also include the attached traits.
-
-1.18 Support ?required=<traits> queryparam on GET /resource_providers
---------------------------------------------------------------------
-
-.. versionadded:: Rocky
-
-Add support for the ``required`` query parameter to the ``GET
-/resource_providers`` API. It accepts a comma-separated list of string trait
-names.
When specified, the API results will be filtered to include only -resource providers marked with all the specified traits. This is in addition to -(logical AND) any filtering based on other query parameters. - -Trait names which are empty, do not exist, or are otherwise invalid will result -in a 400 error. - -1.19 Include generation and conflict detection in provider aggregates APIs --------------------------------------------------------------------------- - -.. versionadded:: Rocky - -Enhance the payloads for the ``GET /resource_providers/{uuid}/aggregates`` -response and the ``PUT /resource_providers/{uuid}/aggregates`` request and -response to be identical, and to include the ``resource_provider_generation``. -As with other generation-aware APIs, if the ``resource_provider_generation`` -specified in the ``PUT`` request does not match the generation known by the -server, a 409 Conflict error is returned. - -1.20 Return 200 with provider payload from POST /resource_providers -------------------------------------------------------------------- - -.. versionadded:: Rocky - -The ``POST /resource_providers`` API, on success, returns 200 with a payload -representing the newly-created resource provider, in the same format as the -corresponding ``GET /resource_providers/{uuid}`` call. This is to allow the -caller to glean automatically-set fields, such as UUID and generation, without -a subsequent GET. - -1.21 Support ?member_of= queryparam on GET /allocation_candidates ------------------------------------------------------------------------------ - -.. versionadded:: Rocky - -Add support for the ``member_of`` query parameter to the ``GET -/allocation_candidates`` API. It accepts a comma-separated list of UUIDs for -aggregates. Note that if more than one aggregate UUID is passed, the -comma-separated list must be prefixed with the "in:" operator. If this -parameter is provided, the only resource providers returned will be those in -one of the specified aggregates that meet the other parts of the request. - -1.22 Support forbidden traits on resource providers and allocations candidates ------------------------------------------------------------------------------- - -.. versionadded:: Rocky - -Add support for expressing traits which are forbidden when filtering -``GET /resource_providers`` or ``GET /allocation_candidates``. A forbidden -trait is a properly formatted trait in the existing ``required`` parameter, -prefixed by a ``!``. For example ``required=!STORAGE_DISK_SSD`` asks that the -results not include any resource providers that provide solid state disk. - -1.23 Include code attribute in JSON error responses ---------------------------------------------------- - -.. versionadded:: Rocky - -JSON formatted error responses gain a new attribute, ``code``, with a value -that identifies the type of this error. This can be used to distinguish errors -that are different but use the same HTTP status code. Any error response which -does not specifically define a code will have the code -``placement.undefined_code``. - -1.24 Support multiple ?member_of queryparams --------------------------------------------- - -.. versionadded:: Rocky - -Add support for specifying multiple ``member_of`` query parameters to the ``GET -/resource_providers`` API. When multiple ``member_of`` query parameters are -found, they are AND'd together in the final query. 
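In set terms, the AND'ing behaves like an intersection over the providers of each aggregate expression; a rough sketch of the semantics (the provider/aggregate memberships are invented, and the real filtering happens in SQL)::

    agg1 = {'rp1', 'rp2'}   # providers in agg1
    agg2 = {'rp2', 'rp3'}   # providers in agg2
    agg3 = {'rp2', 'rp4'}   # providers in agg3

    # ?member_of=agg1&member_of=agg2  ->  in agg1 AND in agg2
    assert agg1 & agg2 == {'rp2'}

    # ?member_of=in:agg1,agg2&member_of=agg3  ->  in (agg1 OR agg2) AND in agg3
    assert (agg1 | agg2) & agg3 == {'rp2'}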
For example, issuing a -request for ``GET /resource_providers?member_of=agg1&member_of=agg2`` means get -the resource providers that are associated with BOTH agg1 and agg2. Issuing a -request for ``GET /resource_providers?member_of=in:agg1,agg2&member_of=agg3`` -means get the resource providers that are associated with agg3 and are also -associated with *any of* (agg1, agg2). - -1.25 Granular resource requests to ``GET /allocation_candidates`` ------------------------------------------------------------------ - -.. versionadded:: Rocky - -``GET /allocation_candidates`` is enhanced to accept numbered groupings of -resource, required/forbidden trait, and aggregate association requests. A -``resources`` query parameter key with a positive integer suffix (e.g. -``resources42``) will be logically associated with ``required`` and/or -``member_of`` query parameter keys with the same suffix (e.g. ``required42``, -``member_of42``). The resources, required/forbidden traits, and aggregate -associations in that group will be satisfied by the same resource provider in -the response. When more than one numbered grouping is supplied, the -``group_policy`` query parameter is required to indicate how the groups should -interact. With ``group_policy=none``, separate groupings - numbered or -unnumbered - may or may not be satisfied by the same provider. With -``group_policy=isolate``, numbered groups are guaranteed to be satisfied by -*different* providers - though there may still be overlap with the unnumbered -group. In all cases, each ``allocation_request`` will be satisfied by providers -in a single non-sharing provider tree and/or sharing providers associated via -aggregate with any of the providers in that tree. - -The ``required`` and ``member_of`` query parameters for a given group are -optional. That is, you may specify ``resources42=XXX`` without a corresponding -``required42=YYY`` or ``member_of42=ZZZ``. However, the reverse (specifying -``required42=YYY`` or ``member_of42=ZZZ`` without ``resources42=XXX``) will -result in an error. - -The semantic of the (unnumbered) ``resources``, ``required``, and ``member_of`` -query parameters is unchanged: the resources, traits, and aggregate -associations specified thereby may be satisfied by any provider in the same -non-sharing tree or associated via the specified aggregate(s). - -1.26 Allow inventories to have reserved value equal to total ------------------------------------------------------------- - -.. versionadded:: Rocky - -Starting with this version, it is allowed to set the reserved value of the -resource provider inventory to be equal to total. - -1.27 Include all resource class inventories in provider_summaries ------------------------------------------------------------------ - -.. versionadded:: Rocky - -Include all resource class inventories in the ``provider_summaries`` field in -response of the ``GET /allocation_candidates`` API even if the resource class -is not in the requested resources. - -1.28 Consumer generation support --------------------------------- - -.. versionadded:: Rocky - -A new generation field has been added to the consumer concept. Consumers are -the actors that are allocated resources in the placement API. When an -allocation is created, a consumer UUID is specified. Starting with microversion -1.8, a project and user ID are also required. 
If using microversions prior to
-1.8, these are populated from the ``incomplete_consumer_project_id`` and
-``incomplete_consumer_user_id`` config options from the ``[placement]``
-section.
-
-The consumer generation facilitates safe concurrent modification of an
-allocation.
-
-A consumer generation is now returned from the following URIs:
-
-``GET /resource_providers/{uuid}/allocations``
-
-The response continues to be a dict with a key of ``allocations``, which itself
-is a dict, keyed by consumer UUID, of allocations against the resource
-provider. For each of those dicts, a ``consumer_generation`` field will now be
-shown.
-
-``GET /allocations/{consumer_uuid}``
-
-The response continues to be a dict with a key of ``allocations``, which
-itself is a dict, keyed by resource provider UUID, of allocations being
-consumed by the consumer with the ``{consumer_uuid}``. The top-level dict will
-also now contain a ``consumer_generation`` field.
-
-The value of the ``consumer_generation`` field is opaque and should only be
-sent back in subsequent operations on the consumer's allocations.
-
-The ``PUT /allocations/{consumer_uuid}`` URI has been modified to now require a
-``consumer_generation`` field in the request payload. This field is required to
-be ``null`` if the caller expects that there are no allocations already
-existing for the consumer. Otherwise, it should contain the generation that the
-caller understands the consumer to be at the time of the call.
-
-A ``409 Conflict`` will be returned from ``PUT /allocations/{consumer_uuid}``
-if there was a mismatch between the supplied generation and the consumer's
-generation as known by the server. Similarly, a ``409 Conflict`` will be
-returned if during the course of replacing the consumer's allocations another
-process concurrently changed the consumer's allocations. This allows the caller
-to react to the concurrent write by re-reading the consumer's allocations and
-re-issuing the call to replace allocations as needed.
-
-The ``PUT /allocations/{consumer_uuid}`` URI has also been modified to accept
-an empty allocations object, thereby bringing it to parity with the behaviour
-of ``POST /allocations``, which uses an empty allocations object to indicate
-that the allocations for a particular consumer should be removed. Passing an
-empty allocations object along with a ``consumer_generation`` makes ``PUT
-/allocations/{consumer_uuid}`` a **safe** way to delete allocations for a
-consumer. The ``DELETE /allocations/{consumer_uuid}`` URI remains unsafe to
-call in deployments where multiple callers may simultaneously be attempting to
-modify a consumer's allocations.
-
-The ``POST /allocations`` URI variant has also been changed to require a
-``consumer_generation`` field in the request payload **for each consumer
-involved in the request**. Similar responses to ``PUT
-/allocations/{consumer_uuid}`` are returned when any of the consumers'
-generations conflict with the server's view of those consumers or if any of the
-consumers involved in the request are modified by another process.
-
-.. warning:: In all cases, it is absolutely **NOT SAFE** to create and modify
-             allocations for a consumer using different microversions where one
-             of the microversions is prior to 1.28. The only way to safely
-             modify allocations for a consumer and satisfy expectations you
-             have regarding the prior existence (or lack of existence) of those
-             allocations is to always use microversion 1.28+ when calling
-             allocations API endpoints.
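A safe, generation-aware read-modify-write cycle therefore looks roughly like the following (the endpoint URL, token handling and payload contents are all illustrative)::

    import requests

    BASE = 'http://placement.example.com'          # hypothetical endpoint
    HDRS = {'X-Auth-Token': 'TOKEN',               # illustrative auth
            'OpenStack-API-Version': 'placement 1.28'}
    consumer = '00000000-0000-0000-0000-000000000001'  # placeholder UUID

    current = requests.get(
        f'{BASE}/allocations/{consumer}', headers=HDRS).json()
    payload = {
        'allocations': current['allocations'],     # modified as needed
        'project_id': 'demo-project',
        'user_id': 'demo-user',
        # None -> JSON null, meaning "I expect no existing allocations".
        'consumer_generation': current.get('consumer_generation'),
    }
    resp = requests.put(
        f'{BASE}/allocations/{consumer}', json=payload, headers=HDRS)
    if resp.status_code == 409:
        # A concurrent writer won; re-read the allocations and retry.
        ...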
- -1.29 Support allocation candidates with nested resource providers ------------------------------------------------------------------ - -.. versionadded:: Rocky - -Add support for nested resource providers with the following two features. -1) ``GET /allocation_candidates`` is aware of nested providers. Namely, when -provider trees are present, ``allocation_requests`` in the response of -``GET /allocation_candidates`` can include allocations on combinations of -multiple resource providers in the same tree. -2) ``root_provider_uuid`` and ``parent_provider_uuid`` are added to -``provider_summaries`` in the response of ``GET /allocation_candidates``. diff --git a/nova/api/openstack/placement/schemas/aggregate.py b/nova/api/openstack/placement/schemas/aggregate.py deleted file mode 100644 index dc5d9492166..00000000000 --- a/nova/api/openstack/placement/schemas/aggregate.py +++ /dev/null @@ -1,42 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -"""Aggregate schemas for Placement API.""" -import copy - - -_AGGREGATES_LIST_SCHEMA = { - "type": "array", - "items": { - "type": "string", - "format": "uuid" - }, - "uniqueItems": True -} - - -PUT_AGGREGATES_SCHEMA_V1_1 = copy.deepcopy(_AGGREGATES_LIST_SCHEMA) - - -PUT_AGGREGATES_SCHEMA_V1_19 = { - "type": "object", - "properties": { - "aggregates": copy.deepcopy(_AGGREGATES_LIST_SCHEMA), - "resource_provider_generation": { - "type": "integer", - } - }, - "required": [ - "aggregates", - "resource_provider_generation", - ], - "additionalProperties": False, -} diff --git a/nova/api/openstack/placement/schemas/allocation.py b/nova/api/openstack/placement/schemas/allocation.py deleted file mode 100644 index 7c0313b7fc9..00000000000 --- a/nova/api/openstack/placement/schemas/allocation.py +++ /dev/null @@ -1,167 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
-"""Placement API schemas for setting and deleting allocations.""" - -import copy - - -ALLOCATION_SCHEMA = { - "type": "object", - "properties": { - "allocations": { - "type": "array", - "minItems": 1, - "items": { - "type": "object", - "properties": { - "resource_provider": { - "type": "object", - "properties": { - "uuid": { - "type": "string", - "format": "uuid" - } - }, - "additionalProperties": False, - "required": ["uuid"] - }, - "resources": { - "type": "object", - "minProperties": 1, - "patternProperties": { - "^[0-9A-Z_]+$": { - "type": "integer", - "minimum": 1, - } - }, - "additionalProperties": False - } - }, - "required": [ - "resource_provider", - "resources" - ], - "additionalProperties": False - } - } - }, - "required": ["allocations"], - "additionalProperties": False -} - -ALLOCATION_SCHEMA_V1_8 = copy.deepcopy(ALLOCATION_SCHEMA) -ALLOCATION_SCHEMA_V1_8['properties']['project_id'] = {'type': 'string', - 'minLength': 1, - 'maxLength': 255} -ALLOCATION_SCHEMA_V1_8['properties']['user_id'] = {'type': 'string', - 'minLength': 1, - 'maxLength': 255} -ALLOCATION_SCHEMA_V1_8['required'].extend(['project_id', 'user_id']) - -# Update the allocation schema to achieve symmetry with the representation -# used when GET /allocations/{consumer_uuid} is called. -# NOTE(cdent): Explicit duplication here for sake of comprehensibility. -ALLOCATION_SCHEMA_V1_12 = { - "type": "object", - "properties": { - "allocations": { - "type": "object", - "minProperties": 1, - # resource provider uuid - "patternProperties": { - "^[0-9a-fA-F-]{36}$": { - "type": "object", - "properties": { - # generation is optional - "generation": { - "type": "integer", - }, - "resources": { - "type": "object", - "minProperties": 1, - # resource class - "patternProperties": { - "^[0-9A-Z_]+$": { - "type": "integer", - "minimum": 1, - } - }, - "additionalProperties": False - } - }, - "required": ["resources"], - "additionalProperties": False - } - }, - "additionalProperties": False - }, - "project_id": { - "type": "string", - "minLength": 1, - "maxLength": 255 - }, - "user_id": { - "type": "string", - "minLength": 1, - "maxLength": 255 - } - }, - "additionalProperties": False, - "required": [ - "allocations", - "project_id", - "user_id" - ] -} - - -# POST to /allocations, added in microversion 1.13, uses the -# POST_ALLOCATIONS_V1_13 schema to allow multiple allocations -# from multiple consumers in one request. It is a dict, keyed by -# consumer uuid, using the form of PUT allocations from microversion -# 1.12. In POST the allocations can be empty, so DELETABLE_ALLOCATIONS -# modifies ALLOCATION_SCHEMA_V1_12 accordingly. -DELETABLE_ALLOCATIONS = copy.deepcopy(ALLOCATION_SCHEMA_V1_12) -DELETABLE_ALLOCATIONS['properties']['allocations']['minProperties'] = 0 -POST_ALLOCATIONS_V1_13 = { - "type": "object", - "minProperties": 1, - "additionalProperties": False, - "patternProperties": { - "^[0-9a-fA-F-]{36}$": DELETABLE_ALLOCATIONS - } -} - -# A required consumer generation was added to the top-level dict in this -# version of PUT /allocations/{consumer_uuid}. 
In addition, the PUT -# /allocations/{consumer_uuid} now allows for empty allocations (indicating the -# allocations are being removed) -ALLOCATION_SCHEMA_V1_28 = copy.deepcopy(DELETABLE_ALLOCATIONS) -ALLOCATION_SCHEMA_V1_28['properties']['consumer_generation'] = { - "type": ["integer", "null"], - "additionalProperties": False -} -ALLOCATION_SCHEMA_V1_28['required'].append("consumer_generation") - -# A required consumer generation was added to the allocations dicts in this -# version of POST /allocations -REQUIRED_GENERATION_ALLOCS_POST = copy.deepcopy(DELETABLE_ALLOCATIONS) -alloc_props = REQUIRED_GENERATION_ALLOCS_POST['properties'] -alloc_props['consumer_generation'] = { - "type": ["integer", "null"], - "additionalProperties": False -} -REQUIRED_GENERATION_ALLOCS_POST['required'].append("consumer_generation") -POST_ALLOCATIONS_V1_28 = copy.deepcopy(POST_ALLOCATIONS_V1_13) -POST_ALLOCATIONS_V1_28["patternProperties"] = { - "^[0-9a-fA-F-]{36}$": REQUIRED_GENERATION_ALLOCS_POST -} diff --git a/nova/api/openstack/placement/schemas/allocation_candidate.py b/nova/api/openstack/placement/schemas/allocation_candidate.py deleted file mode 100644 index d418366ff67..00000000000 --- a/nova/api/openstack/placement/schemas/allocation_candidate.py +++ /dev/null @@ -1,78 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -"""Placement API schemas for getting allocation candidates.""" - -import copy - - -# Represents the allowed query string parameters to the GET -# /allocation_candidates API call -GET_SCHEMA_1_10 = { - "type": "object", - "properties": { - "resources": { - "type": "string" - }, - }, - "required": [ - "resources", - ], - "additionalProperties": False, -} - - -# Add limit query parameter. -GET_SCHEMA_1_16 = copy.deepcopy(GET_SCHEMA_1_10) -GET_SCHEMA_1_16['properties']['limit'] = { - # A query parameter is always a string in WebOb, but - # we'll handle integer here as well. - "type": ["integer", "string"], - "pattern": "^[1-9][0-9]*$", - "minimum": 1, - "minLength": 1 -} - -# Add required parameter. -GET_SCHEMA_1_17 = copy.deepcopy(GET_SCHEMA_1_16) -GET_SCHEMA_1_17['properties']['required'] = { - "type": ["string"] -} - -# Add member_of parameter. -GET_SCHEMA_1_21 = copy.deepcopy(GET_SCHEMA_1_17) -GET_SCHEMA_1_21['properties']['member_of'] = { - "type": ["string"] -} - -GET_SCHEMA_1_25 = copy.deepcopy(GET_SCHEMA_1_21) -# We're going to *replace* 'resources', 'required', and 'member_of'.
-del GET_SCHEMA_1_25["properties"]["resources"] -del GET_SCHEMA_1_25["required"] -del GET_SCHEMA_1_25["properties"]["required"] -del GET_SCHEMA_1_25["properties"]["member_of"] -# Pattern property key format for a numbered or un-numbered grouping -_GROUP_PAT_FMT = "^%s([1-9][0-9]*)?$" -GET_SCHEMA_1_25["patternProperties"] = { - _GROUP_PAT_FMT % "resources": { - "type": "string", - }, - _GROUP_PAT_FMT % "required": { - "type": "string", - }, - _GROUP_PAT_FMT % "member_of": { - "type": "string", - }, -} -GET_SCHEMA_1_25["properties"]["group_policy"] = { - "type": "string", - "enum": ["none", "isolate"], -} diff --git a/nova/api/openstack/placement/schemas/inventory.py b/nova/api/openstack/placement/schemas/inventory.py deleted file mode 100644 index 78f1c45d340..00000000000 --- a/nova/api/openstack/placement/schemas/inventory.py +++ /dev/null @@ -1,93 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -"""Inventory schemas for Placement API.""" - -import copy - -from nova.db import constants as db_const - - -RESOURCE_CLASS_IDENTIFIER = "^[A-Z0-9_]+$" -BASE_INVENTORY_SCHEMA = { - "type": "object", - "properties": { - "resource_provider_generation": { - "type": "integer" - }, - "total": { - "type": "integer", - "maximum": db_const.MAX_INT, - "minimum": 1, - }, - "reserved": { - "type": "integer", - "maximum": db_const.MAX_INT, - "minimum": 0, - }, - "min_unit": { - "type": "integer", - "maximum": db_const.MAX_INT, - "minimum": 1 - }, - "max_unit": { - "type": "integer", - "maximum": db_const.MAX_INT, - "minimum": 1 - }, - "step_size": { - "type": "integer", - "maximum": db_const.MAX_INT, - "minimum": 1 - }, - "allocation_ratio": { - "type": "number", - "maximum": db_const.SQL_SP_FLOAT_MAX - }, - }, - "required": [ - "total", - "resource_provider_generation" - ], - "additionalProperties": False -} - - -POST_INVENTORY_SCHEMA = copy.deepcopy(BASE_INVENTORY_SCHEMA) -POST_INVENTORY_SCHEMA['properties']['resource_class'] = { - "type": "string", - "pattern": RESOURCE_CLASS_IDENTIFIER, -} -POST_INVENTORY_SCHEMA['required'].append('resource_class') -POST_INVENTORY_SCHEMA['required'].remove('resource_provider_generation') - - -PUT_INVENTORY_RECORD_SCHEMA = copy.deepcopy(BASE_INVENTORY_SCHEMA) -PUT_INVENTORY_RECORD_SCHEMA['required'].remove('resource_provider_generation') -PUT_INVENTORY_SCHEMA = { - "type": "object", - "properties": { - "resource_provider_generation": { - "type": "integer" - }, - "inventories": { - "type": "object", - "patternProperties": { - RESOURCE_CLASS_IDENTIFIER: PUT_INVENTORY_RECORD_SCHEMA, - } - } - }, - "required": [ - "resource_provider_generation", - "inventories" - ], - "additionalProperties": False -} diff --git a/nova/api/openstack/placement/schemas/resource_class.py b/nova/api/openstack/placement/schemas/resource_class.py deleted file mode 100644 index 8dac3c3456a..00000000000 --- a/nova/api/openstack/placement/schemas/resource_class.py +++ /dev/null @@ -1,31 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in 
compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -"""Placement API schemas for resource classes.""" - -import copy - - -POST_RC_SCHEMA_V1_2 = { - "type": "object", - "properties": { - "name": { - "type": "string", - "pattern": "^CUSTOM\_[A-Z0-9_]+$", - "maxLength": 255, - }, - }, - "required": [ - "name" - ], - "additionalProperties": False, -} -PUT_RC_SCHEMA_V1_2 = copy.deepcopy(POST_RC_SCHEMA_V1_2) diff --git a/nova/api/openstack/placement/schemas/resource_provider.py b/nova/api/openstack/placement/schemas/resource_provider.py deleted file mode 100644 index 7ca43ef69ac..00000000000 --- a/nova/api/openstack/placement/schemas/resource_provider.py +++ /dev/null @@ -1,106 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -"""Placement API schemas for resource providers.""" - -import copy - - -POST_RESOURCE_PROVIDER_SCHEMA = { - "type": "object", - "properties": { - "name": { - "type": "string", - "maxLength": 200 - }, - "uuid": { - "type": "string", - "format": "uuid" - } - }, - "required": [ - "name" - ], - "additionalProperties": False, -} -# Remove uuid to create the schema for PUTting a resource provider -PUT_RESOURCE_PROVIDER_SCHEMA = copy.deepcopy(POST_RESOURCE_PROVIDER_SCHEMA) -PUT_RESOURCE_PROVIDER_SCHEMA['properties'].pop('uuid') - -# Placement API microversion 1.14 adds an optional parent_provider_uuid field -# to the POST and PUT request schemas -POST_RP_SCHEMA_V1_14 = copy.deepcopy(POST_RESOURCE_PROVIDER_SCHEMA) -POST_RP_SCHEMA_V1_14["properties"]["parent_provider_uuid"] = { - "anyOf": [ - { - "type": "string", - "format": "uuid", - }, - { - "type": "null", - } - ] -} -PUT_RP_SCHEMA_V1_14 = copy.deepcopy(POST_RP_SCHEMA_V1_14) -PUT_RP_SCHEMA_V1_14['properties'].pop('uuid') - -# Represents the allowed query string parameters to the GET /resource_providers -# API call -GET_RPS_SCHEMA_1_0 = { - "type": "object", - "properties": { - "name": { - "type": "string" - }, - "uuid": { - "type": "string", - "format": "uuid" - } - }, - "additionalProperties": False, -} - -# Placement API microversion 1.3 adds support for a member_of attribute -GET_RPS_SCHEMA_1_3 = copy.deepcopy(GET_RPS_SCHEMA_1_0) -GET_RPS_SCHEMA_1_3['properties']['member_of'] = { - "type": "string" -} - -# Placement API microversion 1.4 adds support for requesting resource providers -# having some set of capacity for some resources. The query string is a -# comma-delimited set of "$RESOURCE_CLASS_NAME:$AMOUNT" strings. The validation -# of the string is left up to the helper code in the -# normalize_resources_qs_param() function. 
-GET_RPS_SCHEMA_1_4 = copy.deepcopy(GET_RPS_SCHEMA_1_3) -GET_RPS_SCHEMA_1_4['properties']['resources'] = { - "type": "string" -} - -# Placement API microversion 1.14 adds support for requesting resource -# providers within a tree of providers. The 'in_tree' query string parameter -# should be the UUID of a resource provider. The result of the GET call will -# include only those resource providers in the same "provider tree" as the -# provider with the UUID represented by 'in_tree' -GET_RPS_SCHEMA_1_14 = copy.deepcopy(GET_RPS_SCHEMA_1_4) -GET_RPS_SCHEMA_1_14['properties']['in_tree'] = { - "type": "string", - "format": "uuid", -} - -# Microversion 1.18 adds support for the `required` query parameter to the -# `GET /resource_providers` API. It accepts a comma-separated list of string -# trait names. When specified, the API results will be filtered to include only -# resource providers marked with all the specified traits. This is in addition -# to (logical AND) any filtering based on other query parameters. -GET_RPS_SCHEMA_1_18 = copy.deepcopy(GET_RPS_SCHEMA_1_14) -GET_RPS_SCHEMA_1_18['properties']['required'] = { - "type": "string", -} diff --git a/nova/api/openstack/placement/schemas/trait.py b/nova/api/openstack/placement/schemas/trait.py deleted file mode 100644 index a46ec5077ac..00000000000 --- a/nova/api/openstack/placement/schemas/trait.py +++ /dev/null @@ -1,53 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -"""Trait schemas for Placement API.""" - -import copy - -TRAIT = { - "type": "string", - 'minLength': 1, 'maxLength': 255, -} - -CUSTOM_TRAIT = copy.deepcopy(TRAIT) -CUSTOM_TRAIT.update({"pattern": "^CUSTOM_[A-Z0-9_]+$"}) - -PUT_TRAITS_SCHEMA = { - "type": "object", - "properties": { - "traits": { - "type": "array", - "items": CUSTOM_TRAIT, - } - }, - 'required': ['traits'], - 'additionalProperties': False -} - -SET_TRAITS_FOR_RP_SCHEMA = copy.deepcopy(PUT_TRAITS_SCHEMA) -SET_TRAITS_FOR_RP_SCHEMA['properties']['traits']['items'] = TRAIT -SET_TRAITS_FOR_RP_SCHEMA['properties'][ - 'resource_provider_generation'] = {'type': 'integer'} -SET_TRAITS_FOR_RP_SCHEMA['required'].append('resource_provider_generation') - -LIST_TRAIT_SCHEMA = { - "type": "object", - "properties": { - "name": { - "type": "string" - }, - "associated": { - "type": "string", - } - }, - "additionalProperties": False -} diff --git a/nova/api/openstack/placement/schemas/usage.py b/nova/api/openstack/placement/schemas/usage.py deleted file mode 100644 index 3b1a1845046..00000000000 --- a/nova/api/openstack/placement/schemas/usage.py +++ /dev/null @@ -1,33 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the -# License for the specific language governing permissions and limitations -# under the License. -"""Placement API schemas for usage information.""" - -# Represents the allowed query string parameters to GET /usages -GET_USAGES_SCHEMA_1_9 = { - "type": "object", - "properties": { - "project_id": { - "type": "string", - "minLength": 1, - "maxLength": 255, - }, - "user_id": { - "type": "string", - "minLength": 1, - "maxLength": 255, - }, - }, - "required": [ - "project_id" - ], - "additionalProperties": False, -} diff --git a/nova/api/openstack/placement/util.py b/nova/api/openstack/placement/util.py deleted file mode 100644 index 6b3ae052f63..00000000000 --- a/nova/api/openstack/placement/util.py +++ /dev/null @@ -1,697 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -"""Utility methods for placement API.""" - -import functools -import re - -import jsonschema -from oslo_config import cfg -from oslo_log import log as logging -from oslo_middleware import request_id -from oslo_serialization import jsonutils -from oslo_utils import timeutils -from oslo_utils import uuidutils -import webob - -from nova.api.openstack.placement import errors -from nova.api.openstack.placement import exception -from nova.api.openstack.placement import lib as placement_lib -# NOTE(cdent): avoid cyclical import conflict between util and -# microversion -import nova.api.openstack.placement.microversion -from nova.api.openstack.placement.objects import consumer as consumer_obj -from nova.api.openstack.placement.objects import project as project_obj -from nova.api.openstack.placement.objects import user as user_obj -from nova.i18n import _ - -CONF = cfg.CONF -LOG = logging.getLogger(__name__) - -# Error code handling constants -ENV_ERROR_CODE = 'placement.error_code' -ERROR_CODE_MICROVERSION = (1, 23) - -# Querystring-related constants -_QS_RESOURCES = 'resources' -_QS_REQUIRED = 'required' -_QS_MEMBER_OF = 'member_of' -_QS_KEY_PATTERN = re.compile( - r"^(%s)([1-9][0-9]*)?$" % '|'.join( - (_QS_RESOURCES, _QS_REQUIRED, _QS_MEMBER_OF))) - - -# NOTE(cdent): This registers a FormatChecker on the jsonschema -# module. Do not delete this code! Although it appears that nothing -# is using the decorated method it is being used in JSON schema -# validations to check uuid formatted strings. -@jsonschema.FormatChecker.cls_checks('uuid') -def _validate_uuid_format(instance): - return uuidutils.is_uuid_like(instance) - - -def check_accept(*types): - """If accept is set explicitly, try to follow it. - - If there is no match for the incoming accept header - send a 406 response code. - - If accept is not set send our usual content-type in - response. 
- """ - def decorator(f): - @functools.wraps(f) - def decorated_function(req): - if req.accept: - best_matches = req.accept.acceptable_offers(types) - if not best_matches: - type_string = ', '.join(types) - raise webob.exc.HTTPNotAcceptable( - _('Only %(type)s is provided') % {'type': type_string}, - json_formatter=json_error_formatter) - return f(req) - return decorated_function - return decorator - - -def extract_json(body, schema): - """Extract JSON from a body and validate with the provided schema.""" - try: - data = jsonutils.loads(body) - except ValueError as exc: - raise webob.exc.HTTPBadRequest( - _('Malformed JSON: %(error)s') % {'error': exc}, - json_formatter=json_error_formatter) - try: - jsonschema.validate(data, schema, - format_checker=jsonschema.FormatChecker()) - except jsonschema.ValidationError as exc: - raise webob.exc.HTTPBadRequest( - _('JSON does not validate: %(error)s') % {'error': exc}, - json_formatter=json_error_formatter) - return data - - -def inventory_url(environ, resource_provider, resource_class=None): - url = '%s/inventories' % resource_provider_url(environ, resource_provider) - if resource_class: - url = '%s/%s' % (url, resource_class) - return url - - -def json_error_formatter(body, status, title, environ): - """A json_formatter for webob exceptions. - - Follows API-WG guidelines at - http://specs.openstack.org/openstack/api-wg/guidelines/errors.html - """ - # Shortcut to microversion module, to avoid wraps below. - microversion = nova.api.openstack.placement.microversion - - # Clear out the html that webob sneaks in. - body = webob.exc.strip_tags(body) - # Get status code out of status message. webob's error formatter - # only passes the entire status string. - status_code = int(status.split(None, 1)[0]) - error_dict = { - 'status': status_code, - 'title': title, - 'detail': body - } - - # Version may not be set if we have experienced an error before it - # is set. - want_version = environ.get(microversion.MICROVERSION_ENVIRON) - if want_version and want_version.matches(ERROR_CODE_MICROVERSION): - error_dict['code'] = environ.get(ENV_ERROR_CODE, errors.DEFAULT) - - # If the request id middleware has had a chance to add an id, - # put it in the error response. - if request_id.ENV_REQUEST_ID in environ: - error_dict['request_id'] = environ[request_id.ENV_REQUEST_ID] - - # When there is no microversion in the environment and a 406, - # microversion parsing failed so we need to include microversion - # min and max information in the error response. - if status_code == 406 and microversion.MICROVERSION_ENVIRON not in environ: - error_dict['max_version'] = microversion.max_version_string() - error_dict['min_version'] = microversion.min_version_string() - - return {'errors': [error_dict]} - - -def pick_last_modified(last_modified, obj): - """Choose max of last_modified and obj.updated_at or obj.created_at. - - If updated_at is not implemented in `obj` use the current time in UTC. - """ - try: - current_modified = (obj.updated_at or obj.created_at) - except NotImplementedError: - # If updated_at is not implemented, we are looking at objects that - # have not come from the database, so "now" is the right modified - # time.
- current_modified = timeutils.utcnow(with_timezone=True) - if last_modified: - last_modified = max(last_modified, current_modified) - else: - last_modified = current_modified - return last_modified - - -def require_content(content_type): - """Decorator to require a content type in a handler.""" - def decorator(f): - @functools.wraps(f) - def decorated_function(req): - if req.content_type != content_type: - # webob's unset content_type is the empty string, so - # set it to 'None' to make the error message content - # useful in that case. This also avoids a - # KeyError raised when webob.exc eagerly fills in a - # Template for output we will never use. - if not req.content_type: - req.content_type = 'None' - raise webob.exc.HTTPUnsupportedMediaType( - _('The media type %(bad_type)s is not supported, ' - 'use %(good_type)s') % - {'bad_type': req.content_type, - 'good_type': content_type}, - json_formatter=json_error_formatter) - else: - return f(req) - return decorated_function - return decorator - - -def resource_class_url(environ, resource_class): - """Produce the URL for a resource class. - - If SCRIPT_NAME is present, it is the mount point of the placement - WSGI app. - """ - prefix = environ.get('SCRIPT_NAME', '') - return '%s/resource_classes/%s' % (prefix, resource_class.name) - - -def resource_provider_url(environ, resource_provider): - """Produce the URL for a resource provider. - - If SCRIPT_NAME is present, it is the mount point of the placement - WSGI app. - """ - prefix = environ.get('SCRIPT_NAME', '') - return '%s/resource_providers/%s' % (prefix, resource_provider.uuid) - - -def trait_url(environ, trait): - """Produce the URL for a trait. - - If SCRIPT_NAME is present, it is the mount point of the placement - WSGI app. - """ - prefix = environ.get('SCRIPT_NAME', '') - return '%s/traits/%s' % (prefix, trait.name) - - -def validate_query_params(req, schema): - try: - # NOTE(Kevin_Zheng): The webob package throws UnicodeError when - # param cannot be decoded. Catch this and raise HTTP 400. - jsonschema.validate(dict(req.GET), schema, - format_checker=jsonschema.FormatChecker()) - except (jsonschema.ValidationError, UnicodeDecodeError) as exc: - raise webob.exc.HTTPBadRequest( - _('Invalid query string parameters: %(exc)s') % - {'exc': exc}) - - -def wsgi_path_item(environ, name): - """Extract the value of a named field in a URL. - - Return None if the name is not present or there are no path items. - """ - # NOTE(cdent): For the time being we don't need to urldecode - # the value as the entire placement API has paths that accept no - # encoded values. - try: - return environ['wsgiorg.routing_args'][1][name] - except (KeyError, IndexError): - return None - - -def normalize_resources_qs_param(qs): - """Given a query string parameter for resources, validate it meets the - expected format and return a dict of amounts, keyed by resource class name. - - The expected format of the resources parameter looks like so: - - $RESOURCE_CLASS_NAME:$AMOUNT,$RESOURCE_CLASS_NAME:$AMOUNT - - So, if the user was looking for resource providers that had room for an - instance that will consume 2 vCPUs, 1024 MB of RAM and 50GB of disk space, - they would use the following query string: - - ?resources=VCPU:2,MEMORY_MB:1024,DISK_GB:50 - - The returned value would be: - - { - "VCPU": 2, - "MEMORY_MB": 1024, - "DISK_GB": 50, - } - - :param qs: The value of the 'resources' query string parameter - :raises `webob.exc.HTTPBadRequest` if the parameter's value isn't in the - expected format.
- """ - if qs.strip() == "": - msg = _('Badly formed resources parameter. Expected resources ' - 'query string parameter in form: ' - '?resources=VCPU:2,MEMORY_MB:1024. Got: empty string.') - raise webob.exc.HTTPBadRequest(msg) - - result = {} - resource_tuples = qs.split(',') - for rt in resource_tuples: - try: - rc_name, amount = rt.split(':') - except ValueError: - msg = _('Badly formed resources parameter. Expected resources ' - 'query string parameter in form: ' - '?resources=VCPU:2,MEMORY_MB:1024. Got: %s.') - msg = msg % rt - raise webob.exc.HTTPBadRequest(msg) - try: - amount = int(amount) - except ValueError: - msg = _('Requested resource %(resource_name)s expected positive ' - 'integer amount. Got: %(amount)s.') - msg = msg % { - 'resource_name': rc_name, - 'amount': amount, - } - raise webob.exc.HTTPBadRequest(msg) - if amount < 1: - msg = _('Requested resource %(resource_name)s requires ' - 'amount >= 1. Got: %(amount)d.') - msg = msg % { - 'resource_name': rc_name, - 'amount': amount, - } - raise webob.exc.HTTPBadRequest(msg) - result[rc_name] = amount - return result - - -def valid_trait(trait, allow_forbidden): - """Return True if the provided trait is in the expected form. - - When allow_forbidden is True, then a leading '!' is acceptable. - """ - if trait.startswith('!') and not allow_forbidden: - return False - return True - - -def normalize_traits_qs_param(val, allow_forbidden=False): - """Parse a traits query string parameter value. - - Note that this method doesn't know or care about the query parameter key, - which may currently be of the form `required`, `required123`, etc., but - which may someday also include `preferred`, etc. - - This method currently does no format validation of trait strings, other - than to ensure they're not zero-length. - - :param val: A traits query parameter value: a comma-separated string of - trait names. - :param allow_forbidden: If True, accept forbidden traits (that is, traits - prefixed by '!') as valid, and use the forbidden - form when notifying the caller that the provided - value is not properly formed. - :return: A set of trait names. - :raises `webob.exc.HTTPBadRequest` if the val parameter is not in the - expected format. - """ - ret = set(substr.strip() for substr in val.split(',')) - expected_form = 'HW_CPU_X86_VMX,CUSTOM_MAGIC' - if allow_forbidden: - expected_form = 'HW_CPU_X86_VMX,!CUSTOM_MAGIC' - if not all(trait and valid_trait(trait, allow_forbidden) for trait in ret): - msg = _("Invalid query string parameters: Expected 'required' " - "parameter value of the form: %(form)s. " - "Got: %(val)s") % {'form': expected_form, 'val': val} - raise webob.exc.HTTPBadRequest(msg) - return ret - - -def normalize_member_of_qs_params(req, suffix=''): - """Given a webob.Request object, validate that the member_of querystring - parameters are correct. We begin supporting multiple member_of params in - microversion 1.24. - - :param req: webob.Request object - :return: A list containing sets of UUIDs of aggregates to filter on - :raises `webob.exc.HTTPBadRequest` if the microversion requested is <1.24 - and the request contains multiple member_of querystring params - :raises `webob.exc.HTTPBadRequest` if the val parameter is not in the - expected format.
- """ - microversion = nova.api.openstack.placement.microversion - want_version = req.environ[microversion.MICROVERSION_ENVIRON] - multi_member_of = want_version.matches((1, 24)) - if not multi_member_of and len(req.GET.getall('member_of' + suffix)) > 1: - raise webob.exc.HTTPBadRequest( - _('Multiple member_of%s parameters are not supported') % suffix) - values = [] - for value in req.GET.getall('member_of' + suffix): - values.append(normalize_member_of_qs_param(value)) - return values - - -def normalize_member_of_qs_param(value): - """Parse a member_of query string parameter value. - - Valid values are either a single UUID, or the prefix 'in:' followed by two - or more comma-separated UUIDs. - - :param value: A member_of query parameter of either a single UUID, or a - comma-separated string of two or more UUIDs, prefixed with - the "in:" operator - :return: A set of UUIDs - :raises `webob.exc.HTTPBadRequest` if the value parameter is not in the - expected format. - """ - if "," in value and not value.startswith("in:"): - msg = _("Multiple values for 'member_of' must be prefixed with the " - "'in:' keyword. Got: %s") % value - raise webob.exc.HTTPBadRequest(msg) - if value.startswith('in:'): - value = set(value[3:].split(',')) - else: - value = set([value]) - # Make sure the values are actually UUIDs. - for aggr_uuid in value: - if not uuidutils.is_uuid_like(aggr_uuid): - msg = _("Invalid query string parameters: Expected 'member_of' " - "parameter to contain valid UUID(s). Got: %s") % aggr_uuid - raise webob.exc.HTTPBadRequest(msg) - return value - - -def parse_qs_request_groups(req): - """Parse numbered resources, traits, and member_of groupings out of a - querystring dict. - - The input qsdict represents a query string of the form: - - ?resources=$RESOURCE_CLASS_NAME:$AMOUNT,$RESOURCE_CLASS_NAME:$AMOUNT - &required=$TRAIT_NAME,$TRAIT_NAME&member_of=in:$AGG1_UUID,$AGG2_UUID - &resources1=$RESOURCE_CLASS_NAME:$AMOUNT,RESOURCE_CLASS_NAME:$AMOUNT - &required1=$TRAIT_NAME,$TRAIT_NAME&member_of1=$AGG_UUID - &resources2=$RESOURCE_CLASS_NAME:$AMOUNT,RESOURCE_CLASS_NAME:$AMOUNT - &required2=$TRAIT_NAME,$TRAIT_NAME&member_of2=$AGG_UUID - - These are parsed in groups according to the numeric suffix of the key. - For each group, a RequestGroup instance is created containing that group's - resources, required traits, and member_of. For the (single) group with no - suffix, the RequestGroup.use_same_provider attribute is False; for the - numbered groups it is True. - - If a trait in the required parameter is prefixed with ``!`` this - indicates that that trait must not be present on the resource - providers in the group. That is, the trait is forbidden. Forbidden traits - are only processed if ``allow_forbidden`` is True. This allows the - caller to control processing based on microversion handling. - - The return is a dict, keyed by the numeric suffix of these RequestGroup - instances (or the empty string for the unnumbered group). 
- - As an example, if qsdict represents the query string: - - ?resources=VCPU:2,MEMORY_MB:1024,DISK_GB:50 - &required=HW_CPU_X86_VMX,CUSTOM_STORAGE_RAID - &member_of=9323b2b1-82c9-4e91-bdff-e95e808ef954 - &member_of=in:8592a199-7d73-4465-8df6-ab00a6243c82,ddbd9226-d6a6-475e-a85f-0609914dd058 # noqa - &resources1=SRIOV_NET_VF:2 - &required1=CUSTOM_PHYSNET_PUBLIC,CUSTOM_SWITCH_A - &resources2=SRIOV_NET_VF:1 - &required2=!CUSTOM_PHYSNET_PUBLIC - - ...the return value will be: - - { '': RequestGroup( - use_same_provider=False, - resources={ - "VCPU": 2, - "MEMORY_MB": 1024, - "DISK_GB": 50, - }, - required_traits=[ - "HW_CPU_X86_VMX", - "CUSTOM_STORAGE_RAID", - ], - member_of=[ - [9323b2b1-82c9-4e91-bdff-e95e808ef954], - [8592a199-7d73-4465-8df6-ab00a6243c82, - ddbd9226-d6a6-475e-a85f-0609914dd058], - ], - ), - '1': RequestGroup( - use_same_provider=True, - resources={ - "SRIOV_NET_VF": 2, - }, - required_traits=[ - "CUSTOM_PHYSNET_PUBLIC", - "CUSTOM_SWITCH_A", - ], - ), - '2': RequestGroup( - use_same_provider=True, - resources={ - "SRIOV_NET_VF": 1, - }, - forbidden_traits=[ - "CUSTOM_PHYSNET_PUBLIC", - ], - ), - } - - :param req: webob.Request object - :return: A dict, keyed by numeric suffix (or '' for the unnumbered - group), of RequestGroup instances. - :raises `webob.exc.HTTPBadRequest` if any value is malformed, or if a - trait list is given without corresponding resources. - """ - microversion = nova.api.openstack.placement.microversion - want_version = req.environ[microversion.MICROVERSION_ENVIRON] - # Control whether we handle forbidden traits. - allow_forbidden = want_version.matches((1, 22)) - # Temporary dict of the form: { suffix: RequestGroup } - by_suffix = {} - - def get_request_group(suffix): - if suffix not in by_suffix: - rq_grp = placement_lib.RequestGroup(use_same_provider=bool(suffix)) - by_suffix[suffix] = rq_grp - return by_suffix[suffix] - - for key, val in req.GET.items(): - match = _QS_KEY_PATTERN.match(key) - if not match: - continue - # `prefix` is 'resources', 'required', or 'member_of' - # `suffix` is an integer string, or None - prefix, suffix = match.groups() - suffix = suffix or '' - request_group = get_request_group(suffix) - if prefix == _QS_RESOURCES: - request_group.resources = normalize_resources_qs_param(val) - elif prefix == _QS_REQUIRED: - request_group.required_traits = normalize_traits_qs_param( - val, allow_forbidden=allow_forbidden) - elif prefix == _QS_MEMBER_OF: - # special handling of member_of qparam since we allow multiple - # member_of params at microversion 1.24. - # NOTE(jaypipes): Yes, it is inefficient to do this when there - # are multiple member_of query parameters, but we do this so we can - # error out if someone passes an "orphaned" member_of request - # group. - # TODO(jaypipes): Do validation of query parameters using - # JSONSchema - request_group.member_of = normalize_member_of_qs_params( - req, suffix) - - # Ensure any group with 'required' or 'member_of' also has 'resources'. - orphans = [('required%s' % suff) for suff, group in by_suffix.items() - if group.required_traits and not group.resources] - if orphans: - msg = _('All traits parameters must be associated with resources. ' - 'Found the following orphaned traits keys: %s') - raise webob.exc.HTTPBadRequest(msg % ', '.join(orphans)) - orphans = [('member_of%s' % suff) for suff, group in by_suffix.items() - if group.member_of and not group.resources] - if orphans: - msg = _('All member_of parameters must be associated with ' - 'resources.
Found the following orphaned member_of ' - 'keys: %s') - raise webob.exc.HTTPBadRequest(msg % ', '.join(orphans)) - # All request groups must have resources (which is almost, but not quite, - # verified by the orphan checks above). - if not all(grp.resources for grp in by_suffix.values()): - msg = _("All request groups must specify resources.") - raise webob.exc.HTTPBadRequest(msg) - # The above would still pass if there were no request groups - if not by_suffix: - msg = _("At least one request group (`resources` or `resources{N}`) " - "is required.") - raise webob.exc.HTTPBadRequest(msg) - - # Make adjustments for forbidden traits by stripping forbidden out - # of required. - if allow_forbidden: - conflicting_traits = [] - for suff, group in by_suffix.items(): - forbidden = [trait for trait in group.required_traits - if trait.startswith('!')] - group.required_traits = (group.required_traits - set(forbidden)) - group.forbidden_traits = set([trait.lstrip('!') for trait in - forbidden]) - conflicts = group.forbidden_traits & group.required_traits - if conflicts: - conflicting_traits.append('required%s: (%s)' - % (suff, ', '.join(conflicts))) - if conflicting_traits: - msg = _('Conflicting required and forbidden traits found in the ' - 'following traits keys: %s') - raise webob.exc.HTTPBadRequest(msg % ', '.join(conflicting_traits)) - - return by_suffix - - -def ensure_consumer(ctx, consumer_uuid, project_id, user_id, - consumer_generation, want_version): - """Ensures there are records in the consumers, projects and users table for - the supplied external identifiers. - - Returns a tuple containing the populated Consumer object containing Project - and User sub-objects and a boolean indicating whether a new Consumer object - was created (as opposed to an existing consumer record retrieved) - - :note: If the supplied project or user external identifiers do not match an - existing consumer's project and user identifiers, the existing - consumer's project and user IDs are updated to reflect the supplied - ones. - - :param ctx: The request context. - :param consumer_uuid: The uuid of the consumer of the resources. - :param project_id: The external ID of the project consuming the resources. - :param user_id: The external ID of the user consuming the resources. - :param consumer_generation: The generation provided by the user for this - consumer. - :param want_version: the microversion matcher. - :raises webob.exc.HTTPConflict if consumer generation is required and there - was a mismatch - """ - created_new_consumer = False - requires_consumer_generation = want_version.matches((1, 28)) - if project_id is None: - project_id = CONF.placement.incomplete_consumer_project_id - user_id = CONF.placement.incomplete_consumer_user_id - try: - proj = project_obj.Project.get_by_external_id(ctx, project_id) - except exception.NotFound: - # Auto-create the project if we found no record of it... - try: - proj = project_obj.Project(ctx, external_id=project_id) - proj.create() - except exception.ProjectExists: - # No worries, another thread created this project already - proj = project_obj.Project.get_by_external_id(ctx, project_id) - try: - user = user_obj.User.get_by_external_id(ctx, user_id) - except exception.NotFound: - # Auto-create the user if we found no record of it... 
- try: - user = user_obj.User(ctx, external_id=user_id) - user.create() - except exception.UserExists: - # No worries, another thread created this user already - user = user_obj.User.get_by_external_id(ctx, user_id) - - try: - consumer = consumer_obj.Consumer.get_by_uuid(ctx, consumer_uuid) - if requires_consumer_generation: - if consumer.generation != consumer_generation: - raise webob.exc.HTTPConflict( - _('consumer generation conflict - ' - 'expected %(expected_gen)s but got %(got_gen)s') % - { - 'expected_gen': consumer.generation, - 'got_gen': consumer_generation, - }, - comment=errors.CONCURRENT_UPDATE) - # NOTE(jaypipes): The user may have specified a different project and - # user external ID than the one that we had for the consumer. If this - # is the case, go ahead and modify the consumer record with the - # newly-supplied project/user information, but do not bump the consumer - # generation (since it will be bumped in the - # AllocationList.replace_all() method). - # - # TODO(jaypipes): This means that there may be a partial update. - # Imagine a scenario where a user calls POST /allocations, and the - # payload references two consumers. The first consumer is a new - # consumer and is auto-created. The second consumer is an existing - # consumer, but contains a different project or user ID than the - # existing consumer's record. If the eventual call to - # AllocationList.replace_all() fails for whatever reason (say, a - # resource provider generation conflict or out of resources failure), - # we will end up deleting the auto-created consumer but we MAY not undo - # the changes to the second consumer's project and user ID. I say MAY - # and not WILL NOT because I'm not sure that the exception that gets - # raised from AllocationList.replace_all() will cause the context - # manager's transaction to rollback automatically. I believe that the - # same transaction context is used for both util.ensure_consumer() and - # AllocationList.replace_all() within the same HTTP request, but need - # to test this to be 100% certain... - if (project_id != consumer.project.external_id or - user_id != consumer.user.external_id): - LOG.debug("Supplied project or user ID for consumer %s was " - "different than existing record. Updating consumer " - "record.", consumer_uuid) - consumer.project = proj - consumer.user = user - consumer.update() - except exception.NotFound: - # If we are attempting to modify or create allocations at - # microversion 1.28 or later, we need a consumer generation - # specified. The user must have specified - # None for the consumer generation if we get here, since there was no - # existing consumer with this UUID and therefore the user should be - # indicating that they expect the consumer did not exist. - if requires_consumer_generation: - if consumer_generation is not None: - raise webob.exc.HTTPConflict( - _('consumer generation conflict - ' - 'expected null but got %s') % consumer_generation, - comment=errors.CONCURRENT_UPDATE) - # No such consumer. This is common for new allocations.
Create the - # consumer record - try: - consumer = consumer_obj.Consumer( - ctx, uuid=consumer_uuid, project=proj, user=user) - consumer.create() - created_new_consumer = True - except exception.ConsumerExists: - # No worries, another thread created this consumer already - consumer = consumer_obj.Consumer.get_by_uuid(ctx, consumer_uuid) - return consumer, created_new_consumer diff --git a/nova/api/openstack/placement/wsgi.py b/nova/api/openstack/placement/wsgi.py deleted file mode 100644 index c17e2103b3c..00000000000 --- a/nova/api/openstack/placement/wsgi.py +++ /dev/null @@ -1,110 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -"""WSGI script for Placement API - -WSGI handler for running Placement API under Apache2, nginx, gunicorn etc. -""" - -import logging as py_logging -import os -import os.path - -from oslo_log import log as logging -from oslo_middleware import cors -from oslo_utils import importutils -import pbr.version - -from nova.api.openstack.placement import db_api -from nova.api.openstack.placement import deploy -from nova import conf - - -profiler = importutils.try_import('osprofiler.opts') - - -CONFIG_FILE = 'nova.conf' - - -version_info = pbr.version.VersionInfo('nova') - - -def setup_logging(config): - # Any dependent libraries that have unhelpful debug levels should be - # pinned to a higher default.
- extra_log_level_defaults = [ - 'routes=INFO', - ] - logging.set_defaults(default_log_levels=logging.get_default_log_levels() + - extra_log_level_defaults) - logging.setup(config, 'nova') - py_logging.captureWarnings(True) - - -def _get_config_file(env=None): - if env is None: - env = os.environ - - dirname = env.get('OS_PLACEMENT_CONFIG_DIR', '/etc/nova').strip() - return os.path.join(dirname, CONFIG_FILE) - - -def _parse_args(argv, default_config_files): - logging.register_options(conf.CONF) - - if profiler: - profiler.set_defaults(conf.CONF) - - _set_middleware_defaults() - - conf.CONF(argv[1:], project='nova', version=version_info.version_string(), - default_config_files=default_config_files) - - -def _set_middleware_defaults(): - """Update default configuration options for oslo.middleware.""" - cors.set_defaults( - allow_headers=['X-Auth-Token', - 'X-Openstack-Request-Id', - 'X-Identity-Status', - 'X-Roles', - 'X-Service-Catalog', - 'X-User-Id', - 'X-Tenant-Id'], - expose_headers=['X-Auth-Token', - 'X-Openstack-Request-Id', - 'X-Subject-Token', - 'X-Service-Token'], - allow_methods=['GET', - 'PUT', - 'POST', - 'DELETE', - 'PATCH'] - ) - - -def init_application(): - # initialize the config system - conffile = _get_config_file() - _parse_args([], default_config_files=[conffile]) - db_api.configure(conf.CONF) - - # initialize the logging system - setup_logging(conf.CONF) - - # dump conf at debug if log_options - if conf.CONF.log_options: - conf.CONF.log_opt_values( - logging.getLogger(__name__), - logging.DEBUG) - - # build and return our WSGI app - return deploy.loadapp(conf.CONF) diff --git a/nova/api/openstack/placement/wsgi_wrapper.py b/nova/api/openstack/placement/wsgi_wrapper.py deleted file mode 100644 index fcb6551d3e4..00000000000 --- a/nova/api/openstack/placement/wsgi_wrapper.py +++ /dev/null @@ -1,38 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -"""Extend functionality from webob.dec.wsgify for Placement API.""" - -import webob - -from oslo_log import log as logging -from webob.dec import wsgify - -from nova.api.openstack.placement import util - -LOG = logging.getLogger(__name__) - - -class PlacementWsgify(wsgify): - - def call_func(self, req, *args, **kwargs): - """Add json_error_formatter to any webob HTTPExceptions.""" - try: - super(PlacementWsgify, self).call_func(req, *args, **kwargs) - except webob.exc.HTTPException as exc: - LOG.debug("Placement API returning an error response: %s", exc) - exc.json_formatter = util.json_error_formatter - # The exception itself is not passed to json_error_formatter - # but environ is, so set the environ. 
- if exc.comment: - req.environ[util.ENV_ERROR_CODE] = exc.comment - exc.comment = None - raise diff --git a/nova/api/openstack/requestlog.py b/nova/api/openstack/requestlog.py index 3c41f114f0a..dd0d207e34f 100644 --- a/nova/api/openstack/requestlog.py +++ b/nova/api/openstack/requestlog.py @@ -21,6 +21,9 @@ from nova.api.openstack import wsgi from nova.api import wsgi as base_wsgi +import nova.conf + +CONF = nova.conf.CONF # TODO(sdague) maybe we can use a better name here for the logger LOG = logging.getLogger(__name__) @@ -65,8 +68,17 @@ def _log_req(self, req, res, start): # wsgi stack, res is going to be an empty dict for the # fallback logging. So never count on it having attributes. status = getattr(res, "status", "500 Error").split(None, 1)[0] + + remote_address = req.environ.get('REMOTE_ADDR', '-') + + # If the API is configured to treat the X-Forwarded-For header as the + # canonical remote address, use its value instead. + if CONF.api.use_forwarded_for: + remote_address = req.environ.get( + 'HTTP_X_FORWARDED_FOR', remote_address) + data = { - 'REMOTE_ADDR': req.environ.get('REMOTE_ADDR', '-'), + 'REMOTE_ADDR': remote_address, 'REQUEST_METHOD': req.environ['REQUEST_METHOD'], 'REQUEST_URI': self._get_uri(req.environ), 'status': status, diff --git a/nova/api/openstack/urlmap.py b/nova/api/openstack/urlmap.py index 8482f9b8e59..7a20c0801c4 100644 --- a/nova/api/openstack/urlmap.py +++ b/nova/api/openstack/urlmap.py @@ -14,15 +14,10 @@ # under the License. import re +from urllib import request as urllib2 from oslo_log import log as logging import paste.urlmap -import six - -if six.PY2: - import urllib2 -else: - from urllib import request as urllib2 from nova.api.openstack import wsgi @@ -168,8 +163,27 @@ def _match(self, host, port, path_info): for (domain, app_url), app in self.applications: if domain and domain != host and domain != host + ':' + port: continue - if (path_info == app_url - or path_info.startswith(app_url + '/')): + # Rudimentary "wildcard" support: + # By declaring a urlmap path ending in '/+', you're saying the + # incoming path must start with everything up to and including the + # '/' *and* have something after that as well. For example, path + # /foo/bar/+ will match /foo/bar/baz, but not /foo/bar/ or /foo/bar + # NOTE(efried): This assumes we'll never need a path URI component + # that legitimately starts with '+'. (We could use a + # more obscure character/sequence here in that case.) + if app_url.endswith('/+'): + # Must be requesting at least the path element (including /) + if not path_info.startswith(app_url[:-1]): + continue + # ...but also must be requesting something after that / + if len(path_info) < len(app_url): + continue + # Trim the /+ off the app_url to make it look "normal" for e.g. + # proper splitting of SCRIPT_NAME and PATH_INFO. 
+ return app, app_url[:-2] + # Normal (non-wildcarded) prefix match + if (path_info == app_url or + path_info.startswith(app_url + '/')): return app, app_url return None, None diff --git a/nova/api/openstack/wsgi.py b/nova/api/openstack/wsgi.py index 2eef33c0126..e64b4a2016a 100644 --- a/nova/api/openstack/wsgi.py +++ b/nova/api/openstack/wsgi.py @@ -21,7 +21,6 @@ from oslo_serialization import jsonutils from oslo_utils import encodeutils from oslo_utils import strutils -import six import webob from nova.api.openstack import api_version_request as api_version @@ -30,6 +29,7 @@ from nova import exception from nova import i18n from nova.i18n import _ +from nova import version LOG = logging.getLogger(__name__) @@ -74,78 +74,14 @@ def get_supported_content_types(): return _SUPPORTED_CONTENT_TYPES -# NOTE(rlrossit): This function allows a get on both a dict-like and an -# object-like object. cache_db_items() is used on both versioned objects and -# dicts, so the function can't be totally changed over to [] syntax, nor -# can it be changed over to use getattr(). -def item_get(item, item_key): - if hasattr(item, '__getitem__'): - return item[item_key] - else: - return getattr(item, item_key) - - class Request(wsgi.Request): """Add some OpenStack API-specific logic to the base webob.Request.""" def __init__(self, *args, **kwargs): super(Request, self).__init__(*args, **kwargs) - self._extension_data = {'db_items': {}} if not hasattr(self, 'api_version_request'): self.api_version_request = api_version.APIVersionRequest() - def cache_db_items(self, key, items, item_key='id'): - """Allow API methods to store objects from a DB query to be - used by API extensions within the same API request. - - An instance of this class only lives for the lifetime of a - single API request, so there's no need to implement full - cache management. - """ - db_items = self._extension_data['db_items'].setdefault(key, {}) - for item in items: - db_items[item_get(item, item_key)] = item - - def get_db_items(self, key): - """Allow an API extension to get previously stored objects within - the same API request. - - Note that the object data will be slightly stale. - """ - return self._extension_data['db_items'][key] - - def get_db_item(self, key, item_key): - """Allow an API extension to get a previously stored object - within the same API request. - - Note that the object data will be slightly stale. 
- """ - return self.get_db_items(key).get(item_key) - - def cache_db_instances(self, instances): - self.cache_db_items('instances', instances, 'uuid') - - def cache_db_instance(self, instance): - self.cache_db_items('instances', [instance], 'uuid') - - def get_db_instances(self): - return self.get_db_items('instances') - - def get_db_instance(self, instance_uuid): - return self.get_db_item('instances', instance_uuid) - - def cache_db_flavors(self, flavors): - self.cache_db_items('flavors', flavors, 'flavorid') - - def cache_db_flavor(self, flavor): - self.cache_db_items('flavors', [flavor], 'flavorid') - - def get_db_flavors(self): - return self.get_db_items('flavors') - - def get_db_flavor(self, flavorid): - return self.get_db_item('flavors', flavorid) - def best_match_content_type(self): """Determine the requested response content-type.""" if 'nova.best_content_type' not in self.environ: @@ -160,8 +96,10 @@ def best_match_content_type(self): content_type = possible_type if not content_type: - content_type = self.accept.best_match( + best_matches = self.accept.acceptable_offers( get_supported_content_types()) + if best_matches: + content_type = best_matches[0][0] self.environ['nova.best_content_type'] = (content_type or 'application/json') @@ -198,8 +136,19 @@ def best_match_language(self): """ if not self.accept_language: return None - return self.accept_language.best_match( - i18n.get_available_languages()) + + # NOTE(takashin): To decide the default behavior, 'default' is + # preferred over 'default_tag' because it is returned as-is when + # there is no match. It is also a little tricky in that the 'default' + # value cannot be None. At least one of default_tag or default must + # be supplied as an argument to the method, to define the defaulting + # behavior. So we pass a sentinel value in order to return None from + # this function. + best_match = self.accept_language.lookup( + i18n.get_available_languages(), default='fake_LANG') + + if best_match == 'fake_LANG': + best_match = None + return best_match def set_api_version_request(self): """Set API version request based on the request header information.""" @@ -271,7 +220,7 @@ def serialize(self, data, action='default'): return self.dispatch(data, action=action) def default(self, data): - return six.text_type(jsonutils.dumps(data)) + return str(jsonutils.dumps(data)) def response(code): @@ -325,6 +274,9 @@ def serialize(self, request, content_type): Utility method for serializing the wrapped object. Returns a webob.Response object. + + Header values are set to the appropriate Python type and + encoding demanded by PEP 3333: whatever the native str type is. """ serializer = self.serializer @@ -335,26 +287,17 @@ def serialize(self, request, content_type): response = webob.Response(body=body) response.status_int = self.code for hdr, val in self._headers.items(): - if not isinstance(val, six.text_type): - val = six.text_type(val) - if six.PY2: - # In Py2.X Headers must be byte strings - response.headers[hdr] = encodeutils.safe_encode(val) - else: - # In Py3.X Headers must be utf-8 strings - response.headers[hdr] = encodeutils.safe_decode( - encodeutils.safe_encode(val)) + # In Py3.X Headers must be a str that was first safely + # encoded to UTF-8 (to catch any bad encodings) and then + # decoded back to a native str.
+ response.headers[hdr] = encodeutils.safe_decode( + encodeutils.safe_encode(val)) # Deal with content_type - if not isinstance(content_type, six.text_type): - content_type = six.text_type(content_type) - if six.PY2: - # In Py2.X Headers must be byte strings - response.headers['Content-Type'] = encodeutils.safe_encode( - content_type) - else: - # In Py3.X Headers must be utf-8 strings - response.headers['Content-Type'] = encodeutils.safe_decode( - encodeutils.safe_encode(content_type)) + if not isinstance(content_type, str): + content_type = str(content_type) + # In Py3.X Headers must be a str. + response.headers['Content-Type'] = encodeutils.safe_decode( + encodeutils.safe_encode(content_type)) return response @property @@ -396,7 +339,7 @@ class ResourceExceptionHandler(object): """Context manager to handle Resource exceptions. Used when processing exceptions generated by API implementation - methods (or their extensions). Converts most exceptions to Fault + methods. Converts most exceptions to Fault exceptions, with the appropriate logging. """ @@ -463,10 +406,6 @@ def __init__(self, controller): if controller: self.register_actions(controller) - # Save a mapping of extensions - self.wsgi_extensions = {} - self.wsgi_action_extensions = {} - def register_actions(self, controller): """Registers controller actions with this resource.""" @@ -474,25 +413,6 @@ def register_actions(self, controller): for key, method_name in actions.items(): self.wsgi_actions[key] = getattr(controller, method_name) - def register_extensions(self, controller): - """Registers controller extensions with this resource.""" - - extensions = getattr(controller, 'wsgi_extensions', []) - for method_name, action_name in extensions: - # Look up the extending method - extension = getattr(controller, method_name) - - if action_name: - # Extending an action... - if action_name not in self.wsgi_action_extensions: - self.wsgi_action_extensions[action_name] = [] - self.wsgi_action_extensions[action_name].append(extension) - else: - # Extending a regular method - if method_name not in self.wsgi_extensions: - self.wsgi_extensions[method_name] = [] - self.wsgi_extensions[method_name].append(extension) - def get_action_args(self, request_environment): """Parse dictionary created by routes library.""" @@ -526,30 +446,6 @@ def get_body(self, request): def deserialize(self, body): return JSONDeserializer().deserialize(body) - def process_extensions(self, extensions, resp_obj, request, - action_args): - for ext in extensions: - response = None - # Regular functions get post-processing... - try: - with ResourceExceptionHandler(): - response = ext(req=request, resp_obj=resp_obj, - **action_args) - except exception.VersionNotFoundForAPIMethod: - # If an attached extension (@wsgi.extends) for the - # method has no version match its not an error. We - # just don't run the extends code - continue - except Fault as ex: - response = ex - - # We had a response return it, to exit early. This is - # actually a failure mode. None is success. 
- if response: - return response - - return None - def _should_have_body(self, request): return request.method in _METHODS_WITH_BODY @@ -596,8 +492,8 @@ def _process_stack(self, request, action, action_args, # Get the implementing method try: - meth, extensions = self.get_method(request, action, - content_type, body) + meth = self.get_method(request, action, + content_type, body) except (AttributeError, TypeError): return Fault(webob.exc.HTTPNotFound()) except KeyError as ex: @@ -610,7 +506,7 @@ def _process_stack(self, request, action, action_args, if body: msg = _("Action: '%(action)s', calling method: %(meth)s, body: " "%(body)s") % {'action': action, - 'body': six.text_type(body, 'utf-8'), + 'body': str(body, 'utf-8'), 'meth': str(meth)} LOG.debug(strutils.mask_password(msg)) else: @@ -660,24 +556,17 @@ def _process_stack(self, request, action, action_args, # Do a preserialize to set up the response object if hasattr(meth, 'wsgi_code'): resp_obj._default_code = meth.wsgi_code - # Process extensions - response = self.process_extensions(extensions, resp_obj, - request, action_args) if resp_obj and not response: response = resp_obj.serialize(request, accept) if hasattr(response, 'headers'): for hdr, val in list(response.headers.items()): - if not isinstance(val, six.text_type): - val = six.text_type(val) - if six.PY2: - # In Py2.X Headers must be byte strings - response.headers[hdr] = encodeutils.safe_encode(val) - else: - # In Py3.X Headers must be utf-8 strings - response.headers[hdr] = encodeutils.safe_decode( - encodeutils.safe_encode(val)) + if not isinstance(val, str): + val = str(val) + # In Py3.X Headers must be a string + response.headers[hdr] = encodeutils.safe_decode( + encodeutils.safe_encode(val)) if not request.api_version_request.is_null(): response.headers[API_VERSION_REQUEST_HEADER] = \ @@ -700,36 +589,33 @@ def _get_request_content(self, body, request): return contents def get_method(self, request, action, content_type, body): - meth, extensions = self._get_method(request, - action, - content_type, - body) - return meth, extensions + meth = self._get_method(request, + action, + content_type, + body) + return meth def _get_method(self, request, action, content_type, body): - """Look up the action-specific method and its extensions.""" + """Look up the action-specific method.""" # Look up the method try: if not self.controller: meth = getattr(self, action) else: meth = getattr(self.controller, action) + return meth except AttributeError: if (not self.wsgi_actions or action not in _ROUTES_METHODS + ['action']): # Propagate the error raise - else: - return meth, self.wsgi_extensions.get(action, []) - if action == 'action': action_name = action_peek(body) else: action_name = action # Look up the action method - return (self.wsgi_actions[action_name], - self.wsgi_action_extensions.get(action_name, [])) + return (self.wsgi_actions[action_name]) def dispatch(self, method, request, action_args): """Dispatch a call to the action-specific method.""" @@ -758,35 +644,6 @@ def decorator(func): return decorator -def extends(*args, **kwargs): - """Indicate a function extends an operation. 
- - Can be used as either:: - - @extends - def index(...): - pass - - or as:: - - @extends(action='resize') - def _action_resize(...): - pass - """ - - def decorator(func): - # Store enough information to find what we're extending - func.wsgi_extends = (func.__name__, kwargs.get('action')) - return func - - # If we have positional arguments, call the decorator - if args: - return decorator(*args) - - # OK, return the decorator instead - return decorator - - def expected_errors(errors): """Decorator for v2.1 API methods which specifies expected exceptions. @@ -814,6 +671,15 @@ def wrapped(*args, **kwargs): # calls. ResourceExceptionHandler silently # converts NotAuthorized to HTTPForbidden raise + elif isinstance(exc, exception.NotSupported): + # Note(gmann): Special case to handle + # NotSupported exceptions. We want to raise 400 BadRequest + # for the NotSupported exception which is basically used + # to raise for not supported features. Converting it here + # will avoid converting every NotSupported inherited + # exception in API controller. + raise webob.exc.HTTPBadRequest( + explanation=exc.format_message()) elif isinstance(exc, exception.ValidationError): # Note(oomichi): Handle a validation error, which # happens due to invalid API parameters, as an @@ -827,9 +693,10 @@ def wrapped(*args, **kwargs): raise LOG.exception("Unexpected exception in API method") - msg = _('Unexpected API Error. Please report this at ' - 'http://bugs.launchpad.net/nova/ and attach the Nova ' - 'API log if possible.\n%s') % type(exc) + msg = _("Unexpected API Error. " + "%(support)s\n%(exc)s" % { + 'support': version.support_string(), + 'exc': type(exc)}) raise webob.exc.HTTPInternalServerError(explanation=msg) return wrapped @@ -849,7 +716,6 @@ def __new__(mcs, name, bases, cls_dict): # Find all actions actions = {} - extensions = [] versioned_methods = None # start with wsgi actions from base classes for base in bases: @@ -871,12 +737,9 @@ def __new__(mcs, name, bases, cls_dict): continue if getattr(value, 'wsgi_action', None): actions[value.wsgi_action] = key - elif getattr(value, 'wsgi_extends', None): - extensions.append(value.wsgi_extends) - # Add the actions and extensions to the class dict + # Add the actions to the class dict cls_dict['wsgi_actions'] = actions - cls_dict['wsgi_extensions'] = extensions if versioned_methods: cls_dict[VER_METHOD_ATTR] = versioned_methods @@ -884,17 +747,14 @@ def __new__(mcs, name, bases, cls_dict): cls_dict) -@six.add_metaclass(ControllerMetaclass) -class Controller(object): +class Controller(metaclass=ControllerMetaclass): """Default controller.""" _view_builder_class = None - def __init__(self, view_builder=None): + def __init__(self): """Initialize controller with a view builder instance.""" - if view_builder: - self._view_builder = view_builder - elif self._view_builder_class: + if self._view_builder_class: self._view_builder = self._view_builder_class() else: self._view_builder = None diff --git a/nova/api/openstack/wsgi_app.py b/nova/api/openstack/wsgi_app.py index 3fef2df2bc5..3a7383918ce 100644 --- a/nova/api/openstack/wsgi_app.py +++ b/nova/api/openstack/wsgi_app.py @@ -12,6 +12,7 @@ """WSGI application initialization for Nova APIs.""" import os +import sys from oslo_config import cfg from oslo_log import log as logging @@ -23,11 +24,14 @@ from nova import exception from nova import objects from nova import service +from nova import utils CONF = cfg.CONF CONFIG_FILES = ['api-paste.ini', 'nova.conf'] +LOG = logging.getLogger(__name__) + objects.register_all() 
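The ``expected_errors`` hunk above changes how controller exceptions surface: any ``NotSupported``-derived exception is now mapped to a 400 once, in the decorator, rather than being converted in every API controller. A minimal sketch of that translation pattern, using a stand-in exception class rather than Nova's real exception hierarchy::

    import functools

    import webob.exc


    class NotSupported(Exception):
        """Stand-in for nova.exception.NotSupported."""

        def format_message(self):
            return str(self)


    def expected_errors(errors):
        """Map known exceptions to HTTP errors; anything else is a 500."""
        def decorator(func):
            @functools.wraps(func)
            def wrapped(*args, **kwargs):
                try:
                    return func(*args, **kwargs)
                except NotSupported as exc:
                    # an unsupported feature is a client error, not a
                    # server bug, so convert it to 400 BadRequest here
                    raise webob.exc.HTTPBadRequest(
                        explanation=exc.format_message())
                except webob.exc.WSGIHTTPException as exc:
                    if exc.code in errors:
                        raise  # declared as expected by the API method
                    raise webob.exc.HTTPInternalServerError()
            return wrapped
        return decorator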
@@ -40,6 +44,14 @@ def _get_config_files(env=None): def _setup_service(host, name): + try: + utils.raise_if_old_compute() + except exception.TooOldComputeService as e: + if CONF.workarounds.disable_compute_service_check_for_ffu: + LOG.warning(str(e)) + else: + raise + binary = name if name.startswith('nova-') else "nova-%s" % name ctxt = context.get_admin_context() @@ -71,17 +83,18 @@ def application(environ, start_response): return application -def init_application(name): - conf_files = _get_config_files() - config.parse_args([], default_config_files=conf_files) +@utils.run_once('Global data already initialized, not re-initializing.', + LOG.info) +def init_global_data(conf_files): + # NOTE(melwitt): parse_args initializes logging and calls global rpc.init() + # and db_api.configure(). The db_api.configure() call does not initiate any + # connection to the database. - logging.setup(CONF, "nova") - try: - _setup_service(CONF.host, name) - except exception.ServiceTooOld as exc: - return error_application(exc, name) + # NOTE(gibi): sys.argv is set by the wsgi runner e.g. uwsgi sets it based + # on the --pyargv parameter of the uwsgi binary + config.parse_args(sys.argv, default_config_files=conf_files) - service.setup_profiler(name, CONF.host) + logging.setup(CONF, "nova") # dump conf at debug (log_options option comes from oslo.service) # FIXME(mriedem): This is gross but we don't have a public hook into @@ -93,6 +106,27 @@ def init_application(name): logging.getLogger(__name__), logging.DEBUG) + +def init_application(name): + conf_files = _get_config_files() + + # NOTE(melwitt): The init_application method can be called multiple times + # within a single python interpreter instance if any exception is raised + # during it (example: DBConnectionError while setting up the service) and + # apache/mod_wsgi reloads the init_application script. So, we initialize + # global data separately and decorate the method to run only once in a + # python interpreter instance. + init_global_data(conf_files) + + try: + _setup_service(CONF.host, name) + except exception.ServiceTooOld as exc: + return error_application(exc, name) + + # This global init is safe because if we got here, we already successfully + # set up the service and setting up the profile cannot fail. + service.setup_profiler(name, CONF.host) + conf = conf_files[0] return deploy.loadapp('config:%s' % conf, name=name) diff --git a/nova/api/validation/__init__.py b/nova/api/validation/__init__.py index 6f1d0b3475e..289ca71f03b 100644 --- a/nova/api/validation/__init__.py +++ b/nova/api/validation/__init__.py @@ -186,8 +186,8 @@ def wrapper(*args, **kwargs): # out when `additionalProperties=True`. This is for backward # compatible with v2.1 API and legacy v2 API. But it makes the # system more safe for no more unexpected parameters pass down - # to the system. In the future, we may block all of those - # additional parameters by Microversion. + # to the system. In microversion 2.75, we have blocked all of + # those additional parameters. 
             _strip_additional_query_parameters(query_params_schema, req)
             return func(*args, **kwargs)
         return wrapper
diff --git a/nova/api/openstack/placement/__init__.py b/nova/api/validation/extra_specs/__init__.py
similarity index 100%
rename from nova/api/openstack/placement/__init__.py
rename to nova/api/validation/extra_specs/__init__.py
diff --git a/nova/api/validation/extra_specs/accel.py b/nova/api/validation/extra_specs/accel.py
new file mode 100644
index 00000000000..79e25b4e2ca
--- /dev/null
+++ b/nova/api/validation/extra_specs/accel.py
@@ -0,0 +1,36 @@
+# Copyright 2020 Red Hat, Inc. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""Validators for ``accel`` namespaced extra specs."""
+
+from nova.api.validation.extra_specs import base
+
+
+EXTRA_SPEC_VALIDATORS = [
+    base.ExtraSpecValidator(
+        name='accel:device_profile',
+        description=(
+            'The name of a device profile to configure for the instance. '
+            'A device profile may be viewed as a "flavor for devices".'
+        ),
+        value={
+            'type': str,
+            'description': 'A name of a device profile.',
+        },
+    ),
+]
+
+
+def register():
+    return EXTRA_SPEC_VALIDATORS
diff --git a/nova/api/validation/extra_specs/aggregate_instance_extra_specs.py b/nova/api/validation/extra_specs/aggregate_instance_extra_specs.py
new file mode 100644
index 00000000000..0cf23b28ff6
--- /dev/null
+++ b/nova/api/validation/extra_specs/aggregate_instance_extra_specs.py
@@ -0,0 +1,72 @@
+# Copyright 2020 Red Hat, Inc. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""Validators for (preferably) ``aggregate_instance_extra_specs`` namespaced
+extra specs.
+
+These are used by the ``AggregateInstanceExtraSpecsFilter`` scheduler filter.
+Note that we explicitly do not support the unnamespaced variant of extra specs
+since these have been deprecated since Havana (commit fbedf60a432). Users that
+insist on using these can disable extra spec validation.
+"""
+
+from nova.api.validation.extra_specs import base
+
+
+DESCRIPTION = """\
+Specify metadata that must be present on the aggregate of a host. If this
+metadata is not present, the host will be rejected. Requires the
+``AggregateInstanceExtraSpecsFilter`` scheduler filter.
+
+The value can be one of the following:
+
+* ``=`` (equal to or greater than as a number; same as vcpus case)
+* ``==`` (equal to as a number)
+* ``!=`` (not equal to as a number)
+* ``>=`` (greater than or equal to as a number)
+* ``<=`` (less than or equal to as a number)
+* ``s==`` (equal to as a string)
+* ``s!=`` (not equal to as a string)
+* ``s>=`` (greater than or equal to as a string)
+* ``s>`` (greater than as a string)
+* ``s<=`` (less than or equal to as a string)
+* ``s<`` (less than as a string)
+* ``<in>`` (substring)
+* ``<all-in>`` (all elements contained in collection)
+* ``<or>`` (find one of these)
+* A specific value, e.g. ``true``, ``123``, ``testing``
+"""
+
+EXTRA_SPEC_VALIDATORS = [
+    base.ExtraSpecValidator(
+        name='aggregate_instance_extra_specs:{key}',
+        description=DESCRIPTION,
+        parameters=[
+            {
+                'name': 'key',
+                'description': 'The metadata key to match on',
+                'pattern': r'.+',
+            },
+        ],
+        value={
+            # this is totally arbitrary, since we need to support specific
+            # values
+            'type': str,
+        },
+    ),
+]
+
+
+def register():
+    return EXTRA_SPEC_VALIDATORS
diff --git a/nova/api/validation/extra_specs/base.py b/nova/api/validation/extra_specs/base.py
new file mode 100644
index 00000000000..2597070127a
--- /dev/null
+++ b/nova/api/validation/extra_specs/base.py
@@ -0,0 +1,120 @@
+# Copyright 2020 Red Hat, Inc. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import dataclasses
+import re
+import typing as ty
+
+from oslo_utils import strutils
+
+from nova import exception
+
+
+@dataclasses.dataclass
+class ExtraSpecValidator:
+    name: str
+    description: str
+    value: ty.Dict[str, ty.Any]
+    deprecated: bool = False
+    parameters: ty.List[ty.Dict[str, ty.Any]] = dataclasses.field(
+        default_factory=list
+    )
+
+    name_regex: str = None
+    value_regex: str = None
+
+    def __post_init__(self):
+        # generate a regex for the name
+
+        name_regex = self.name
+        # replace the human-readable patterns with named regex groups; this
+        # will transform e.g. 'hw:numa_cpus.{id}' to
+        # 'hw:numa_cpus.(?P<id>\d+)'
+        for param in self.parameters:
+            pattern = f'(?P<{param["name"]}>{param["pattern"]})'
+            name_regex = name_regex.replace(f'{{{param["name"]}}}', pattern)
+
+        self.name_regex = name_regex
+
+        # ...and do the same for the value, but only if we're using strings
+
+        if self.value['type'] not in (int, str, bool):
+            raise ValueError(
+                f"Unsupported parameter type '{self.value['type']}'"
+            )
+
+        value_regex = None
+        if self.value['type'] == str and self.value.get('pattern'):
+            value_regex = self.value['pattern']
+
+        self.value_regex = value_regex
+
+    def _validate_str(self, value):
+        if 'pattern' in self.value:
+            value_match = re.fullmatch(self.value_regex, value)
+            if not value_match:
+                raise exception.ValidationError(
+                    f"Validation failed; '{value}' is not of the format "
+                    f"'{self.value_regex}'."
+                )
+        elif 'enum' in self.value:
+            if value not in self.value['enum']:
+                values = ', '.join(str(x) for x in self.value['enum'])
+                raise exception.ValidationError(
+                    f"Validation failed; '{value}' is not one of: {values}."
+                )
+
+    def _validate_int(self, value):
+        try:
+            value = int(value)
+        except ValueError:
+            raise exception.ValidationError(
+                f"Validation failed; '{value}' is not a valid integer value."
+            )
+
+        if 'max' in self.value and self.value['max'] < value:
+            raise exception.ValidationError(
+                f"Validation failed; '{value}' is greater than the max value "
+                f"of '{self.value['max']}'."
+            )
+
+        if 'min' in self.value and self.value['min'] > value:
+            raise exception.ValidationError(
+                f"Validation failed; '{value}' is less than the min value "
+                f"of '{self.value['min']}'."
+            )
+
+    def _validate_bool(self, value):
+        try:
+            strutils.bool_from_string(value, strict=True)
+        except ValueError:
+            raise exception.ValidationError(
+                f"Validation failed; '{value}' is not a valid boolean-like "
+                f"value."
+            )
+
+    def validate(self, name, value):
+        name_match = re.fullmatch(self.name_regex, name)
+        if not name_match:
+            # NOTE(stephenfin): This is mainly here for testing purposes
+            raise exception.ValidationError(
+                f"Validation failed; expected a name of format '{self.name}' "
+                f"but got '{name}'."
+            )
+
+        if self.value['type'] == int:
+            self._validate_int(value)
+        elif self.value['type'] == bool:
+            self._validate_bool(value)
+        else:  # str
+            self._validate_str(value)
diff --git a/nova/api/validation/extra_specs/capabilities.py b/nova/api/validation/extra_specs/capabilities.py
new file mode 100644
index 00000000000..fa51770f262
--- /dev/null
+++ b/nova/api/validation/extra_specs/capabilities.py
@@ -0,0 +1,112 @@
+# Copyright 2020 Red Hat, Inc. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""Validators for (preferably) ``capabilities`` namespaced extra specs.
+
+These are used by the ``ComputeCapabilitiesFilter`` scheduler filter. Note that
+we explicitly do not allow the unnamespaced variant of extra specs since this
+has been deprecated since Grizzly (commit 8ce8e4b6c0d). Users that insist on
+using these can disable extra spec validation.
+
+For all extra specs, the value can be one of the following:
+
+* ``=`` (equal to or greater than as a number; same as vcpus case)
+* ``==`` (equal to as a number)
+* ``!=`` (not equal to as a number)
+* ``>=`` (greater than or equal to as a number)
+* ``<=`` (less than or equal to as a number)
+* ``s==`` (equal to as a string)
+* ``s!=`` (not equal to as a string)
+* ``s>=`` (greater than or equal to as a string)
+* ``s>`` (greater than as a string)
+* ``s<=`` (less than or equal to as a string)
+* ``s<`` (less than as a string)
+* ``<in>`` (substring)
+* ``<all-in>`` (all elements contained in collection)
+* ``<or>`` (find one of these)
+* A specific value, e.g. ``true``, ``123``, ``testing``
+
+Examples are: ``>= 5``, ``s== 2.1.0``, ``<in> gcc``, ``<all-in> aes mmx``, and
+``<or> fpu <or> gpu``
+"""
+
+from nova.api.validation.extra_specs import base
+
+
+DESCRIPTION = """\
+Specify that the '{capability}' capability provided by the host compute
+service satisfy the provided filter value. Requires the
+``ComputeCapabilitiesFilter`` scheduler filter.
+"""
+
+EXTRA_SPEC_VALIDATORS = []
+
+# non-nested capabilities (from 'nova.objects.compute_node.ComputeNode' and
+# nova.scheduler.host_manager.HostState')
+
+for capability in (
+    'id', 'uuid', 'service_id', 'host', 'vcpus', 'memory_mb', 'local_gb',
+    'vcpus_used', 'memory_mb_used', 'local_gb_used',
+    'hypervisor_type', 'hypervisor_version', 'hypervisor_hostname',
+    'free_ram_mb', 'free_disk_gb', 'current_workload', 'running_vms',
+    'disk_available_least', 'host_ip', 'mapped',
+    'cpu_allocation_ratio', 'ram_allocation_ratio', 'disk_allocation_ratio',
+) + (
+    'total_usable_ram_mb', 'total_usable_disk_gb', 'disk_mb_used',
+    'free_disk_mb', 'vcpus_total', 'vcpus_used', 'num_instances',
+    'num_io_ops', 'failed_builds', 'aggregates', 'cell_uuid', 'updated',
+):
+    EXTRA_SPEC_VALIDATORS.append(
+        base.ExtraSpecValidator(
+            name=f'capabilities:{capability}',
+            description=DESCRIPTION.format(capability=capability),
+            value={
+                # this is totally arbitrary, since we need to support
+                # specific values
+                'type': str,
+            },
+        ),
+    )
+
+
+# nested capabilities (from 'nova.objects.compute_node.ComputeNode' and
+# nova.scheduler.host_manager.HostState')
+
+for capability in (
+    'cpu_info', 'metrics', 'stats', 'numa_topology', 'supported_hv_specs',
+    'pci_device_pools',
+) + (
+    'nodename', 'pci_stats', 'supported_instances', 'limits', 'instances',
+):
+    EXTRA_SPEC_VALIDATORS.extend([
+        base.ExtraSpecValidator(
+            name=f'capabilities:{capability}{{filter}}',
+            description=DESCRIPTION.format(capability=capability),
+            parameters=[
+                {
+                    'name': 'filter',
+                    # this is optional, but if it's present it must be
+                    # preceded by ':'
+                    'pattern': r'(:\w+)*',
+                }
+            ],
+            value={
+                'type': str,
+            },
+        ),
+    ])
+
+
+def register():
+    return EXTRA_SPEC_VALIDATORS
diff --git a/nova/api/validation/extra_specs/hw.py b/nova/api/validation/extra_specs/hw.py
new file mode 100644
index 00000000000..b28eb7fb072
--- /dev/null
+++ b/nova/api/validation/extra_specs/hw.py
@@ -0,0 +1,499 @@
+# Copyright 2020 Red Hat, Inc. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""Validators for ``hw`` namespaced extra specs."""
+
+from nova.api.validation.extra_specs import base
+
+
+realtime_validators = [
+    base.ExtraSpecValidator(
+        name='hw:cpu_realtime',
+        description=(
+            'Determine whether realtime mode should be enabled for the '
+            'instance or not. '
+            'Only supported by the libvirt virt driver.'
+        ),
+        value={
+            'type': bool,
+            'description': 'Whether to enable realtime priority.',
+        },
+    ),
+    base.ExtraSpecValidator(
+        name='hw:cpu_realtime_mask',
+        description=(
+            'An exclusion mask of CPUs that should not be enabled for '
+            'realtime. '
+            'Only supported by the libvirt virt driver.'
+        ),
+        value={
+            'type': str,
+            'pattern': r'(\^)?\d+((-\d+)?(,\^?\d+(-\d+)?)?)*',
+        },
+    ),
+]
+
+hide_hypervisor_id_validator = [
+    base.ExtraSpecValidator(
+        name='hw:hide_hypervisor_id',
+        description=(
+            'Determine whether the hypervisor ID should be hidden from the '
+            'guest. '
+            'Only supported by the libvirt virt driver.'
+        ),
+        value={
+            'type': bool,
+            'description': 'Whether to hide the hypervisor ID.',
+        },
+    )
+]
+
+cpu_policy_validators = [
+    base.ExtraSpecValidator(
+        name='hw:cpu_policy',
+        description=(
+            'The policy to apply when determining what host CPUs the guest '
+            'CPUs can run on. '
+            'If ``shared`` (default), guest CPUs can be overallocated but '
+            'cannot float across host cores. '
+            'If ``dedicated``, guest CPUs cannot be overallocated but are '
+            'individually pinned to their own host core. '
+            'If ``mixed``, the policy for each instance CPU can be specified '
+            'using the ``hw:cpu_dedicated_mask`` or ``hw:cpu_realtime_mask`` '
+            'extra specs. '
+            'Only supported by the libvirt virt driver.'
+        ),
+        value={
+            'type': str,
+            'description': 'The CPU policy.',
+            'enum': [
+                'dedicated',
+                'shared',
+                'mixed',
+            ],
+        },
+    ),
+    base.ExtraSpecValidator(
+        name='hw:cpu_thread_policy',
+        description=(
+            'The policy to apply when determining whether the destination '
+            'host can have hardware threads enabled or not. '
+            'If ``prefer`` (default), hosts with hardware threads will be '
+            'preferred. '
+            'If ``require``, hosts with hardware threads will be required. '
+            'If ``isolate``, hosts with hardware threads will be forbidden. '
+            'Only supported by the libvirt virt driver.'
+        ),
+        value={
+            'type': str,
+            'description': 'The CPU thread policy.',
+            'enum': [
+                'prefer',
+                'isolate',
+                'require',
+            ],
+        },
+    ),
+    base.ExtraSpecValidator(
+        name='hw:emulator_threads_policy',
+        description=(
+            'The policy to apply when determining whether emulator threads '
+            'should be offloaded to a separate isolated core or to a pool '
+            'of shared cores. '
+            'If ``share``, emulator overhead threads will be offloaded to a '
+            'pool of shared cores. '
+            'If ``isolate``, emulator overhead threads will be offloaded to '
+            'their own core. '
+            'Only supported by the libvirt virt driver.'
+        ),
+        value={
+            'type': str,
+            'description': 'The emulator thread policy.',
+            'enum': [
+                'isolate',
+                'share',
+            ],
+        },
+    ),
+    base.ExtraSpecValidator(
+        name='hw:cpu_dedicated_mask',
+        description=(
+            'A mapping of **guest** (instance) CPUs to be pinned to **host** '
+            'CPUs for an instance with a ``mixed`` CPU policy. '
+            'Any **guest** CPUs which are not in this mapping will float '
+            'across host cores. '
+            'Only supported by the libvirt virt driver.'
+        ),
+        value={
+            'type': str,
+            'description': (
+                'The **guest** CPU mapping to be pinned to **host** CPUs for '
+                'an instance with a ``mixed`` CPU policy.'
+            ),
+            # This pattern is identical to 'hw:cpu_realtime_mask' pattern.
+            'pattern': r'\^?\d+((-\d+)?(,\^?\d+(-\d+)?)?)*',
+        },
+    ),
+]
+
+hugepage_validators = [
+    base.ExtraSpecValidator(
+        name='hw:mem_page_size',
+        description=(
+            'The size of memory pages to allocate to the guest with. '
+            'Can be one of the three aliases ``large``, ``small`` or '
+            '``any``, or an actual size. '
+            'Only supported by the libvirt virt driver.'
+        ),
+        value={
+            'type': str,
+            'description': 'The size of memory page to allocate',
+            'pattern': r'(large|small|any|\d+([kKMGT]i?)?(b|bit|B)?)',
+        },
+    ),
+]
+
+numa_validators = [
+    base.ExtraSpecValidator(
+        name='hw:numa_nodes',
+        description=(
+            'The number of virtual NUMA nodes to allocate to configure the '
+            'guest with. '
+            'Each virtual NUMA node will be mapped to a unique host NUMA '
+            'node. '
+            'Only supported by the libvirt virt driver.'
+        ),
+        value={
+            'type': int,
+            'description': 'The number of virtual NUMA nodes to allocate',
+            'min': 1,
+        },
+    ),
+    base.ExtraSpecValidator(
+        name='hw:numa_cpus.{num}',
+        description=(
+            'A mapping of **guest** (instance) CPUs to the **guest** (not '
+            'host!) NUMA node identified by ``{num}``. '
+            'This can be used to provide asymmetric CPU-NUMA allocation and '
+            'is necessary where the number of guest NUMA nodes is not a '
+            'factor of the number of guest CPUs. '
+            'Only supported by the libvirt virt driver.'
+        ),
+        parameters=[
+            {
+                'name': 'num',
+                'pattern': r'\d+',  # positive integers
+                'description': 'The ID of the **guest** NUMA node.',
+            },
+        ],
+        value={
+            'type': str,
+            'description': (
+                'The guest CPUs, in the form of a CPU map, to allocate to '
+                'the guest NUMA node identified by ``{num}``.'
+            ),
+            'pattern': r'\^?\d+((-\d+)?(,\^?\d+(-\d+)?)?)*',
+        },
+    ),
+    base.ExtraSpecValidator(
+        name='hw:numa_mem.{num}',
+        description=(
+            'A mapping of **guest** memory to the **guest** (not host!) NUMA '
+            'node identified by ``{num}``. '
+            'This can be used to provide asymmetric memory-NUMA allocation '
+            'and is necessary where the number of guest NUMA nodes is not a '
+            'factor of the total guest memory. '
+            'Only supported by the libvirt virt driver.'
+        ),
+        parameters=[
+            {
+                'name': 'num',
+                'pattern': r'\d+',  # positive integers
+                'description': 'The ID of the **guest** NUMA node.',
+            },
+        ],
+        value={
+            'type': int,
+            'description': (
+                'The guest memory, in MB, to allocate to the guest NUMA node '
+                'identified by ``{num}``.'
+            ),
+            'min': 1,
+        },
+    ),
+    base.ExtraSpecValidator(
+        name='hw:pci_numa_affinity_policy',
+        description=(
+            'The NUMA affinity policy of any PCI passthrough devices or '
+            'SR-IOV network interfaces attached to the instance. '
+            'If ``required``, only PCI devices from one of the host NUMA '
+            'nodes the instance VCPUs are allocated from can be used by said '
+            'instance. '
+            'If ``preferred``, any PCI device can be used, though preference '
+            'will be given to those from the same NUMA node as the instance '
+            'VCPUs. '
+            'If ``legacy`` (default), behavior is as with ``required`` '
+            'unless the PCI device does not provide NUMA affinity '
+            'information, in which case affinity is ignored. '
+            'Only supported by the libvirt virt driver.'
+        ),
+        value={
+            'type': str,
+            'description': 'The PCI NUMA affinity policy',
+            'enum': [
+                'required',
+                'preferred',
+                'legacy',
+                'socket',
+            ],
+        },
+    ),
+]
+
+cpu_topology_validators = [
+    base.ExtraSpecValidator(
+        name='hw:cpu_sockets',
+        description=(
+            'The number of virtual CPU sockets to emulate in the guest '
+            'CPU topology. '
+            'Defaults to the number of vCPUs requested. '
+            'Only supported by the libvirt virt driver.'
+        ),
+        value={
+            'type': int,
+            'description': 'A number of virtual CPU sockets',
+            'min': 1,
+        },
+    ),
+    base.ExtraSpecValidator(
+        name='hw:cpu_cores',
+        description=(
+            'The number of virtual CPU cores to emulate per socket in the '
+            'guest CPU topology. '
+            'Defaults to ``1``. '
+            'Only supported by the libvirt virt driver. '
+        ),
+        value={
+            'type': int,
+            'description': 'A number of virtual CPU cores',
+            'min': 1,
+        },
+    ),
+    base.ExtraSpecValidator(
+        name='hw:cpu_threads',
+        description=(
+            'The number of virtual CPU threads to emulate per core in the '
+            'guest CPU topology. '
+            'Defaults to ``1``. '
+            'Only supported by the libvirt virt driver. '
+        ),
+        value={
+            'type': int,
+            'description': 'A number of virtual CPU threads',
+            'min': 1,
+        },
+    ),
+    base.ExtraSpecValidator(
+        name='hw:max_cpu_sockets',
+        description=(
+            'The max number of virtual CPU sockets to emulate in the '
+            'guest CPU topology. '
+            'This is used to limit the topologies that can be requested by '
+            'an image and will be used to validate the ``hw_cpu_sockets`` '
+            'image metadata property. '
+            'Only supported by the libvirt virt driver. '
+        ),
+        value={
+            'type': int,
+            'description': 'A number of virtual CPU sockets',
+            'min': 1,
+        },
+    ),
+    base.ExtraSpecValidator(
+        name='hw:max_cpu_cores',
+        description=(
+            'The max number of virtual CPU cores to emulate per socket in '
+            'the guest CPU topology. '
+            'This is used to limit the topologies that can be requested by '
+            'an image and will be used to validate the ``hw_cpu_cores`` '
+            'image metadata property. '
+            'Only supported by the libvirt virt driver. '
+        ),
+        value={
+            'type': int,
+            'description': 'A number of virtual CPU cores',
+            'min': 1,
+        },
+    ),
+    base.ExtraSpecValidator(
+        name='hw:max_cpu_threads',
+        description=(
+            'The max number of virtual CPU threads to emulate per core in '
+            'the guest CPU topology. '
+            'This is used to limit the topologies that can be requested by '
+            'an image and will be used to validate the ``hw_cpu_threads`` '
+            'image metadata property. '
+            'Only supported by the libvirt virt driver. '
+        ),
+        value={
+            'type': int,
+            'description': 'A number of virtual CPU threads',
+            'min': 1,
+        },
+    ),
+]
+
+feature_flag_validators = [
+    # TODO(stephenfin): Consider deprecating and moving this to the 'os:'
+    # namespace
+    base.ExtraSpecValidator(
+        name='hw:boot_menu',
+        description=(
+            'Whether to show a boot menu when booting the guest. '
+            'Only supported by the libvirt virt driver. '
+        ),
+        value={
+            'type': bool,
+            'description': 'Whether to enable the boot menu',
+        },
+    ),
+    base.ExtraSpecValidator(
+        name='hw:mem_encryption',
+        description=(
+            'Whether to enable memory encryption for the guest. '
+            'Only supported by the libvirt virt driver on hosts with AMD SEV '
+            'support.'
+        ),
+        value={
+            'type': bool,
+            'description': 'Whether to enable memory encryption',
+        },
+    ),
+    base.ExtraSpecValidator(
+        name='hw:pmem',
+        description=(
+            'A comma-separated list of ``$LABEL``\\ s defined in config for '
+            'vPMEM devices. '
+            'Only supported by the libvirt virt driver on hosts with PMEM '
+            'devices.'
+        ),
+        value={
+            'type': str,
+            'description': (
+                'A comma-separated list of valid resource class names.'
+            ),
+            'pattern': '([a-zA-Z0-9_]+(,)?)+',
+        },
+    ),
+    base.ExtraSpecValidator(
+        name='hw:pmu',
+        description=(
+            'Whether to enable the Performance Monitoring Unit (PMU) for '
+            'the guest. '
+            'If this option is not specified, the presence of the vPMU is '
+            'determined by the hypervisor. '
+            'The vPMU is used by tools like ``perf`` in the guest to provide '
+            'more accurate information for profiling application and '
+            'monitoring guest performance. '
+            'For realtime workloads, the emulation of a vPMU can introduce '
+            'additional latency which may be undesirable. 
' + 'If the telemetry it provides is not required, such workloads ' + 'should disable this feature. ' + 'For most workloads, the default of unset will be correct. ' + 'Only supported by the libvirt virt driver.' + ), + value={ + 'type': bool, + 'description': 'Whether to enable the PMU', + }, + ), + base.ExtraSpecValidator( + name='hw:serial_port_count', + description=( + 'The number of serial ports to allocate to the guest. ' + 'Only supported by the libvirt virt driver.' + ), + value={ + 'type': int, + 'min': 0, + 'description': 'The number of serial ports to allocate', + }, + ), + base.ExtraSpecValidator( + name='hw:tpm_model', + description=( + 'The model of the attached TPM device. ' + 'Only supported by the libvirt virt driver.' + ), + value={ + 'type': str, + 'description': 'A TPM model', + 'enum': [ + 'tpm-tis', + 'tpm-crb', + ], + }, + ), + base.ExtraSpecValidator( + name='hw:tpm_version', + description=( + "The TPM version. " + "Required if requesting a vTPM via the 'hw:tpm_model' extra spec " + "or equivalent image metadata property. " + "Only supported by the libvirt virt driver." + ), + value={ + 'type': str, + 'description': 'A TPM version.', + 'enum': [ + '1.2', + '2.0', + ], + }, + ), + base.ExtraSpecValidator( + name='hw:watchdog_action', + description=( + 'The action to take when the watchdog timer is kicked. ' + 'Watchdog devices keep an eye on the instance and carry out the ' + 'specified action if the server hangs. ' + 'The watchdog uses the ``i6300esb`` device, emulating a PCI Intel ' + '6300ESB. ' + 'Only supported by the libvirt virt driver.' + ), + value={ + 'type': str, + 'description': 'The action to take', + 'enum': [ + 'none', + 'pause', + 'poweroff', + 'reset', + 'disabled', + ], + }, + ), +] + + +def register(): + return ( + realtime_validators + + hide_hypervisor_id_validator + + cpu_policy_validators + + hugepage_validators + + numa_validators + + cpu_topology_validators + + feature_flag_validators + ) diff --git a/nova/api/validation/extra_specs/hw_rng.py b/nova/api/validation/extra_specs/hw_rng.py new file mode 100644 index 00000000000..c60b2b92a2a --- /dev/null +++ b/nova/api/validation/extra_specs/hw_rng.py @@ -0,0 +1,57 @@ +# Copyright 2020 Red Hat, Inc. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +"""Validators for ``hw_rng`` namespaced extra specs.""" + +from nova.api.validation.extra_specs import base + + +# TODO(stephenfin): Move these to the 'hw:' namespace +EXTRA_SPEC_VALIDATORS = [ + base.ExtraSpecValidator( + name='hw_rng:allowed', + description=( + 'Whether to disable configuration of a random number generator ' + 'in their image. Before 21.0.0 (Ussuri), random number generators ' + 'were not enabled by default so this was used to determine ' + 'whether to **enable** configuration.' + ), + value={ + 'type': bool, + }, + ), + base.ExtraSpecValidator( + name='hw_rng:rate_bytes', + description=( + 'The allowed amount of bytes for the guest to read from the ' + 'host\'s entropy per period.' 
+ ), + value={ + 'type': int, + 'min': 0, + }, + ), + base.ExtraSpecValidator( + name='hw_rng:rate_period', + description='The duration of a read period in milliseconds.', + value={ + 'type': int, + 'min': 0, + }, + ), +] + + +def register(): + return EXTRA_SPEC_VALIDATORS diff --git a/nova/api/validation/extra_specs/hw_video.py b/nova/api/validation/extra_specs/hw_video.py new file mode 100644 index 00000000000..dc9aa0fe665 --- /dev/null +++ b/nova/api/validation/extra_specs/hw_video.py @@ -0,0 +1,39 @@ +# Copyright 2020 Red Hat, Inc. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +"""Validators for ``hw_video`` namespaced extra specs.""" + +from nova.api.validation.extra_specs import base + + +# TODO(stephenfin): Move these to the 'hw:' namespace +EXTRA_SPEC_VALIDATORS = [ + base.ExtraSpecValidator( + name='hw_video:ram_max_mb', + description=( + 'The maximum amount of memory the user can request using the ' + '``hw_video_ram`` image metadata property, which represents the ' + 'video memory that the guest OS will see. This has no effect for ' + 'vGPUs.' + ), + value={ + 'type': int, + 'min': 0, + }, + ), +] + + +def register(): + return EXTRA_SPEC_VALIDATORS diff --git a/nova/api/validation/extra_specs/null.py b/nova/api/validation/extra_specs/null.py new file mode 100644 index 00000000000..20af8a62655 --- /dev/null +++ b/nova/api/validation/extra_specs/null.py @@ -0,0 +1,55 @@ +# Copyright 2020 Red Hat, Inc. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +"""Validators for non-namespaced extra specs.""" + +from nova.api.validation.extra_specs import base + + +EXTRA_SPEC_VALIDATORS = [ + base.ExtraSpecValidator( + name='hide_hypervisor_id', + description=( + 'Determine whether the hypervisor ID should be hidden from the ' + 'guest. Only supported by the libvirt virt driver. ' + 'This extra spec is not compatible with the ' + 'AggregateInstanceExtraSpecsFilter scheduler filter. ' + 'The ``hw:hide_hypervisor_id`` extra spec should be used instead.' + ), + value={ + 'type': bool, + 'description': 'Whether to hide the hypervisor ID.', + }, + deprecated=True, + ), + # TODO(stephenfin): This should be moved to a namespace + base.ExtraSpecValidator( + name='group_policy', + description=( + 'The group policy to apply when using the granular resource ' + 'request syntax.' 
+ ), + value={ + 'type': str, + 'enum': [ + 'isolate', + 'none', + ], + }, + ), +] + + +def register(): + return EXTRA_SPEC_VALIDATORS diff --git a/nova/api/validation/extra_specs/os.py b/nova/api/validation/extra_specs/os.py new file mode 100644 index 00000000000..09ba6283ea3 --- /dev/null +++ b/nova/api/validation/extra_specs/os.py @@ -0,0 +1,95 @@ +# Copyright 2020 Red Hat, Inc. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +"""Validators for ``os`` namespaced extra specs.""" + +from nova.api.validation.extra_specs import base + + +# TODO(stephenfin): Most of these belong in the 'hw:' or 'hyperv:' namespace +# and should be moved. +EXTRA_SPEC_VALIDATORS = [ + base.ExtraSpecValidator( + name='os:secure_boot', + description=( + 'Determine whether secure boot is enabled or not. Only supported ' + 'by the libvirt and HyperV virt drivers.' + ), + value={ + 'type': str, + 'description': 'Whether secure boot is required or not', + 'enum': [ + 'disabled', + 'required', + ], + }, + ), + base.ExtraSpecValidator( + name='os:resolution', + description=( + 'Guest VM screen resolution size. Only supported by the HyperV ' + 'driver.' + ), + value={ + 'type': str, + 'description': 'The chosen resolution', + 'enum': [ + '1024x768', + '1280x1024', + '1600x1200', + '1920x1200', + '2560x1600', + '3840x2160', + ], + }, + ), + base.ExtraSpecValidator( + name='os:monitors', + description=( + 'Guest VM number of monitors. Only supported by the HyperV driver.' + ), + value={ + 'type': int, + 'description': 'The number of monitors enabled', + 'min': 1, + 'max': 8, + }, + ), + # TODO(stephenfin): Consider merging this with the 'hw_video_ram' image + # metadata property or adding a 'hw:video_ram' extra spec that works for + # both Hyper-V and libvirt. + base.ExtraSpecValidator( + name='os:vram', + description=( + 'Guest VM VRAM amount. Only supported by the HyperV driver.' + ), + # NOTE(stephenfin): This is really an int, but because there's a + # limited range of options we treat it as a string + value={ + 'type': str, + 'description': 'Amount of VRAM to allocate to instance', + 'enum': [ + '64', + '128', + '256', + '512', + '1024', + ], + }, + ), +] + + +def register(): + return EXTRA_SPEC_VALIDATORS diff --git a/nova/api/validation/extra_specs/pci_passthrough.py b/nova/api/validation/extra_specs/pci_passthrough.py new file mode 100644 index 00000000000..4eed0a189e9 --- /dev/null +++ b/nova/api/validation/extra_specs/pci_passthrough.py @@ -0,0 +1,43 @@ +# Copyright 2020 Red Hat, Inc. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""Validators for ``pci_passthrough`` namespaced extra specs."""
+
+from nova.api.validation.extra_specs import base
+
+
+EXTRA_SPEC_VALIDATORS = [
+    base.ExtraSpecValidator(
+        name='pci_passthrough:alias',
+        description=(
+            'Specify the number of ``$alias`` PCI device(s) to attach to the '
+            'instance. '
+            'Must be of format ``$alias:$count``, where ``$alias`` '
+            'corresponds to a particular PCI device class (as configured in '
+            '``nova.conf``) and ``$count`` is the amount of PCI devices of '
+            'type ``$alias`` to be assigned to the instance. '
+            'Use commas to specify multiple values. '
+            'Only supported by the libvirt virt driver.'
+        ),
+        value={
+            'type': str,
+            # one or more comma-separated '$alias:$count' values
+            'pattern': r'[^:]+:\d+(?:\s*,\s*[^:]+:\d+)*',
+        },
+    ),
+]
+
+
+def register():
+    return EXTRA_SPEC_VALIDATORS
diff --git a/nova/api/validation/extra_specs/powervm.py b/nova/api/validation/extra_specs/powervm.py
new file mode 100644
index 00000000000..58ef7937776
--- /dev/null
+++ b/nova/api/validation/extra_specs/powervm.py
@@ -0,0 +1,271 @@
+# Copyright 2020 Red Hat, Inc. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""Validators for ``powervm`` namespaced extra specs.
+
+These were all taken from the IBM documentation.
+
+https://www.ibm.com/support/knowledgecenter/SSXK2N_1.4.4/com.ibm.powervc.standard.help.doc/powervc_pg_flavorsextraspecs_hmc.html
+"""
+
+from nova.api.validation.extra_specs import base
+
+
+# TODO(stephenfin): A lot of these seem to overlap with existing 'hw:' extra
+# specs and could be deprecated in favour of those.
+EXTRA_SPEC_VALIDATORS = [
+    base.ExtraSpecValidator(
+        name='powervm:min_mem',
+        description=(
+            'Minimum memory (MB). If you do not specify the value, the value '
+            'is defaulted to the value for ``memory_mb``.'
+        ),
+        value={
+            'type': int,
+            'min': 256,
+            'description': 'Integer >=256 divisible by LMB size of the target',
+        },
+    ),
+    base.ExtraSpecValidator(
+        name='powervm:max_mem',
+        description=(
+            'Maximum memory (MB). If you do not specify the value, the value '
+            'is defaulted to the value for ``memory_mb``.'
+        ),
+        value={
+            'type': int,
+            'min': 256,
+            'description': 'Integer >=256 divisible by LMB size of the target',
+        },
+    ),
+    base.ExtraSpecValidator(
+        name='powervm:min_vcpu',
+        description=(
+            'Minimum virtual processors. Minimum resource that is required '
+            'for LPAR to boot is 1. The maximum value can be equal to the '
+            'value, which is set to vCPUs. If you specify the value of the '
+            'attribute, you must also specify value of powervm:max_vcpu. '
+            'Defaults to value set for vCPUs.'
+        ),
+        value={
+            'type': int,
+            'min': 1,
+        },
+    ),
+    base.ExtraSpecValidator(
+        name='powervm:max_vcpu',
+        description=(
+            'Maximum virtual processors. Minimum resource that is required '
+            'for LPAR to boot is 1. The maximum value can be equal to the '
+            'value, which is set to vCPUs. If you specify the value of the '
+            'attribute, you must also specify value of powervm:min_vcpu. '
+            'Defaults to value set for vCPUs.'
+        ),
+        value={
+            'type': int,
+            'min': 1,
+        },
+    ),
+    base.ExtraSpecValidator(
+        name='powervm:proc_units',
+        description=(
+            'The wanted ``proc_units``. The value for the attribute cannot be '
+            'less than 1/10 of the value that is specified for Virtual '
+            'CPUs (vCPUs) for hosts with firmware level 7.5 or earlier and '
+            '1/20 of the value that is specified for vCPUs for hosts with '
+            'firmware level 7.6 or later. If the value is not specified '
+            'during deployment, it is defaulted to vCPUs * 0.5.'
+        ),
+        value={
+            'type': str,
+            'pattern': r'\d+\.\d+',
+            'description': (
+                'Float (divisible by 0.1 for hosts with firmware level 7.5 or '
+                'earlier and 0.05 for hosts with firmware level 7.6 or later)'
+            ),
+        },
+    ),
+    base.ExtraSpecValidator(
+        name='powervm:min_proc_units',
+        description=(
+            'Minimum ``proc_units``. The minimum value for the attribute is '
+            '0.1 for hosts with firmware level 7.5 or earlier and 0.05 for '
+            'hosts with firmware level 7.6 or later. The maximum value must '
+            'be equal to the maximum value of ``powervm:proc_units``. If you '
+            'specify the attribute, you must also specify '
+            '``powervm:proc_units``, ``powervm:max_proc_units``, '
+            '``powervm:min_vcpu``, ``powervm:max_vcpu``, and '
+            '``powervm:dedicated_proc``. Set the ``powervm:dedicated_proc`` '
+            'to false.'
+            '\n'
+            'The value for the attribute cannot be less than 1/10 of the '
+            'value that is specified for powervm:min_vcpu for hosts with '
+            'firmware level 7.5 or earlier and 1/20 of the value that is '
+            'specified for ``powervm:min_vcpu`` for hosts with firmware '
+            'level 7.6 or later. If you do not specify the value of the '
+            'attribute during deployment, it is defaulted to equal the value '
+            'of ``powervm:proc_units``.'
+        ),
+        value={
+            'type': str,
+            'pattern': r'\d+\.\d+',
+            'description': (
+                'Float (divisible by 0.1 for hosts with firmware level 7.5 or '
+                'earlier and 0.05 for hosts with firmware level 7.6 or later)'
+            ),
+        },
+    ),
+    base.ExtraSpecValidator(
+        name='powervm:max_proc_units',
+        description=(
+            'Maximum ``proc_units``. The minimum value can be equal to '
+            '``powervm:proc_units``. The maximum value for the attribute '
+            'cannot be more than the value of the host for maximum allowed '
+            'processors per partition. If you specify this attribute, you '
+            'must also specify ``powervm:proc_units``, '
+            '``powervm:min_proc_units``, ``powervm:min_vcpu``, '
+            '``powervm:max_vcpu``, and ``powervm:dedicated_proc``. Set the '
+            '``powervm:dedicated_proc`` to false.'
+            '\n'
+            'The value for the attribute cannot be less than 1/10 of the '
+            'value that is specified for powervm:max_vcpu for hosts with '
+            'firmware level 7.5 or earlier and 1/20 of the value that is '
+            'specified for ``powervm:max_vcpu`` for hosts with firmware '
+            'level 7.6 or later. If you do not specify the value of the '
+            'attribute during deployment, the value is defaulted to equal the '
+            'value of ``powervm:proc_units``.'
+        ),
+        value={
+            'type': str,
+            'pattern': r'\d+\.\d+',
+            'description': (
+                'Float (divisible by 0.1 for hosts with firmware level 7.5 or '
+                'earlier and 0.05 for hosts with firmware level 7.6 or later)'
+            ),
+        },
+    ),
+    base.ExtraSpecValidator(
+        name='powervm:dedicated_proc',
+        description=(
+            'Use dedicated processors. The attribute defaults to false.'
+ ), + value={ + 'type': bool, + }, + ), + base.ExtraSpecValidator( + name='powervm:shared_weight', + description=( + 'Shared processor weight. When ``powervm:dedicated_proc`` is set ' + 'to true and ``powervm:uncapped`` is also set to true, the value ' + 'of the attribute defaults to 128.' + ), + value={ + 'type': int, + 'min': 0, + 'max': 255, + }, + ), + base.ExtraSpecValidator( + name='powervm:availability_priority', + description=( + 'Availability priority. The attribute priority of the server if ' + 'there is a processor failure and there are not enough resources ' + 'for all servers. VIOS and i5 need to remain high priority ' + 'default of 191. The value of the attribute defaults to 128.' + ), + value={ + 'type': int, + 'min': 0, + 'max': 255, + }, + ), + base.ExtraSpecValidator( + name='powervm:uncapped', + description=( + 'LPAR can use unused processor cycles that are beyond or exceed ' + 'the wanted setting of the attribute. This attribute is ' + 'supported only when ``powervm:dedicated_proc`` is set to false. ' + 'When ``powervm:dedicated_proc`` is set to false, ' + '``powervm:uncapped`` defaults to true.' + ), + value={ + 'type': bool, + }, + ), + base.ExtraSpecValidator( + name='powervm:dedicated_sharing_mode', + description=( + 'Sharing mode for dedicated processors. The attribute is ' + 'supported only when ``powervm:dedicated_proc`` is set to true.' + ), + value={ + 'type': str, + 'enum': ( + 'share_idle_procs', + 'keep_idle_procs', + 'share_idle_procs_active', + 'share_idle_procs_always', + ) + }, + ), + base.ExtraSpecValidator( + name='powervm:processor_compatibility', + description=( + 'A processor compatibility mode is a value that is assigned to a ' + 'logical partition by the hypervisor that specifies the processor ' + 'environment in which the logical partition can successfully ' + 'operate.' + ), + value={ + 'type': str, + 'enum': ( + 'default', + 'POWER6', + 'POWER6+', + 'POWER6_Enhanced', + 'POWER6+_Enhanced', + 'POWER7', + 'POWER8' + ), + }, + ), + base.ExtraSpecValidator( + name='powervm:shared_proc_pool_name', + description=( + 'Specifies the shared processor pool to be targeted during ' + 'deployment of a virtual machine.' + ), + value={ + 'type': str, + 'description': 'String with upper limit of 14 characters', + }, + ), + base.ExtraSpecValidator( + name='powervm:srr_capability', + description=( + 'If the value of simplified remote restart capability is set to ' + 'true for the LPAR, you can remote restart the LPAR to supported ' + 'CEC or host when the source CEC or host is down. The attribute ' + 'defaults to false.' + ), + value={ + 'type': bool, + }, + ), +] + + +def register(): + return EXTRA_SPEC_VALIDATORS diff --git a/nova/api/validation/extra_specs/quota.py b/nova/api/validation/extra_specs/quota.py new file mode 100644 index 00000000000..f0ff3db2a67 --- /dev/null +++ b/nova/api/validation/extra_specs/quota.py @@ -0,0 +1,190 @@ +# Copyright 2020 Red Hat, Inc. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
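All of the modules in this series funnel through ``base.ExtraSpecValidator`` above, which compiles the templated name into a regex and type-checks the value. A rough usage sketch, runnable only inside a Nova tree (``validate`` raises ``exception.ValidationError`` on a mismatch)::

    from nova.api.validation.extra_specs import base

    validator = base.ExtraSpecValidator(
        name='hw:numa_cpus.{num}',
        description='Guest CPUs to assign to a guest NUMA node.',
        parameters=[{'name': 'num', 'pattern': r'\d+'}],
        value={
            'type': str,
            'pattern': r'\^?\d+((-\d+)?(,\^?\d+(-\d+)?)?)*',
        },
    )

    # the name must match 'hw:numa_cpus.<digits>' and the value must be a
    # valid CPU map, otherwise exception.ValidationError is raised
    validator.validate('hw:numa_cpus.0', '0-3')   # passes
    validator.validate('hw:numa_cpus.0', 'a-b')   # raises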
+
+"""Validators for ``quota`` namespaced extra specs."""
+
+from nova.api.validation.extra_specs import base
+
+
+EXTRA_SPEC_VALIDATORS = []
+
+
+# CPU, memory, disk IO and VIF quotas (VMWare)
+for key, name, unit in (
+    ('cpu', 'CPU', 'MHz'),
+    ('memory', 'memory', 'MB'),
+    ('disk_io', 'disk IO', 'I/O per second'),
+    ('vif', 'virtual interface', 'Mbps'),
+):
+    EXTRA_SPEC_VALIDATORS.extend(
+        [
+            base.ExtraSpecValidator(
+                name=f'quota:{key}_limit',
+                description=(
+                    f'The upper limit for {name} allocation in {unit}. '
+                    f'The utilization of an instance will not exceed this '
+                    f'limit, even if there are available resources. '
+                    f'This is typically used to ensure a consistent '
+                    f'performance of instances independent of available '
+                    f'resources. '
+                    f'The value ``0`` indicates that {name} usage is not '
+                    f'limited. '
+                    f'Only supported by the VMWare virt driver.'
+                ),
+                value={
+                    'type': int,
+                    'min': 0,
+                },
+            ),
+            base.ExtraSpecValidator(
+                name=f'quota:{key}_reservation',
+                description=(
+                    f'The guaranteed minimum {name} reservation in {unit}. '
+                    f'The instance is guaranteed at least this amount of '
+                    f'{name}. '
+                    f'Only supported by the VMWare virt driver.'
+                ),
+                value={
+                    'type': int,
+                },
+            ),
+            base.ExtraSpecValidator(
+                name=f'quota:{key}_shares_level',
+                description=(
+                    f"The allocation level for {name}. If you choose "
+                    f"'custom', set the number of {name} shares using "
+                    f"'quota:{key}_shares_share'. "
+                    f"Only supported by the VMWare virt driver."
+                ),
+                value={
+                    'type': str,
+                    'enum': ['custom', 'high', 'normal', 'low'],
+                },
+            ),
+            base.ExtraSpecValidator(
+                name=f'quota:{key}_shares_share',
+                description=(
+                    f"The number of shares of {name} allocated in the "
+                    f"event that 'quota:{key}_shares_level=custom' is "
+                    f"used. "
+                    f"Ignored otherwise. "
+                    f"There is no unit for this value: it is a relative "
+                    f"measure based on the settings for other instances. "
+                    f"Only supported by the VMWare virt driver."
+                ),
+                value={
+                    'type': int,
+                    'min': 0,
+                },
+            ),
+        ]
+    )
+
+
+# CPU quotas (libvirt)
+EXTRA_SPEC_VALIDATORS.extend(
+    [
+        base.ExtraSpecValidator(
+            name='quota:cpu_shares',
+            description=(
+                'The proportional weighted share for the domain. '
+                'If this element is omitted, the service falls back to the '
+                'OS-provided default. '
+                'There is no unit for the value; it is a relative measure '
+                'based on the setting of other VMs. '
+                'For example, a VM configured with a value of 2048 gets '
+                'twice as much CPU time as a VM configured with value 1024. '
+                'Only supported by the libvirt virt driver.'
+            ),
+            value={
+                'type': int,
+                'min': 0,
+            },
+        ),
+        base.ExtraSpecValidator(
+            name='quota:cpu_period',
+            description=(
+                'Specifies the enforcement interval in microseconds. '
+                'Within a period, each VCPU of the instance is not allowed '
+                'to consume more than the quota worth of runtime. '
+                'The value should be in the range 1,000 - 1,000,000. '
+                'A value of 0 disables enforcement. '
+                'Only supported by the libvirt virt driver.'
+            ),
+            value={
+                'type': int,
+                'min': 0,
+            },
+        ),
+        base.ExtraSpecValidator(
+            name='quota:cpu_quota',
+            description=(
+                "The maximum allowed bandwidth in microseconds. "
+                "Can be combined with 'quota:cpu_period' to limit an instance "
+                "to a percentage of capacity of a physical CPU. "
+                "The value should be in the range 1,000 - 2^64, or negative. "
+                "A negative value indicates that the instance has infinite "
+                "bandwidth. "
+                "Only supported by the libvirt virt driver."
+ ), + value={ + 'type': int, + }, + ), + ] +) + + +# Disk quotas (libvirt, HyperV) +for stat in ('read', 'write', 'total'): + for metric in ('bytes', 'iops'): + EXTRA_SPEC_VALIDATORS.append( + base.ExtraSpecValidator( + name=f'quota:disk_{stat}_{metric}_sec', + # NOTE(stephenfin): HyperV supports disk_total_{metric}_sec + # too; update + description=( + f'The quota {stat} {metric} for disk. ' + f'Only supported by the libvirt virt driver.' + ), + value={ + 'type': int, + 'min': 0, + }, + ) + ) + + +# VIF quotas (libvirt) +# TODO(stephenfin): Determine whether this should be deprecated now that +# nova-network is dead +for stat in ('inbound', 'outbound'): + for metric in ('average', 'peak', 'burst'): + EXTRA_SPEC_VALIDATORS.append( + base.ExtraSpecValidator( + name=f'quota:vif_{stat}_{metric}', + description=( + f'The quota {stat} {metric} for VIF. Only supported ' + f'by the libvirt virt driver.' + ), + value={ + 'type': int, + 'min': 0, + }, + ) + ) + + +def register(): + return EXTRA_SPEC_VALIDATORS diff --git a/nova/api/validation/extra_specs/resources.py b/nova/api/validation/extra_specs/resources.py new file mode 100644 index 00000000000..54f59fb516b --- /dev/null +++ b/nova/api/validation/extra_specs/resources.py @@ -0,0 +1,65 @@ +# Copyright 2020 Red Hat, Inc. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +"""Validators for ``resources`` namespaced extra specs.""" + +import os_resource_classes + +from nova.api.validation.extra_specs import base + + +EXTRA_SPEC_VALIDATORS = [] + +for resource_class in os_resource_classes.STANDARDS: + EXTRA_SPEC_VALIDATORS.append( + base.ExtraSpecValidator( + name=f'resources{{group}}:{resource_class}', + description=f'The amount of resource {resource_class} requested.', + value={ + 'type': int, + }, + parameters=[ + { + 'name': 'group', + 'pattern': r'([a-zA-Z0-9_-]{1,64})?', + }, + ], + ) + ) + +EXTRA_SPEC_VALIDATORS.append( + base.ExtraSpecValidator( + name='resources{group}:CUSTOM_{resource}', + description=( + 'The amount of resource CUSTOM_{resource} requested.' + ), + value={ + 'type': int, + }, + parameters=[ + { + 'name': 'group', + 'pattern': r'([a-zA-Z0-9_-]{1,64})?', + }, + { + 'name': 'resource', + 'pattern': r'[A-Z0-9_]+', + }, + ], + ) +) + + +def register(): + return EXTRA_SPEC_VALIDATORS diff --git a/nova/api/validation/extra_specs/traits.py b/nova/api/validation/extra_specs/traits.py new file mode 100644 index 00000000000..194350c2fa9 --- /dev/null +++ b/nova/api/validation/extra_specs/traits.py @@ -0,0 +1,73 @@ +# Copyright 2020 Red Hat, Inc. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the +# License for the specific language governing permissions and limitations +# under the License. + +"""Validators for ``traits`` namespaced extra specs.""" + +import os_traits + +from nova.api.validation.extra_specs import base + + +EXTRA_SPEC_VALIDATORS = [] + +for trait in os_traits.get_traits(): + EXTRA_SPEC_VALIDATORS.append( + base.ExtraSpecValidator( + name=f'trait{{group}}:{trait}', + description=f'Require or forbid trait {trait}.', + value={ + 'type': str, + 'enum': [ + 'required', + 'forbidden', + ], + }, + parameters=[ + { + 'name': 'group', + 'pattern': r'([a-zA-Z0-9_-]{1,64})?', + }, + ], + ) + ) + +EXTRA_SPEC_VALIDATORS.append( + base.ExtraSpecValidator( + name='trait{group}:CUSTOM_{trait}', + description=( + 'Require or forbid trait CUSTOM_{trait}.' + ), + value={ + 'type': str, + 'enum': [ + 'required', + 'forbidden', + ], + }, + parameters=[ + { + 'name': 'group', + 'pattern': r'([a-zA-Z0-9_-]{1,64})?', + }, + { + 'name': 'trait', + 'pattern': r'[A-Z0-9_]+', + }, + ], + ) +) + + +def register(): + return EXTRA_SPEC_VALIDATORS diff --git a/nova/api/validation/extra_specs/validators.py b/nova/api/validation/extra_specs/validators.py new file mode 100644 index 00000000000..2163892d71d --- /dev/null +++ b/nova/api/validation/extra_specs/validators.py @@ -0,0 +1,86 @@ +# Copyright 2020 Red Hat, Inc. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +"""Validators for all extra specs known by nova.""" + +import re +import typing as ty + +from oslo_log import log as logging +from stevedore import extension + +from nova.api.validation.extra_specs import base +from nova import exception + +LOG = logging.getLogger(__name__) + +VALIDATORS: ty.Dict[str, base.ExtraSpecValidator] = {} +NAMESPACES: ty.Set[str] = set() + + +def validate(name: str, value: str): + """Validate a given extra spec. + + :param name: Extra spec name. + :param value: Extra spec value. + :raises: exception.ValidationError if validation fails. + """ + # attempt a basic lookup for extra specs without embedded parameters + if name in VALIDATORS: + VALIDATORS[name].validate(name, value) + return + + # if that failed, fallback to a linear search through the registry + for validator in VALIDATORS.values(): + if re.fullmatch(validator.name_regex, name): + validator.validate(name, value) + return + + # check if there's a namespace; if not, we've done all we can do + if ':' not in name: # no namespace + return + + # if there is, check if it's one we recognize + for namespace in NAMESPACES: + if re.fullmatch(namespace, name.split(':', 1)[0]): + break + else: + return + + raise exception.ValidationError( + f"Validation failed; extra spec '{name}' does not appear to be a " + f"valid extra spec." 
+ ) + + +def load_validators(): + global VALIDATORS + + def _report_load_failure(mgr, ep, err): + LOG.warning(u'Failed to load %s: %s', ep.module_name, err) + + mgr = extension.ExtensionManager( + 'nova.api.extra_spec_validators', + on_load_failure_callback=_report_load_failure, + invoke_on_load=False, + ) + for ext in mgr: + # TODO(stephenfin): Make 'register' return a dict rather than a list? + for validator in ext.plugin.register(): + VALIDATORS[validator.name] = validator + if ':' in validator.name_regex: + NAMESPACES.add(validator.name_regex.split(':', 1)[0]) + + +load_validators() diff --git a/nova/api/validation/extra_specs/vmware.py b/nova/api/validation/extra_specs/vmware.py new file mode 100644 index 00000000000..96a03d294a7 --- /dev/null +++ b/nova/api/validation/extra_specs/vmware.py @@ -0,0 +1,48 @@ +# Copyright 2020 Red Hat, Inc. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +"""Validators for ``vmware`` namespaced extra specs.""" + +from nova.api.validation.extra_specs import base + + +EXTRA_SPEC_VALIDATORS = [ + base.ExtraSpecValidator( + name='vmware:hw_version', + description=( + 'Specify the hardware version used to create images. In an ' + 'environment with different host versions, you can use this ' + 'parameter to place instances on the correct hosts.' + ), + value={ + 'type': str, + }, + ), + base.ExtraSpecValidator( + name='vmware:storage_policy', + description=( + 'Specify the storage policy used for new instances.' + '\n' + 'If Storage Policy-Based Management (SPBM) is not enabled, this ' + 'parameter is ignored.' + ), + value={ + 'type': str, + }, + ), +] + + +def register(): + return EXTRA_SPEC_VALIDATORS diff --git a/nova/api/validation/parameter_types.py b/nova/api/validation/parameter_types.py index 8c54c1993d5..79badb7d142 100644 --- a/nova/api/validation/parameter_types.py +++ b/nova/api/validation/parameter_types.py @@ -16,14 +16,34 @@ """ import copy +import functools import re import unicodedata -import six - from nova.i18n import _ from nova.objects import tag +_REGEX_RANGE_CACHE = {} + + +def memorize(func): + + @functools.wraps(func) + def memorizer(*args, **kwargs): + global _REGEX_RANGE_CACHE + key = "%s:%s:%s" % (func.__name__, hash(str(args)), hash(str(kwargs))) + value = _REGEX_RANGE_CACHE.get(key) + if value is None: + value = func(*args, **kwargs) + _REGEX_RANGE_CACHE[key] = value + return value + return memorizer + + +def _reset_cache(): + global _REGEX_RANGE_CACHE + _REGEX_RANGE_CACHE = {} + def single_param(schema): """Macro function for use in JSONSchema to support query parameters that @@ -72,7 +92,7 @@ def _is_printable(char): def _get_all_chars(): for i in range(0xFFFF): - yield six.unichr(i) + yield chr(i) # build a regex that matches all printable characters. This allows @@ -83,6 +103,7 @@ def _get_all_chars(): # constraint fails and this causes issues for some unittests when # PYTHONHASHSEED is set randomly. 
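The ``memorize`` helper added earlier in this file is a plain dict-backed cache: results are keyed on the function name plus the hashed, stringified arguments, and they never expire. Decorating ``_build_regex_range`` below means each distinct argument combination is computed only once per process. A self-contained sketch of the same pattern (cache and names are local to the example):

    import functools

    _CACHE = {}

    def memoize(func):
        @functools.wraps(func)
        def wrapper(*args, **kwargs):
            # same keying scheme as memorize() above
            key = "%s:%s:%s" % (func.__name__, hash(str(args)),
                                hash(str(kwargs)))
            if key not in _CACHE:
                _CACHE[key] = func(*args, **kwargs)
            return _CACHE[key]
        return wrapper

    @memoize
    def slow_square(x):
        print('computing %d' % x)  # printed only on the first call per x
        return x * x

    slow_square(12)  # computes and caches
    slow_square(12)  # returns the cached result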
+@memorize def _build_regex_range(ws=True, invert=False, exclude=None): """Build a range regex for a set of characters in utf8. @@ -96,7 +117,7 @@ def _build_regex_range(ws=True, invert=False, exclude=None): The inversion is useful when we want to generate a set of ranges which is everything that's not a certain class. For instance, - produce all all the non printable characters as a set of ranges. + produce all the non printable characters as a set of ranges. """ if exclude is None: exclude = [] @@ -141,6 +162,7 @@ def valid_char(char): regex += "-" + re.escape(c) return regex + valid_name_regex_base = '^(?![%s])[%s]*(? CONF.bigvm_cluster_max_usage_percent or + reserved > CONF.bigvm_cluster_max_reservation_percent): + continue + filtered_provider_summaries[p] = d + + if not filtered_provider_summaries: + LOG.warning('Could not find a resource-provider to free up a ' + 'host for hypervisor size %(hv_size)d, because ' + 'all clusters are already using more than ' + '%(max_used)d%% of total memory or reserving more ' + 'than %(max_reserved)d%% of reservable memory.', + {'hv_size': hv_size, + 'max_used': CONF.bigvm_cluster_max_usage_percent, + 'max_reserved': + CONF.bigvm_cluster_max_reservation_percent}) + continue + + # filter out providers that are disabled for bigVMs + provider_summaries = filtered_provider_summaries + filtered_provider_summaries = {} + for p, d in provider_summaries.items(): + if BIGVM_DISABLED_TRAIT in d['traits']: + continue + filtered_provider_summaries[p] = d + + if not filtered_provider_summaries: + LOG.warning('Could not find a resource-provider to free up a ' + 'host for hypervisor size %(hv_size)d, because ' + 'all providers with enough space are disabled.', + {'hv_size': hv_size}) + continue + + candidates[hv_size] = (alloc_reqs, filtered_provider_summaries) + + for vc in vcenters: + for hv_size in missing_hv_sizes_per_vc[vc]: + if hv_size not in candidates: + LOG.warning('Could not find a resource-provider to free ' + 'up a host for hypervisor size %(hv_size)d in ' + '%(vc)s.', + {'hv_size': hv_size, 'vc': vc}) + continue + alloc_reqs, provider_summaries = candidates[hv_size] + + # filter providers by VC, as placement returned all matching + # providers + providers = {p: d for p, d in provider_summaries.items() + if vmware_providers.get(p, {}).get('vc') == vc} + + # select the one with the least usage + def _free_memory(p): + memory = providers[p]['resources'][MEMORY_MB] + return memory['capacity'] - memory['used'] + + provider_uuids = sorted((p for p in providers), + key=_free_memory, reverse=True) + + try: + for rp_uuid in provider_uuids: + host = vmware_providers[rp_uuid]['host'] + cm = vmware_providers[rp_uuid]['cell_mapping'] + with nova_context.target_cell(context, cm) as cctxt: + if self._free_host_for_provider(cctxt, rp_uuid, + host): + break + except oslo_exceptions.MessagingTimeout as e: + # we don't know if the timeout happened after we started + # freeing a host already or because we couldn't reach the + # nova-compute node. Therefore, we move on to the next HV + # size for that VC and hope the timeout resolves for the + # next run. + LOG.exception(e) + LOG.warning('Skipping HV size %(hv_size)s in VC %(vc)s ' + 'because of error', + {'hv_size': hv_size, 'vc': vc}) + + def _get_providers(self, context): + """Return our special and the basic vmware resource-providers + + This returns a list of vcenters and two dicts, where the + resource-provider uuid is the key. 
The value contains a dict with the + important information for each resource-provider, like host, az, vc and + cell_mapping + either the hypervisor size (vmware provider) or the + resource-provider dict (special provider). + """ + client = self.placement_client + + vmware_hvs = {} + for cm in CellMappingList.get_all(context): + with nova_context.target_cell(context, cm) as cctxt: + vmware_hvs.update({cn.uuid: cn.host for cn in + ComputeNodeList.get_by_hypervisor_type(cctxt, + VMWARE_HV_TYPE) + if not cn.deleted}) + + host_azs = {} + host_vcs = {} + for agg in AggregateList.get_all(context): + if not agg.availability_zone: + continue + + if agg.name == agg.availability_zone: + for host in agg.hosts: + host_azs[host] = agg.name + elif agg.name.startswith(SHARD_PREFIX): + for host in agg.hosts: + host_vcs[host] = agg.name + + vcenters = set(host_vcs.values()) + + host_mappings = {hm.host: hm.cell_mapping + for hm in HostMappingList.get_all(context)} + + # find all resource-providers that we added and also a list of vmware + # resource-providers + bigvm_providers = {} + vmware_providers = {} + resp = client.get('/resource_providers', + version=NESTED_PROVIDER_API_VERSION) + for rp in resp.json()['resource_providers']: + if rp['name'].startswith(CONF.bigvm_deployment_rp_name_prefix): + # retrieve the aggregates + url = '/resource_providers/{}/aggregates'.format(rp['uuid']) + resp = client.get(url, version=NESTED_PROVIDER_API_VERSION) + if resp.status_code != 200: + LOG.error('Could not retrieve aggregates for RP %(name)s ' + '(%(rp)s).', + {'name': rp['name'], 'rp': rp['uuid']}) + continue + aggregates = resp.json()['aggregates'] + if not aggregates: + LOG.error('RP %(name)s (%(rp)s) has no aggregate. Cannot ' + 'find "parent" provider.', + {'name': rp['name'], 'rp': rp['uuid']}) + continue + for agg in aggregates: + if agg not in vmware_hvs: + continue + host_rp_uuid = agg + break + else: + LOG.error('RP %(name)s (%(rp)s) has no aggregate matching ' + 'a compute node UUID. Cannot find "parent" ' + 'provider in %(aggs)s', + {'name': rp['name'], 'rp': rp['uuid'], + 'aggs': ', '.join(aggregates)}) + continue + host = vmware_hvs[host_rp_uuid] + cell_mapping = host_mappings[host] + bigvm_providers[rp['uuid']] = {'rp': rp, + 'host': host, + 'az': host_azs[host], + 'vc': host_vcs[host], + 'cell_mapping': cell_mapping, + 'host_rp_uuid': host_rp_uuid} + elif rp['uuid'] not in vmware_hvs: # ignore baremetal + continue + else: + # retrieve inventory for MEMORY_MB & MEMORY_RESERVABLE_MB info + url = '/resource_providers/{}/inventories'.format(rp['uuid']) + resp = client.get(url) + if resp.status_code != 200: + LOG.error('Could not retrieve inventory for RP %(rp)s.', + {'rp': rp['uuid']}) + continue + inventory = resp.json()["inventories"] + # Note(jakobk): It's possible to encounter incomplete (e.g. + # in-buildup) resource providers here, that don't have all the + # usual resources set. 
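The hunk that follows reads each provider's MEMORY_MB and MEMORY_RESERVABLE_MB inventories and converts them into usage percentages. The arithmetic, pulled out into a stand-alone sketch (the function name is made up; the zero-capacity convention mirrors the ZeroDivisionError handling below):

    def used_percent(used_mb, total_mb, reserved_mb):
        # capacity excludes the inventory's 'reserved' share, matching the
        # (total - reserved) computations in _get_providers()
        capacity = total_mb - reserved_mb
        try:
            return used_mb / float(capacity) * 100
        except ZeroDivisionError:
            return 100.0  # zero capacity is treated as fully used

    print(used_percent(1536, 4096, 1024))  # -> 50.0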
+ memory_mb_inventory = inventory.get(MEMORY_MB) + if not memory_mb_inventory: + LOG.info('no %(mem_res)s resource in RP %(rp)s', + {'mem_res': MEMORY_MB, 'rp': rp['uuid']}) + continue + memory_reservable_mb_inventory = inventory.get( + MEMORY_RESERVABLE_MB_RESOURCE) + if not memory_reservable_mb_inventory: + LOG.debug('no %(memreserv_res)s in resource provider' + ' %(rp)s', + {'memreserv_res': MEMORY_RESERVABLE_MB_RESOURCE, + 'rp': rp['uuid']}) + continue + + # retrieve the usage + url = '/resource_providers/{}/usages' + resp = client.get(url.format(rp['uuid'])) + if resp.status_code != 200: + LOG.error('Could not retrieve usages for RP %(rp)s.', + {'rp': rp['uuid']}) + continue + usages = resp.json()['usages'] + + hv_size = memory_mb_inventory['max_unit'] + memory_mb_total = (memory_mb_inventory['total'] - + memory_mb_inventory['reserved']) + try: + memory_mb_used_percent = (usages[MEMORY_MB] / float( + memory_mb_total) * 100) + except ZeroDivisionError: + LOG.warning('memory_mb_total is 0 for resource provider ' + '%s', rp['uuid']) + memory_mb_used_percent = 100 + + memory_reservable_mb_total = ( + memory_reservable_mb_inventory['total'] - + memory_reservable_mb_inventory['reserved']) + try: + memory_reservable_mb_used_percent = ( + usages.get(MEMORY_RESERVABLE_MB_RESOURCE, 0) / float( + memory_reservable_mb_total) * 100) + except ZeroDivisionError: + LOG.info('memory_reservable_mb_total is 0 for resource ' + 'provider %s', rp['uuid']) + memory_reservable_mb_used_percent = 100 + + host = vmware_hvs[rp['uuid']] + # ignore hypervisors we would never use anyways + if hv_size < CONF.bigvm_mb: + LOG.debug('Ignoring %(host)s (%(hv_size)s < %(bigvm_mb)s)', + {'host': host, 'hv_size': hv_size, + 'bigvm_mb': CONF.bigvm_mb}) + continue + + cell_mapping = host_mappings[host] + if host not in host_azs or host not in host_vcs: + # seen this happening during buildup + LOG.debug('Ignoring %(host)s as it is not assigned to an ' + 'AZ or VC.', + {'host': host}) + continue + + # retrieve traits so we can find disabled and hana exclusive + # hosts + traits = client._get_provider_traits(context, rp['uuid']) + vmware_providers[rp['uuid']] = { + 'hv_size': hv_size, + 'host': host, + 'az': host_azs[host], + 'vc': host_vcs[host], + 'cell_mapping': cell_mapping, + 'traits': traits, + 'memory_mb_used_percent': memory_mb_used_percent, + 'memory_reservable_mb_used_percent': + memory_reservable_mb_used_percent} + + # make sure the placement cache is filled + client.get_provider_tree_and_ensure_root(context, rp['uuid'], + rp['name']) + + # retrieve all bigvm provider's inventories + for rp_uuid, rp in bigvm_providers.items(): + inventory = client._get_inventory(context, rp_uuid) + rp['inventory'] = inventory['inventories'] + + # make sure grouping by hv_size works properly later on, even if there + # are marginal differences in the reported hv_size. we need to have all + # vmware_providers to have the same hv_size if they're in the same + # "bucket", e.g. they're supposed to be 3 TB HVs. To make sure the + # placement query for allocation-candidates works, we use the smallest + # size and assign it to all in the same bucket. 
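The bucketing described in the comment above can be illustrated in isolation. The sketch below mirrors the loop that follows; the threshold value and the sample sizes are invented for the example:

    HV_SIZE_BUCKET_THRESHOLD_PERCENT = 10  # assumed value, for illustration

    def bucket_hv_sizes(sizes_mb):
        # walk sizes in ascending order; a new bucket starts once the gap
        # to the current bucket exceeds the percentage threshold
        buckets = {}
        current = None
        for size in sorted(sizes_mb):
            if (current is None or size - current >
                    HV_SIZE_BUCKET_THRESHOLD_PERCENT * current / 100):
                current = size
            buckets[size] = current
        return buckets

    # two almost-3TB hosts collapse into one bucket, the 6TB host gets its own:
    print(bucket_hv_sizes([3071000, 3072000, 6144000]))
    # {3071000: 3071000, 3072000: 3071000, 6144000: 6144000}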
+ hv_size_bucket = None + for rp_uuid, rp in sorted(vmware_providers.items(), + key=lambda x: x[1]['hv_size']): + if hv_size_bucket is None: + # first one is always a new bucket + hv_size_bucket = rp['hv_size'] + continue + + threshold = HV_SIZE_BUCKET_THRESHOLD_PERCENT * hv_size_bucket / 100 + if rp['hv_size'] - hv_size_bucket > threshold: + # set key if the difference to the last key is over the + # threshold + hv_size_bucket = rp['hv_size'] + + rp['hv_size'] = hv_size_bucket + + return (vcenters, bigvm_providers, vmware_providers) + + def _check_and_clean_providers(self, context, client, bigvm_providers, + vmware_providers): + + # check for reserved resources which indicate that the free host was + # consumed + providers_to_delete = {rp_uuid: rp + for rp_uuid, rp in bigvm_providers.items() + if rp['inventory'].get(BIGVM_RESOURCE, {}) + .get('reserved')} + + # check if we don't have a valid vmware provider for it (anymore) and + # thus cannot be a valid provider ourselves + providers_to_delete.update({ + rp_uuid: rp for rp_uuid, rp in bigvm_providers.items() + if rp_uuid not in providers_to_delete and + rp['host_rp_uuid'] not in vmware_providers}) + + # check for resource-providers having more than + # bigvm_cluster_max_usage_percent usage + for rp_uuid, rp in bigvm_providers.items(): + if rp_uuid in providers_to_delete: + # no need to check if we already remove it anyways + continue + + host_rp = vmware_providers[rp['host_rp_uuid']] + # Hosts exclusively used for hana_* flavors cannot be too full + if BIGVM_EXCLUSIVE_TRAIT in host_rp['traits']: + continue + + used_percent = host_rp['memory_mb_used_percent'] + if used_percent > CONF.bigvm_cluster_max_usage_percent: + providers_to_delete[rp_uuid] = rp + LOG.info('Resource-provider %(host_rp_uuid)s with free host ' + 'is overused on regular memory usage. Marking ' + '%(rp_uuid)s for deletion.', + {'host_rp_uuid': rp['host_rp_uuid'], + 'rp_uuid': rp_uuid}) + continue + + reserved_percent = host_rp['memory_reservable_mb_used_percent'] + if reserved_percent > CONF.bigvm_cluster_max_reservation_percent: + providers_to_delete[rp_uuid] = rp + LOG.info('Resource-provider %(host_rp_uuid)s with free host ' + 'is overused on reserved memory usage. Marking ' + '%(rp_uuid)s for deletion.', + {'host_rp_uuid': rp['host_rp_uuid'], + 'rp_uuid': rp_uuid}) + + # check if a provider got used in the background without our knowledge + for rp_uuid, rp in bigvm_providers.items(): + if rp_uuid in providers_to_delete: + # no need to check if we already remove it anyways + continue + + # if we have no resources on the resource-provider, we don't expect + # it to be free, yet + if not rp['inventory'].get(BIGVM_RESOURCE, {}): + continue + + # ask the compute-node if the host is still free. anything other + # than FREE_HOST_STATE_DONE means we've got an unexpected state and + # should re-schedule that size + cm = rp['cell_mapping'] + with nova_context.target_cell(context, cm) as cctxt: + state = self.special_spawn_rpc.free_host(cctxt, rp['host']) + if state != special_spawning.FREE_HOST_STATE_DONE: + LOG.info('Checking on already freed up host %(host)s ' + 'returned with state %(state)s. 
Marking '
+                         '%(rp_uuid)s for deletion.',
+                         {'host': rp['host'],
+                          'state': state,
+                          'rp_uuid': rp_uuid})
+                providers_to_delete[rp_uuid] = rp
+
+        # check if a provider was disabled by now
+        for rp_uuid, rp in bigvm_providers.items():
+            if rp_uuid in providers_to_delete:
+                # no need to check if we already remove it anyways
+                continue
+
+            host_rp = vmware_providers[rp['host_rp_uuid']]
+            if BIGVM_DISABLED_TRAIT in host_rp['traits']:
+                providers_to_delete[rp_uuid] = rp
+                LOG.info('Resource-provider %(host_rp_uuid)s got disabled '
+                         'for bigVMs. Marking %(rp_uuid)s for deletion.',
+                         {'host_rp_uuid': host_rp['uuid'],
+                          'rp_uuid': rp_uuid})
+
+        for rp_uuid, rp in providers_to_delete.items():
+            self._clean_up_consumed_provider(context, rp_uuid, rp)
+
+        # clean up our list of resource-providers from consumed or overused
+        # hosts
+        for rp_uuid in providers_to_delete:
+            del bigvm_providers[rp_uuid]
+
+    def _get_allocations_for_consumer(self, context, consumer_uuid):
+        """Same as SchedulerReportClient.get_allocations_for_consumer() but
+        includes user_id and project_id in the returned values, by doing the
+        request with a newer version.
+        """
+        client = self.placement_client
+        url = '/allocations/%s' % consumer_uuid
+        resp = client.get(url, global_request_id=context.global_id,
+                          version=1.17)
+        if not resp:
+            return {}
+        else:
+            return resp.json()
+
+    def _remove_provider_from_consumer_allocations(self, context,
+                                                   consumer_uuid, rp_uuid):
+        """This is basically the same as
+        SchedulerClient.remove_provider_from_instance_allocation, but without
+        the resize-on-same-host detection - it simply removes the provider from
+        the allocations of the consumer.
+        """
+        client = self.placement_client
+
+        # get the allocation details, because we need user_id and
+        # project_id to call the delete function
+        current_allocs = \
+            self._get_allocations_for_consumer(context, consumer_uuid)
+
+        LOG.debug('Found the following allocations for consumer '
+                  '%(consumer_uuid)s: %(allocations)s',
+                  {'consumer_uuid': consumer_uuid,
+                   'allocations': current_allocs})
+
+        new_allocs = [
+            {
+                'resource_provider': {
+                    'uuid': alloc_rp_uuid,
+                },
+                'resources': alloc['resources'],
+            }
+            for alloc_rp_uuid, alloc in current_allocs['allocations'].items()
+            if alloc_rp_uuid != rp_uuid
+        ]
+        payload = {'allocations': new_allocs,
+                   'project_id': current_allocs['project_id'],
+                   'user_id': current_allocs['user_id']}
+        LOG.debug("Sending updated allocation %s for instance %s after "
+                  "removing resources for %s.",
+                  new_allocs, consumer_uuid, rp_uuid)
+        url = '/allocations/%s' % consumer_uuid
+        r = client.put(url, payload, version='1.10',
+                       global_request_id=context.global_id)
+        if r.status_code != 204:
+            LOG.warning("Failed to save allocation for %s. Got HTTP %s: %s",
+                        consumer_uuid, r.status_code, r.text)
+        return r.status_code == 204
+
+    def _clean_up_consumed_provider(self, context, rp_uuid, rp):
+        """Clean up after a resource-provider was consumed
+
+        We need to remove all allocations, the resource-provider itself and
+        also the hostgroup from the vCenter.
+        """
+        client = self.placement_client
+
+        # find the consumer
+        allocations = client.get_allocations_for_resource_provider(context,
+                                                                   rp_uuid)
+        # we might have already deleted them and got killed or the VM got
+        # deleted in the meantime
+        failures = 0
+        for consumer_uuid, resources in allocations.items():
+            # delete the allocations
+            # we can't use
+            # SchedulerClient.remove_provider_from_instance_allocation here, as
+            # this would detect a resize and try to remove the resources, but
+            # keep an allocation. We need the specific allocation for our
+            # resource-provider removed.
+            if self._remove_provider_from_consumer_allocations(context,
+                    consumer_uuid, rp_uuid):
+                LOG.info('Removed bigvm allocations for %(consumer_uuid)s '
+                         'from RP', {'consumer_uuid': consumer_uuid})
+            else:
+                LOG.error('Could not remove bigvm allocations for '
+                          '%(consumer_uuid)s from the corresponding RP.',
+                          {'consumer_uuid': consumer_uuid})
+                failures += 1
+
+        if failures:
+            # skip removing the resource-provider because we couldn't
+            # remove all allocations. we'll retry on the next run
+            LOG.warning('Skipping removal of resource-provider '
+                        '%(rp_uuid)s as we could not remove some '
+                        'allocations.',
+                        {'rp_uuid': rp_uuid})
+            return
+
+        # remove the hostgroup from the host
+        cm = rp['cell_mapping']
+        with nova_context.target_cell(context, cm) as cctxt:
+            if not self.special_spawn_rpc.remove_host_from_hostgroup(cctxt,
+                    rp['host']):
+                LOG.warning('Skipping removal of resource-provider '
+                            '%(rp_uuid)s as we could not remove the hostgroup '
+                            'from the vCenter.',
+                            {'rp_uuid': rp_uuid})
+                return
+
+        # delete the resource-provider
+        client._delete_provider(rp_uuid)
+        LOG.info('Removed resource-provider %(rp_uuid)s.',
+                 {'rp_uuid': rp_uuid})
+
+    def _get_missing_hv_sizes(self, context, vcenters,
+                              bigvm_providers, vmware_providers):
+        """Search and return hypervisor sizes having no freed-up host
+
+        Returns a dict containing, for each vCenter, the set of hv sizes
+        missing a freed-up host.
+        """
+        found_hv_sizes_per_vc = {vc: set() for vc in vcenters}
+
+        for rp_uuid, rp in bigvm_providers.items():
+            host_rp_uuid = rp['host_rp_uuid']
+            hv_size = vmware_providers[host_rp_uuid]['hv_size']
+            found_hv_sizes_per_vc[rp['vc']].add(hv_size)
+
+            # if there are no resources in that resource-provider, it means
+            # that we started freeing up a host. We have to check the process
+            # state and add the resources once it's done.
+            if not rp['inventory'].get(BIGVM_RESOURCE):
+                cm = rp['cell_mapping']
+                with nova_context.target_cell(context, cm) as cctxt:
+                    state = self.special_spawn_rpc.free_host(cctxt, rp['host'])
+
+                if state == special_spawning.FREE_HOST_STATE_DONE:
+                    self._add_resources_to_provider(context, rp_uuid, rp)
+                elif state == special_spawning.FREE_HOST_STATE_ERROR:
+                    LOG.warning('Freeing a host for spawning failed on '
+                                '%(host)s.',
+                                {'host': rp['host']})
+                    # do some cleanup, so another compute-node is used
+                    found_hv_sizes_per_vc[rp['vc']].remove(hv_size)
+                    self._clean_up_consumed_provider(context, rp_uuid, rp)
+                else:
+                    LOG.info('Waiting for host on %(host)s to free up.',
+                             {'host': rp['host']})
+
+        hv_sizes_per_vc = {
+            vc: set(rp['hv_size'] for rp in vmware_providers.values()
+                    if rp['vc'] == vc)
+            for vc in vcenters}
+
+        missing_hv_sizes_per_vc = {
+            vc: hv_sizes_per_vc[vc] - found_hv_sizes_per_vc[vc]
+            for vc in vcenters}
+
+        return missing_hv_sizes_per_vc
+
+    def _add_resources_to_provider(self, context, rp_uuid, rp):
+        """Add our custom resources to the provider so they can be consumed.
+ + This should be called once the host is freed up in the cluster. + """ + client = self.placement_client + inv_data = {BIGVM_RESOURCE: { + # we use 2 here so we can reserve 1 later. in queens we can't + # reserve $total + 'max_unit': 2, 'min_unit': 2, 'total': 2}} + + # we can't use `set_inventory_for_provider` here, because that doesn't + # tell us if we succeeded ... so we copy everything over. + client._ensure_resource_provider( + context, rp_uuid, rp['rp']['name']) + + # Auto-create custom resource classes coming from a virt driver + for rc_name in inv_data: + if rc_name not in orc.STANDARDS: + client._ensure_resource_classes(context, [rc_name]) + + if client._update_inventory(context, rp_uuid, inv_data): + LOG.info('Added inventory to the resource-provider for spawning ' + 'on %(host)s.', + {'host': rp['host']}) + else: + LOG.error('Adding inventory to the resource-provider for ' + 'spawning on %(host)s failed.', + {'host': rp['host']}) + + def _free_host_for_provider(self, context, rp_uuid, host): + """Takes care of creating a child resource provider in placement to + "claim" a resource-provider/host for freeing up a host. Then calls the + driver to actually free up the host in the cluster. + """ + client = self.placement_client + needs_cleanup = True + new_rp_uuid = None + try: + # TODO(jkulik) try to reserve the necessary memory for freeing a + # full hypervisor + + # create a child resource-provider + new_rp_name = '{}-{}'.format(CONF.bigvm_deployment_rp_name_prefix, + host) + # this is basically copied from placement client, but we don't want + # to set the uuid manually which it doesn't support + url = "/resource_providers" + payload = { + 'name': new_rp_name + } + resp = client.post(url, payload, + version=NESTED_PROVIDER_API_VERSION, + global_request_id=context.global_id) + placement_req_id = get_placement_request_id(resp) + if resp.status_code == 201: + new_rp_uuid = resp.headers['Location'].split('/')[-1] + msg = ("[%(placement_req_id)s] Created resource provider " + "record via placement API for host %(host)s for " + "special spawning: %(rp_uuid)s") + args = { + 'host': host, + 'placement_req_id': placement_req_id, + 'rp_uuid': new_rp_uuid + } + LOG.info(msg, args) + else: + msg = ("[%(placement_req_id)s] Failed to create resource " + "provider record in placement API for %(host)s for " + "special spawning. 
Got %(status_code)d: %(err_text)s.") + args = { + 'host': host, + 'status_code': resp.status_code, + 'err_text': resp.text, + 'placement_req_id': placement_req_id, + } + LOG.error(msg, args) + raise exception.ResourceProviderCreationFailed( + name=new_rp_name) + # make sure the placement cache is filled + client.get_provider_tree_and_ensure_root(context, new_rp_uuid, + new_rp_name) + + # ensure the parent resource-provider has its uuid as aggregate set + # in addition to its previous aggregates + agg_info = client._get_provider_aggregates(context, rp_uuid) + if rp_uuid not in agg_info.aggregates: + agg_info.aggregates.add(rp_uuid) + client.set_aggregates_for_provider( + context, rp_uuid, agg_info.aggregates, + generation=agg_info.generation) + + # add the newly-created resource-provider to the parent uuid's + # aggregate + client.set_aggregates_for_provider(context, new_rp_uuid, [rp_uuid]) + + # make the newly-created resource-provider share its resources with + # its aggregates + client.set_traits_for_provider(context, new_rp_uuid, + ['MISC_SHARES_VIA_AGGREGATE']) + + # find a host and let DRS free it up + state = self.special_spawn_rpc.free_host(context, host) + + if state == special_spawning.FREE_HOST_STATE_DONE: + # there were free resources available immediately + needs_cleanup = False + new_rp = {'host': host, + 'rp': {'name': new_rp_name}} + self._add_resources_to_provider(context, new_rp_uuid, new_rp) + elif state == special_spawning.FREE_HOST_STATE_STARTED: + # it started working on it. we have to check back later + # if it's done + needs_cleanup = False + finally: + # clean up placement, if something went wrong + if needs_cleanup and new_rp_uuid is not None: + client._delete_provider(new_rp_uuid) + + return not needs_cleanup diff --git a/nova/block_device.py b/nova/block_device.py index 11644c1c684..d1306eb0a60 100644 --- a/nova/block_device.py +++ b/nova/block_device.py @@ -13,11 +13,12 @@ # License for the specific language governing permissions and limitations # under the License. 
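The provider wiring performed by _free_host_for_provider() above follows placement's standard sharing-provider pattern. A compressed outline using the same client methods the diff calls (treat this as a sketch of the sequence, not a drop-in implementation; error handling and cache priming are omitted):

    def wire_sharing_provider(client, context, parent_uuid, child_uuid):
        # 1. the parent provider must carry its own UUID as an aggregate,
        #    on top of whatever aggregates it already has
        agg_info = client._get_provider_aggregates(context, parent_uuid)
        if parent_uuid not in agg_info.aggregates:
            client.set_aggregates_for_provider(
                context, parent_uuid, agg_info.aggregates | {parent_uuid},
                generation=agg_info.generation)

        # 2. the freshly created child joins that same aggregate
        client.set_aggregates_for_provider(context, child_uuid, [parent_uuid])

        # 3. the trait marks the child's inventory as shared with all
        #    providers that are members of its aggregates
        client.set_traits_for_provider(
            context, child_uuid, ['MISC_SHARES_VIA_AGGREGATE'])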
+import copy import re from oslo_log import log as logging from oslo_utils import strutils - +from oslo_utils import units import nova.conf from nova import exception @@ -34,6 +35,12 @@ 'root': DEFAULT_ROOT_DEV_NAME, 'swap': 'sda3'} +# Image attributes which Cinder stores in volume image metadata +# as regular properties +VIM_IMAGE_ATTRIBUTES = ( + 'image_id', 'image_name', 'size', 'checksum', + 'container_format', 'disk_format', 'min_ram', 'min_disk', +) bdm_legacy_fields = set(['device_name', 'delete_on_termination', 'virtual_name', 'snapshot_id', @@ -45,7 +52,7 @@ 'guest_format', 'device_type', 'disk_bus', 'boot_index', 'device_name', 'delete_on_termination', 'snapshot_id', 'volume_id', 'volume_size', 'image_id', 'no_device', - 'connection_info', 'tag']) + 'connection_info', 'tag', 'volume_type']) bdm_db_only_fields = set(['id', 'instance_uuid', 'attachment_id', 'uuid']) @@ -173,6 +180,7 @@ def from_api(cls, api_dict, image_uuid_specified): source_type = api_dict.get('source_type') device_uuid = api_dict.get('uuid') destination_type = api_dict.get('destination_type') + volume_type = api_dict.get('volume_type') if source_type == 'blank' and device_uuid: raise exception.InvalidBDMFormat( @@ -191,12 +199,24 @@ def from_api(cls, api_dict, image_uuid_specified): boot_index = -1 boot_index = int(boot_index) - # if this bdm is generated from --image ,then + # if this bdm is generated from --image, then # source_type = image and destination_type = local is allowed if not (image_uuid_specified and boot_index == 0): raise exception.InvalidBDMFormat( details=_("Mapping image to local is not supported.")) + if destination_type == 'local' and volume_type: + raise exception.InvalidBDMFormat( + details=_("Specifying a volume_type with destination_type=" + "local is not supported.")) + + # Specifying a volume_type with a pre-existing source volume is + # not supported. + if source_type == 'volume' and volume_type: + raise exception.InvalidBDMFormat( + details=_("Specifying volume type to existing volume is " + "not supported.")) + api_dict.pop('uuid', None) return cls(api_dict) @@ -291,30 +311,6 @@ def snapshot_from_bdm(snapshot_id, template): return BlockDeviceDict(snapshot_dict) -def legacy_mapping(block_device_mapping): - """Transform a list of block devices of an instance back to the - legacy data format. 
-    """
-
-    legacy_block_device_mapping = []
-
-    for bdm in block_device_mapping:
-        try:
-            legacy_block_device = BlockDeviceDict(bdm).legacy()
-        except exception.InvalidBDMForLegacy:
-            continue
-
-        legacy_block_device_mapping.append(legacy_block_device)
-
-    # Re-enumerate the ephemeral devices
-    for i, dev in enumerate(dev for dev in legacy_block_device_mapping
-                            if dev['virtual_name'] and
-                            is_ephemeral(dev['virtual_name'])):
-        dev['virtual_name'] = dev['virtual_name'][:-1] + str(i)
-
-    return legacy_block_device_mapping
-
-
 def from_legacy_mapping(legacy_block_device_mapping, image_uuid='',
                         root_device_name=None, no_root=False):
     """Transform a legacy list of block devices to the new data format."""
@@ -406,7 +402,7 @@ def validate_and_default_volume_size(bdm):
                 details=_("Invalid volume_size."))
 
 
-_ephemeral = re.compile('^ephemeral(\d|[1-9]\d+)$')
+_ephemeral = re.compile(r'^ephemeral(\d|[1-9]\d+)$')
 
 
 def is_ephemeral(device_name):
@@ -486,7 +482,7 @@ def strip_prefix(device_name):
     return _pref.sub('', device_name) if device_name else device_name
 
 
-_nums = re.compile('\d+')
+_nums = re.compile(r'\d+')
 
 
 def get_device_letter(device_name):
@@ -496,14 +492,36 @@ def get_device_letter(device_name):
     return _nums.sub('', letter) if device_name else device_name
 
 
+def generate_device_letter(index):
+    """Returns the device letter for an index (starting at zero),
+       i.e.
+       index = 0, 1,..., 18277
+       results = a, b,..., zzz
+    """
+    base = ord('z') - ord('a') + 1
+    unit_dev_name = ""
+    while index >= 0:
+        letter = chr(ord('a') + (index % base))
+        unit_dev_name = letter + unit_dev_name
+        index = int(index / base) - 1
+
+    return unit_dev_name
+
+
+def generate_device_name(prefix, index):
+    """Returns the device name for an index (starting at zero),
+       i.e.
+       prefix = vd
+       index = 0, 1,..., 18277
+       results = vda, vdb,..., vdzzz
+    """
+    return prefix + generate_device_letter(index)
+
+
 def instance_block_mapping(instance, bdms):
     root_device_name = instance['root_device_name']
-    # NOTE(clayg): remove this when xenapi is setting default_root_device
     if root_device_name is None:
-        if driver.is_xenapi():
-            root_device_name = '/dev/xvda'
-        else:
-            return _DEFAULT_MAPPINGS
+        return _DEFAULT_MAPPINGS
 
     mappings = {}
     mappings['ami'] = strip_dev(root_device_name)
@@ -590,3 +608,89 @@ def get_bdm_swap_list(block_device_mappings):
 def get_bdm_local_disk_num(block_device_mappings):
     return len([bdm for bdm in block_device_mappings
                 if bdm.get('destination_type') == 'local'])
+
+
+def get_bdm_image_metadata(context, image_api, volume_api,
+                           block_device_mapping, legacy_bdm=True):
+    """Attempt to retrieve image metadata from a given block_device_mapping.
+
+    If we are booting from a volume, we need to get the volume details from
+    Cinder and make sure we pass the metadata back accordingly.
+
+    :param context: request context
+    :param image_api: Image API
+    :param volume_api: Volume API
+    :param block_device_mapping: list of block device mappings to inspect
+    :param legacy_bdm: True if the mappings are in the legacy format
+    """
+    if not block_device_mapping:
+        return {}
+
+    for bdm in block_device_mapping:
+        if (legacy_bdm and
+                get_device_letter(
+                    bdm.get('device_name', '')) != 'a'):
+            continue
+        elif not legacy_bdm and bdm.get('boot_index') != 0:
+            continue
+
+        volume_id = bdm.get('volume_id')
+        snapshot_id = bdm.get('snapshot_id')
+        if snapshot_id:
+            # NOTE(alaski): A volume snapshot inherits metadata from the
+            # originating volume, but the API does not expose metadata
+            # on the snapshot itself. So we query the volume for it below.
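A quick spot-check of the bijective base-26 naming implemented by the two helpers added further up in this file (assuming they are importable from nova.block_device, as in this diff):

    from nova.block_device import generate_device_letter, generate_device_name

    assert generate_device_letter(0) == 'a'
    assert generate_device_letter(25) == 'z'
    assert generate_device_letter(26) == 'aa'      # wraps like spreadsheet columns
    assert generate_device_letter(18277) == 'zzz'  # the docstring's upper example
    assert generate_device_name('vd', 1) == 'vdb'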
+ snapshot = volume_api.get_snapshot(context, snapshot_id) + volume_id = snapshot['volume_id'] + + if bdm.get('image_id'): + try: + image_id = bdm['image_id'] + image_meta = image_api.get(context, image_id) + return image_meta + except Exception: + raise exception.InvalidBDMImage(id=image_id) + elif volume_id: + try: + volume = volume_api.get(context, volume_id) + except exception.CinderConnectionFailed: + raise + except Exception: + raise exception.InvalidBDMVolume(id=volume_id) + + if not volume.get('bootable', True): + raise exception.InvalidBDMVolumeNotBootable(id=volume_id) + + return get_image_metadata_from_volume(volume) + return {} + + +def get_image_metadata_from_volume(volume): + properties = copy.copy(volume.get('volume_image_metadata', {})) + image_meta = {'properties': properties} + # Volume size is no longer related to the original image size, + # so we take it from the volume directly. Cinder creates + # volumes in Gb increments, and stores size in Gb, whereas + # glance reports size in bytes. As we're returning glance + # metadata here, we need to convert it. + image_meta['size'] = volume.get('size', 0) * units.Gi + # NOTE(yjiang5): restore the basic attributes + # NOTE(mdbooth): These values come from volume_glance_metadata + # in cinder. This is a simple key/value table, and all values + # are strings. We need to convert them to ints to avoid + # unexpected type errors. + for attr in VIM_IMAGE_ATTRIBUTES: + val = properties.pop(attr, None) + if attr in ('min_ram', 'min_disk'): + image_meta[attr] = int(val or 0) + # NOTE(mriedem): Set the status to 'active' as a really old hack + # from when this method was in the compute API class and is + # needed for _validate_flavor_image which makes sure the image + # is 'active'. For volume-backed servers, if the volume is not + # available because the image backing the volume is not active, + # then the compute API trying to reserve the volume should fail. + image_meta['status'] = 'active' + + # the owner might be missing if cinder policy disallows it + image_meta['owner'] = volume.get('owner', None) + return image_meta diff --git a/nova/cache_utils.py b/nova/cache_utils.py index 6a5bd4418ae..1892e93f75f 100644 --- a/nova/cache_utils.py +++ b/nova/cache_utils.py @@ -109,22 +109,8 @@ def get(self, key): return None return value - def get_or_create(self, key, creator): - return self.region.get_or_create(key, creator) - def set(self, key, value): return self.region.set(key, value) - def add(self, key, value): - return self.region.get_or_create(key, lambda: value) - def delete(self, key): return self.region.delete(key) - - def get_multi(self, keys): - values = self.region.get_multi(keys) - return [None if value is cache.NO_VALUE else value for value in - values] - - def delete_multi(self, keys): - return self.region.delete_multi(keys) diff --git a/nova/cells/__init__.py b/nova/cells/__init__.py deleted file mode 100644 index caf1eab3f2e..00000000000 --- a/nova/cells/__init__.py +++ /dev/null @@ -1,19 +0,0 @@ -# Copyright (c) 2012 Rackspace Hosting -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the -# License for the specific language governing permissions and limitations -# under the License. - -""" -Cells -""" - -TOPIC = 'cells' diff --git a/nova/cells/driver.py b/nova/cells/driver.py deleted file mode 100644 index 922e42e5494..00000000000 --- a/nova/cells/driver.py +++ /dev/null @@ -1,41 +0,0 @@ -# Copyright (c) 2012 Rackspace Hosting -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -""" -Base Cells Communication Driver -""" - - -class BaseCellsDriver(object): - """The base class for cells communication. - - One instance of this class will be created for every neighbor cell - that we find in the DB and it will be associated with the cell in - its CellState. - - One instance is also created by the cells manager for setting up - the consumers. - """ - def start_servers(self, msg_runner): - """Start any messaging servers the driver may need.""" - raise NotImplementedError() - - def stop_servers(self): - """Stop accepting messages.""" - raise NotImplementedError() - - def send_message_to_cell(self, cell_state, message): - """Send a message to a cell.""" - raise NotImplementedError() diff --git a/nova/cells/filters/__init__.py b/nova/cells/filters/__init__.py deleted file mode 100644 index 33988c2ccf0..00000000000 --- a/nova/cells/filters/__init__.py +++ /dev/null @@ -1,56 +0,0 @@ -# Copyright (c) 2012-2013 Rackspace Hosting -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -""" -Cell scheduler filters -""" - -from nova import filters - - -class BaseCellFilter(filters.BaseFilter): - """Base class for cell filters.""" - - def authorized(self, ctxt): - """Return whether or not the context is authorized for this filter - based on policy. - The policy action is "cells_scheduler_filter:" where - is the name of the filter class. - """ - name = 'cells_scheduler_filter:' + self.__class__.__name__ - return ctxt.can(name, fatal=False) - - def _filter_one(self, cell, filter_properties): - return self.cell_passes(cell, filter_properties) - - def cell_passes(self, cell, filter_properties): - """Return True if the CellState passes the filter, otherwise False. - Override this in a subclass. - """ - raise NotImplementedError() - - -class CellFilterHandler(filters.BaseFilterHandler): - def __init__(self): - super(CellFilterHandler, self).__init__(BaseCellFilter) - - -def all_filters(): - """Return a list of filter classes found in this directory. 
- - This method is used as the default for available scheduler filters - and should return a list of all filter classes available. - """ - return CellFilterHandler().get_all_classes() diff --git a/nova/cells/filters/different_cell.py b/nova/cells/filters/different_cell.py deleted file mode 100644 index 00607fad81a..00000000000 --- a/nova/cells/filters/different_cell.py +++ /dev/null @@ -1,62 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -""" -Different cell filter. - -A scheduler hint of 'different_cell' with a value of a full cell name may be -specified to route a build away from a particular cell. -""" - -import six - -from nova.cells import filters -from nova.cells import utils as cells_utils - - -class DifferentCellFilter(filters.BaseCellFilter): - """Different cell filter. Works by specifying a scheduler hint of - 'different_cell'. The value should be the full cell path. - """ - def filter_all(self, cells, filter_properties): - """Override filter_all() which operates on the full list - of cells... - """ - scheduler_hints = filter_properties.get('scheduler_hints') - if not scheduler_hints: - return cells - - cell_routes = scheduler_hints.get('different_cell') - if not cell_routes: - return cells - if isinstance(cell_routes, six.string_types): - cell_routes = [cell_routes] - - if not self.authorized(filter_properties['context']): - # No filtering, if not authorized. - return cells - - routing_path = filter_properties['routing_path'] - filtered_cells = [] - for cell in cells: - if not self._cell_state_matches(cell, routing_path, cell_routes): - filtered_cells.append(cell) - - return filtered_cells - - def _cell_state_matches(self, cell_state, routing_path, cell_routes): - cell_route = routing_path - if not cell_state.is_me: - cell_route += cells_utils.PATH_CELL_SEP + cell_state.name - if cell_route in cell_routes: - return True - return False diff --git a/nova/cells/filters/image_properties.py b/nova/cells/filters/image_properties.py deleted file mode 100644 index 69ef33b6112..00000000000 --- a/nova/cells/filters/image_properties.py +++ /dev/null @@ -1,67 +0,0 @@ -# Copyright (c) 2012-2013 Rackspace Hosting -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -""" -Image properties filter. - -Image metadata named 'hypervisor_version_requires' with a version specification -may be specified to ensure the build goes to a cell which has hypervisors of -the required version. 
- -If either the version requirement on the image or the hypervisor capability -of the cell is not present, this filter returns without filtering out the -cells. -""" - -from distutils import versionpredicate - -from nova.cells import filters - - -class ImagePropertiesFilter(filters.BaseCellFilter): - """Image properties filter. Works by specifying the hypervisor required in - the image metadata and the supported hypervisor version in cell - capabilities. - """ - - def filter_all(self, cells, filter_properties): - """Override filter_all() which operates on the full list - of cells... - """ - request_spec = filter_properties.get('request_spec', {}) - image_properties = request_spec.get('image', {}).get('properties', {}) - hypervisor_version_requires = image_properties.get( - 'hypervisor_version_requires') - - if hypervisor_version_requires is None: - return cells - - filtered_cells = [] - for cell in cells: - version = cell.capabilities.get('prominent_hypervisor_version') - if version: - l = list(version) - version = str(l[0]) - - if not version or self._matches_version(version, - hypervisor_version_requires): - filtered_cells.append(cell) - - return filtered_cells - - def _matches_version(self, version, version_requires): - predicate = versionpredicate.VersionPredicate( - 'prop (%s)' % version_requires) - return predicate.satisfied_by(version) diff --git a/nova/cells/filters/target_cell.py b/nova/cells/filters/target_cell.py deleted file mode 100644 index fdf2362daa1..00000000000 --- a/nova/cells/filters/target_cell.py +++ /dev/null @@ -1,71 +0,0 @@ -# Copyright (c) 2012-2013 Rackspace Hosting -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -""" -Target cell filter. - -A scheduler hint of 'target_cell' with a value of a full cell name may be -specified to route a build to a particular cell. No error handling is -done as there's no way to know whether the full path is a valid. -""" - -from oslo_log import log as logging - -from nova.cells import filters - -LOG = logging.getLogger(__name__) - - -class TargetCellFilter(filters.BaseCellFilter): - """Target cell filter. Works by specifying a scheduler hint of - 'target_cell'. The value should be the full cell path. - """ - - def filter_all(self, cells, filter_properties): - """Override filter_all() which operates on the full list - of cells... - """ - scheduler_hints = filter_properties.get('scheduler_hints') - if not scheduler_hints: - return cells - - # This filter only makes sense at the top level, as a full - # cell name is specified. So we pop 'target_cell' out of the - # hints dict. - cell_name = scheduler_hints.pop('target_cell', None) - if not cell_name: - return cells - - # This authorization is after popping off target_cell, so - # that in case this fails, 'target_cell' is not left in the - # dict when child cells go to schedule. - if not self.authorized(filter_properties['context']): - # No filtering, if not authorized. 
- return cells - - LOG.info("Forcing direct route to %(cell_name)s because " - "of 'target_cell' scheduler hint", - {'cell_name': cell_name}) - - scheduler = filter_properties['scheduler'] - if cell_name == filter_properties['routing_path']: - return [scheduler.state_manager.get_my_state()] - ctxt = filter_properties['context'] - - scheduler.msg_runner.build_instances(ctxt, cell_name, - filter_properties['host_sched_kwargs']) - - # Returning None means to skip further scheduling, because we - # handled it. diff --git a/nova/cells/manager.py b/nova/cells/manager.py deleted file mode 100644 index 23d7cb7dc3a..00000000000 --- a/nova/cells/manager.py +++ /dev/null @@ -1,586 +0,0 @@ -# Copyright (c) 2012 Rackspace Hosting -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -""" -Cells Service Manager -""" -import datetime -import time - -from oslo_log import log as logging -import oslo_messaging -from oslo_service import periodic_task -from oslo_utils import timeutils - -from six.moves import range - -from nova.cells import messaging -from nova.cells import rpc_driver as cells_rpc_driver -from nova.cells import state as cells_state -from nova.cells import utils as cells_utils -from nova.compute import rpcapi as compute_rpcapi -import nova.conf -from nova import context -from nova import exception -from nova import manager -from nova import objects -from nova.objects import base as base_obj -from nova.objects import instance as instance_obj - - -CONF = nova.conf.CONF - -LOG = logging.getLogger(__name__) - - -class CellsManager(manager.Manager): - """The nova-cells manager class. This class defines RPC - methods that the local cell may call. This class is NOT used for - messages coming from other cells. That communication is - driver-specific. - - Communication to other cells happens via the nova.cells.messaging module. - The MessageRunner from that module will handle routing the message to - the correct cell via the communications driver. Most methods below - create 'targeted' (where we want to route a message to a specific cell) - or 'broadcast' (where we want a message to go to multiple cells) - messages. - - Scheduling requests get passed to the scheduler class. - """ - - target = oslo_messaging.Target(version='1.38') - - def __init__(self, *args, **kwargs): - LOG.warning('The cells feature of Nova is considered experimental ' - 'by the OpenStack project because it receives much ' - 'less testing than the rest of Nova. This may change ' - 'in the future, but current deployers should be aware ' - 'that the use of it in production right now may be ' - 'risky. Also note that cells does not currently ' - 'support rolling upgrades, it is assumed that cells ' - 'deployments are upgraded lockstep so n-1 cells ' - 'compatibility does not work.') - # Mostly for tests. 
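# --- Editor's illustrative sketch (not part of the original patch): the
# kwargs.pop() pattern used just below lets tests inject a fake state
# manager without widening the public constructor signature. The Service
# class here is a hypothetical stand-in, not nova code.
class Service:
    def __init__(self, *args, **kwargs):
        state_cls = kwargs.pop('cell_state_manager', None) or dict
        self.state_manager = state_cls()

svc = Service(cell_state_manager=list)  # a test would pass a fake class
assert isinstance(svc.state_manager, list)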
- cell_state_manager = kwargs.pop('cell_state_manager', None) - super(CellsManager, self).__init__(service_name='cells', - *args, **kwargs) - if cell_state_manager is None: - cell_state_manager = cells_state.CellStateManager - self.state_manager = cell_state_manager() - self.msg_runner = messaging.MessageRunner(self.state_manager) - self.driver = cells_rpc_driver.CellsRPCDriver() - self.instances_to_heal = iter([]) - - def post_start_hook(self): - """Have the driver start its servers for inter-cell communication. - Also ask our child cells for their capacities and capabilities so - we get them more quickly than just waiting for the next periodic - update. Receiving the updates from the children will cause us to - update our parents. If we don't have any children, just update - our parents immediately. - """ - # FIXME(comstud): There's currently no hooks when services are - # stopping, so we have no way to stop servers cleanly. - self.driver.start_servers(self.msg_runner) - ctxt = context.get_admin_context() - if self.state_manager.get_child_cells(): - self.msg_runner.ask_children_for_capabilities(ctxt) - self.msg_runner.ask_children_for_capacities(ctxt) - else: - self._update_our_parents(ctxt) - - @periodic_task.periodic_task - def _update_our_parents(self, ctxt): - """Update our parent cells with our capabilities and capacity - if we're at the bottom of the tree. - """ - self.msg_runner.tell_parents_our_capabilities(ctxt) - self.msg_runner.tell_parents_our_capacities(ctxt) - - @periodic_task.periodic_task - def _heal_instances(self, ctxt): - """Periodic task to send updates for a number of instances to - parent cells. - - On every run of the periodic task, we will attempt to sync - 'CONF.cells.instance_update_num_instances' number of instances. - When we get the list of instances, we shuffle them so that multiple - nova-cells services aren't attempting to sync the same instances - in lockstep. - - If CONF.cells.instance_update_at_threshold is set, only attempt - to sync instances that have been updated recently. The CONF - setting defines the maximum number of seconds old the updated_at - can be. Ie, a threshold of 3600 means to only update instances - that have modified in the last hour. - """ - - if not self.state_manager.get_parent_cells(): - # No need to sync up if we have no parents. - return - - info = {'updated_list': False} - - def _next_instance(): - try: - instance = next(self.instances_to_heal) - except StopIteration: - if info['updated_list']: - return - threshold = CONF.cells.instance_updated_at_threshold - updated_since = None - if threshold > 0: - updated_since = timeutils.utcnow() - datetime.timedelta( - seconds=threshold) - self.instances_to_heal = cells_utils.get_instances_to_sync( - ctxt, updated_since=updated_since, shuffle=True, - uuids_only=True) - info['updated_list'] = True - try: - instance = next(self.instances_to_heal) - except StopIteration: - return - return instance - - rd_context = ctxt.elevated(read_deleted='yes') - - for i in range(CONF.cells.instance_update_num_instances): - while True: - # Yield to other greenthreads - time.sleep(0) - instance_uuid = _next_instance() - if not instance_uuid: - return - try: - instance = objects.Instance.get_by_uuid(rd_context, - instance_uuid) - except exception.InstanceNotFound: - continue - self._sync_instance(ctxt, instance) - break - - def _sync_instance(self, ctxt, instance): - """Broadcast an instance_update or instance_destroy message up to - parent cells. 
- """ - if instance.deleted: - self.instance_destroy_at_top(ctxt, instance) - else: - self.instance_update_at_top(ctxt, instance) - - def build_instances(self, ctxt, build_inst_kwargs): - """Pick a cell (possibly ourselves) to build new instance(s) and - forward the request accordingly. - """ - # Target is ourselves first. - filter_properties = build_inst_kwargs.get('filter_properties') - if (filter_properties is not None and - not isinstance(filter_properties['instance_type'], - objects.Flavor)): - # NOTE(danms): Handle pre-1.30 build_instances() call. Remove me - # when we bump the RPC API version to 2.0. - flavor = objects.Flavor(**filter_properties['instance_type']) - build_inst_kwargs['filter_properties'] = dict( - filter_properties, instance_type=flavor) - instances = build_inst_kwargs['instances'] - if not isinstance(instances[0], objects.Instance): - # NOTE(danms): Handle pre-1.32 build_instances() call. Remove me - # when we bump the RPC API version to 2.0 - build_inst_kwargs['instances'] = instance_obj._make_instance_list( - ctxt, objects.InstanceList(), instances, ['system_metadata', - 'metadata']) - our_cell = self.state_manager.get_my_state() - self.msg_runner.build_instances(ctxt, our_cell, build_inst_kwargs) - - def get_cell_info_for_neighbors(self, _ctxt): - """Return cell information for our neighbor cells.""" - return self.state_manager.get_cell_info_for_neighbors() - - def run_compute_api_method(self, ctxt, cell_name, method_info, call): - """Call a compute API method in a specific cell.""" - response = self.msg_runner.run_compute_api_method(ctxt, - cell_name, - method_info, - call) - if call: - return response.value_or_raise() - - def instance_update_at_top(self, ctxt, instance): - """Update an instance at the top level cell.""" - self.msg_runner.instance_update_at_top(ctxt, instance) - - def instance_destroy_at_top(self, ctxt, instance): - """Destroy an instance at the top level cell.""" - self.msg_runner.instance_destroy_at_top(ctxt, instance) - - def instance_delete_everywhere(self, ctxt, instance, delete_type): - """This is used by API cell when it didn't know what cell - an instance was in, but the instance was requested to be - deleted or soft_deleted. So, we'll broadcast this everywhere. - """ - if isinstance(instance, dict): - instance = objects.Instance._from_db_object(ctxt, - objects.Instance(), instance) - self.msg_runner.instance_delete_everywhere(ctxt, instance, - delete_type) - - def instance_fault_create_at_top(self, ctxt, instance_fault): - """Create an instance fault at the top level cell.""" - self.msg_runner.instance_fault_create_at_top(ctxt, instance_fault) - - def bw_usage_update_at_top(self, ctxt, bw_update_info): - """Update bandwidth usage at top level cell.""" - self.msg_runner.bw_usage_update_at_top(ctxt, bw_update_info) - - def sync_instances(self, ctxt, project_id, updated_since, deleted): - """Force a sync of all instances, potentially by project_id, - and potentially since a certain date/time. - """ - self.msg_runner.sync_instances(ctxt, project_id, updated_since, - deleted) - - def service_get_all(self, ctxt, filters): - """Return services in this cell and in all child cells.""" - responses = self.msg_runner.service_get_all(ctxt, filters) - ret_services = [] - # 1 response per cell. Each response is a list of services. 
- for response in responses: - services = response.value_or_raise() - for service in services: - service = cells_utils.add_cell_to_service( - service, response.cell_name) - ret_services.append(service) - return ret_services - - @oslo_messaging.expected_exceptions(exception.CellRoutingInconsistency) - def service_get_by_compute_host(self, ctxt, host_name): - """Return a service entry for a compute host in a certain cell.""" - cell_name, host_name = cells_utils.split_cell_and_item(host_name) - response = self.msg_runner.service_get_by_compute_host(ctxt, - cell_name, - host_name) - service = response.value_or_raise() - service = cells_utils.add_cell_to_service(service, response.cell_name) - return service - - def get_host_uptime(self, ctxt, host_name): - """Return host uptime for a compute host in a certain cell - - :param host_name: fully qualified hostname. It should be in format of - parent!child@host_id - """ - cell_name, host_name = cells_utils.split_cell_and_item(host_name) - response = self.msg_runner.get_host_uptime(ctxt, cell_name, - host_name) - return response.value_or_raise() - - def service_update(self, ctxt, host_name, binary, params_to_update): - """Used to enable/disable a service. For compute services, setting to - disabled stops new builds arriving on that host. - - :param host_name: the name of the host machine that the service is - running - :param binary: The name of the executable that the service runs as - :param params_to_update: eg. {'disabled': True} - :returns: the service reference - """ - cell_name, host_name = cells_utils.split_cell_and_item(host_name) - response = self.msg_runner.service_update( - ctxt, cell_name, host_name, binary, params_to_update) - service = response.value_or_raise() - service = cells_utils.add_cell_to_service(service, response.cell_name) - return service - - def service_delete(self, ctxt, cell_service_id): - """Deletes the specified service.""" - cell_name, service_id = cells_utils.split_cell_and_item( - cell_service_id) - self.msg_runner.service_delete(ctxt, cell_name, service_id) - - @oslo_messaging.expected_exceptions(exception.CellRoutingInconsistency) - def proxy_rpc_to_manager(self, ctxt, topic, rpc_message, call, timeout): - """Proxy an RPC message as-is to a manager.""" - compute_topic = compute_rpcapi.RPC_TOPIC - cell_and_host = topic[len(compute_topic) + 1:] - cell_name, host_name = cells_utils.split_cell_and_item(cell_and_host) - response = self.msg_runner.proxy_rpc_to_manager(ctxt, cell_name, - host_name, topic, rpc_message, call, timeout) - return response.value_or_raise() - - def task_log_get_all(self, ctxt, task_name, period_beginning, - period_ending, host=None, state=None): - """Get task logs from the DB from all cells or a particular - cell. - - If 'host' is not None, host will be of the format 'cell!name@host', - with '@host' being optional. The query will be directed to the - appropriate cell and return all task logs, or task logs matching - the host if specified. - - 'state' also may be None. If it's not, filter by the state as well. - """ - if host is None: - cell_name = None - else: - cell_name, host = cells_utils.split_cell_and_item(host) - # If no cell name was given, assume that the host name is the - # cell_name and that the target is all hosts - if cell_name is None: - cell_name, host = host, cell_name - responses = self.msg_runner.task_log_get_all(ctxt, cell_name, - task_name, period_beginning, period_ending, - host=host, state=state) - # 1 response per cell. Each response is a list of task log - # entries. 
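# --- Editor's illustrative sketch (not part of the original patch): how a
# 'cell!name@host' identifier, as described in the docstring above, splits
# into a routing part and a local item. This stand-in mirrors the
# cells_utils.split_cell_and_item() calls used throughout this module,
# assuming '@' separates the cell path from the item.
def split_cell_and_item(cell_and_item):
    result = cell_and_item.rsplit('@', 1)
    if len(result) == 1:
        return (None, cell_and_item)  # no cell path was given
    return tuple(result)

assert split_cell_and_item('parent!child@compute1') == ('parent!child', 'compute1')
assert split_cell_and_item('compute1') == (None, 'compute1')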
- ret_task_logs = [] - for response in responses: - task_logs = response.value_or_raise() - for task_log in task_logs: - cells_utils.add_cell_to_task_log(task_log, - response.cell_name) - ret_task_logs.append(task_log) - return ret_task_logs - - @oslo_messaging.expected_exceptions(exception.CellRoutingInconsistency) - def compute_node_get(self, ctxt, compute_id): - """Get a compute node by ID or UUID in a specific cell.""" - cell_name, compute_id = cells_utils.split_cell_and_item( - compute_id) - response = self.msg_runner.compute_node_get(ctxt, cell_name, - compute_id) - node = response.value_or_raise() - node = cells_utils.add_cell_to_compute_node(node, cell_name) - return node - - def compute_node_get_all(self, ctxt, hypervisor_match=None): - """Return list of compute nodes in all cells.""" - responses = self.msg_runner.compute_node_get_all(ctxt, - hypervisor_match=hypervisor_match) - # 1 response per cell. Each response is a list of compute_node - # entries. - ret_nodes = [] - for response in responses: - nodes = response.value_or_raise() - for node in nodes: - node = cells_utils.add_cell_to_compute_node(node, - response.cell_name) - ret_nodes.append(node) - return ret_nodes - - def compute_node_stats(self, ctxt): - """Return compute node stats totals from all cells.""" - responses = self.msg_runner.compute_node_stats(ctxt) - totals = {} - for response in responses: - data = response.value_or_raise() - for key, val in data.items(): - totals.setdefault(key, 0) - totals[key] += val - return totals - - def actions_get(self, ctxt, cell_name, instance_uuid): - response = self.msg_runner.actions_get(ctxt, cell_name, instance_uuid) - return response.value_or_raise() - - def action_get_by_request_id(self, ctxt, cell_name, instance_uuid, - request_id): - response = self.msg_runner.action_get_by_request_id(ctxt, cell_name, - instance_uuid, - request_id) - return response.value_or_raise() - - def action_events_get(self, ctxt, cell_name, action_id): - response = self.msg_runner.action_events_get(ctxt, cell_name, - action_id) - return response.value_or_raise() - - def consoleauth_delete_tokens(self, ctxt, instance_uuid): - """Delete consoleauth tokens for an instance in API cells.""" - self.msg_runner.consoleauth_delete_tokens(ctxt, instance_uuid) - - def validate_console_port(self, ctxt, instance_uuid, console_port, - console_type): - """Validate console port with child cell compute node.""" - instance = objects.Instance.get_by_uuid(ctxt, instance_uuid) - if not instance.cell_name: - raise exception.InstanceUnknownCell(instance_uuid=instance_uuid) - response = self.msg_runner.validate_console_port(ctxt, - instance.cell_name, instance_uuid, console_port, - console_type) - return response.value_or_raise() - - def get_capacities(self, ctxt, cell_name): - return self.state_manager.get_capacities(cell_name) - - def bdm_update_or_create_at_top(self, ctxt, bdm, create=None): - """BDM was created/updated in this cell. Tell the API cells.""" - # TODO(ndipanov): Move inter-cell RPC to use objects - bdm = base_obj.obj_to_primitive(bdm) - self.msg_runner.bdm_update_or_create_at_top(ctxt, bdm, create=create) - - def bdm_destroy_at_top(self, ctxt, instance_uuid, device_name=None, - volume_id=None): - """BDM was destroyed for instance in this cell. 
Tell the API cells.""" - self.msg_runner.bdm_destroy_at_top(ctxt, instance_uuid, - device_name=device_name, - volume_id=volume_id) - - def get_migrations(self, ctxt, filters): - """Fetch migrations applying the filters.""" - target_cell = None - if "cell_name" in filters: - _path_cell_sep = cells_utils.PATH_CELL_SEP - target_cell = '%s%s%s' % (CONF.cells.name, _path_cell_sep, - filters['cell_name']) - - responses = self.msg_runner.get_migrations(ctxt, target_cell, - False, filters) - migrations = [] - for response in responses: - # response.value_or_raise returns MigrationList objects. - # MigrationList.objects returns the list of Migration objects. - migrations.extend(response.value_or_raise().objects) - return objects.MigrationList(objects=migrations) - - def instance_update_from_api(self, ctxt, instance, expected_vm_state, - expected_task_state, admin_state_reset): - """Update an instance in its cell.""" - self.msg_runner.instance_update_from_api(ctxt, instance, - expected_vm_state, - expected_task_state, - admin_state_reset) - - def start_instance(self, ctxt, instance): - """Start an instance in its cell.""" - self.msg_runner.start_instance(ctxt, instance) - - def stop_instance(self, ctxt, instance, do_cast=True, - clean_shutdown=True): - """Stop an instance in its cell.""" - response = self.msg_runner.stop_instance(ctxt, instance, - do_cast=do_cast, - clean_shutdown=clean_shutdown) - if not do_cast: - return response.value_or_raise() - - def cell_create(self, ctxt, values): - return self.state_manager.cell_create(ctxt, values) - - def cell_update(self, ctxt, cell_name, values): - return self.state_manager.cell_update(ctxt, cell_name, values) - - def cell_delete(self, ctxt, cell_name): - return self.state_manager.cell_delete(ctxt, cell_name) - - def cell_get(self, ctxt, cell_name): - return self.state_manager.cell_get(ctxt, cell_name) - - def reboot_instance(self, ctxt, instance, reboot_type): - """Reboot an instance in its cell.""" - self.msg_runner.reboot_instance(ctxt, instance, reboot_type) - - def pause_instance(self, ctxt, instance): - """Pause an instance in its cell.""" - self.msg_runner.pause_instance(ctxt, instance) - - def unpause_instance(self, ctxt, instance): - """Unpause an instance in its cell.""" - self.msg_runner.unpause_instance(ctxt, instance) - - def suspend_instance(self, ctxt, instance): - """Suspend an instance in its cell.""" - self.msg_runner.suspend_instance(ctxt, instance) - - def resume_instance(self, ctxt, instance): - """Resume an instance in its cell.""" - self.msg_runner.resume_instance(ctxt, instance) - - def terminate_instance(self, ctxt, instance, delete_type='delete'): - """Delete an instance in its cell.""" - # NOTE(rajesht): The `delete_type` parameter is passed so that it will - # be routed to destination cell, where instance deletion will happen. 
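# --- Editor's illustrative sketch (not part of the original patch): full
# cell paths are composed by joining names with PATH_CELL_SEP ('!'), as
# get_migrations() above does when turning a relative 'cell_name' filter
# into an absolute target. The cell names here are hypothetical.
PATH_CELL_SEP = '!'
local_cell = 'api'
target_cell = '%s%s%s' % (local_cell, PATH_CELL_SEP, 'child1')
assert target_cell == 'api!child1'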
- self.msg_runner.terminate_instance(ctxt, instance, - delete_type=delete_type) - - def soft_delete_instance(self, ctxt, instance): - """Soft-delete an instance in its cell.""" - self.msg_runner.soft_delete_instance(ctxt, instance) - - def resize_instance(self, ctxt, instance, flavor, - extra_instance_updates, - clean_shutdown=True): - """Resize an instance in its cell.""" - self.msg_runner.resize_instance(ctxt, instance, - flavor, extra_instance_updates, - clean_shutdown=clean_shutdown) - - def live_migrate_instance(self, ctxt, instance, block_migration, - disk_over_commit, host_name): - """Live migrate an instance in its cell.""" - self.msg_runner.live_migrate_instance(ctxt, instance, - block_migration, - disk_over_commit, - host_name) - - def revert_resize(self, ctxt, instance): - """Revert a resize for an instance in its cell.""" - self.msg_runner.revert_resize(ctxt, instance) - - def confirm_resize(self, ctxt, instance): - """Confirm a resize for an instance in its cell.""" - self.msg_runner.confirm_resize(ctxt, instance) - - def reset_network(self, ctxt, instance): - """Reset networking for an instance in its cell.""" - self.msg_runner.reset_network(ctxt, instance) - - def inject_network_info(self, ctxt, instance): - """Inject networking for an instance in its cell.""" - self.msg_runner.inject_network_info(ctxt, instance) - - def snapshot_instance(self, ctxt, instance, image_id): - """Snapshot an instance in its cell.""" - self.msg_runner.snapshot_instance(ctxt, instance, image_id) - - def backup_instance(self, ctxt, instance, image_id, backup_type, rotation): - """Backup an instance in its cell.""" - self.msg_runner.backup_instance(ctxt, instance, image_id, - backup_type, rotation) - - def rebuild_instance(self, ctxt, instance, image_href, admin_password, - files_to_inject, preserve_ephemeral, kwargs): - self.msg_runner.rebuild_instance(ctxt, instance, image_href, - admin_password, files_to_inject, - preserve_ephemeral, kwargs) - - def set_admin_password(self, ctxt, instance, new_pass): - self.msg_runner.set_admin_password(ctxt, instance, new_pass) - - def get_keypair_at_top(self, ctxt, user_id, name): - responses = self.msg_runner.get_keypair_at_top(ctxt, user_id, name) - keypairs = [resp.value for resp in responses if resp.value is not None] - - if len(keypairs) == 0: - return None - elif len(keypairs) > 1: - cell_names = ', '.join([resp.cell_name for resp in responses - if resp.value is not None]) - LOG.warning("The same keypair name '%(name)s' exists in the " - "following cells: %(cell_names)s. The keypair " - "value from the first cell is returned.", - {'name': name, 'cell_names': cell_names}) - - return keypairs[0] diff --git a/nova/cells/messaging.py b/nova/cells/messaging.py deleted file mode 100644 index d0d11dd5a63..00000000000 --- a/nova/cells/messaging.py +++ /dev/null @@ -1,1964 +0,0 @@ -# Copyright (c) 2012 Rackspace Hosting -# All Rights Reserved. -# Copyright 2010 United States Government as represented by the -# Administrator of the National Aeronautics and Space Administration. -# All Rights Reserved. -# Copyright 2013 Red Hat, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -""" -Cell messaging module. - -This module defines the different message types that are passed between -cells and the methods that they can call when the target cell has been -reached. - -The interface into this module is the MessageRunner class. -""" - -import sys -import traceback - -from eventlet import queue -from oslo_log import log as logging -import oslo_messaging as messaging -from oslo_serialization import jsonutils -from oslo_utils import excutils -from oslo_utils import importutils -from oslo_utils import timeutils -from oslo_utils import uuidutils -import six -from six.moves import range - -from nova.cells import state as cells_state -from nova.cells import utils as cells_utils -from nova import compute -from nova.compute import instance_actions -from nova.compute import rpcapi as compute_rpcapi -from nova.compute import task_states -from nova.compute import vm_states -import nova.conf -from nova.consoleauth import rpcapi as consoleauth_rpcapi -from nova import context -from nova.db import base -from nova import exception -from nova.i18n import _ -from nova import objects -from nova.objects import base as objects_base -from nova import rpc -from nova import utils - -CONF = nova.conf.CONF - -LOG = logging.getLogger(__name__) - -# Separator used between cell names for the 'full cell name' and routing -# path. -_PATH_CELL_SEP = cells_utils.PATH_CELL_SEP - - -def _reverse_path(path): - """Reverse a path. Used for sending responses upstream.""" - path_parts = path.split(_PATH_CELL_SEP) - path_parts.reverse() - return _PATH_CELL_SEP.join(path_parts) - - -def _response_cell_name_from_path(routing_path, neighbor_only=False): - """Reverse the routing_path. If we only want to send to our parent, - set neighbor_only to True. - """ - path = _reverse_path(routing_path) - if not neighbor_only or len(path) == 1: - return path - return _PATH_CELL_SEP.join(path.split(_PATH_CELL_SEP)[:2]) - - -# -# Message classes. -# - - -class _BaseMessage(object): - """Base message class. It defines data that is passed with every - single message through every cell. - - Messages are JSON-ified before sending and turned back into a - class instance when being received. - - Every message has a unique ID. This is used to route responses - back to callers. In the future, this might be used to detect - receiving the same message more than once. - - routing_path is updated on every hop through a cell. The current - cell name is appended to it (cells are separated by - _PATH_CELL_SEP ('!')). This is used to tell if we've reached the - target cell and also to determine the source of a message for - responses by reversing it. - - hop_count is incremented and compared against max_hop_count. The - only current usefulness of this is to break out of a routing loop - if someone has a broken config. - - fanout means to send to all nova-cells services running in a cell. - This is useful for capacity and capability broadcasting as well - as making sure responses get back to the nova-cells service that - is waiting. 
- """ - - # Override message_type in a subclass - message_type = None - - base_attrs_to_json = ['message_type', - 'ctxt', - 'method_name', - 'method_kwargs', - 'direction', - 'need_response', - 'fanout', - 'uuid', - 'routing_path', - 'hop_count', - 'max_hop_count'] - - def __init__(self, msg_runner, ctxt, method_name, method_kwargs, - direction, need_response=False, fanout=False, uuid=None, - routing_path=None, hop_count=0, max_hop_count=None, - **kwargs): - self.ctxt = ctxt - self.resp_queue = None - self.msg_runner = msg_runner - self.state_manager = msg_runner.state_manager - # Copy these. - self.base_attrs_to_json = self.base_attrs_to_json[:] - # Normally this would just be CONF.cells.name, but going through - # the msg_runner allows us to stub it more easily. - self.our_path_part = self.msg_runner.our_name - self.uuid = uuid - if self.uuid is None: - self.uuid = uuidutils.generate_uuid() - self.method_name = method_name - self.method_kwargs = method_kwargs - self.direction = direction - self.need_response = need_response - self.fanout = fanout - self.routing_path = routing_path - self.hop_count = hop_count - if max_hop_count is None: - max_hop_count = CONF.cells.max_hop_count - self.max_hop_count = max_hop_count - self.is_broadcast = False - self._append_hop() - # Each sub-class should set this when the message is initialized - self.next_hops = [] - self.resp_queue = None - self.serializer = objects_base.NovaObjectSerializer() - - def __repr__(self): - _dict = self._to_dict() - _dict.pop('method_kwargs') - return "<%s: %s>" % (self.__class__.__name__, _dict) - - def _append_hop(self): - """Add our hop to the routing_path.""" - routing_path = (self.routing_path and - self.routing_path + _PATH_CELL_SEP or '') - self.routing_path = routing_path + self.our_path_part - self.hop_count += 1 - - def _process_locally(self): - """Its been determined that we should process this message in this - cell. Go through the MessageRunner to call the appropriate - method for this message. Catch the response and/or exception and - encode it within a Response instance. Return it so the caller - can potentially return it to another cell... or return it to - a caller waiting in this cell. - """ - try: - resp_value = self.msg_runner._process_message_locally(self) - failure = False - except Exception: - resp_value = sys.exc_info() - failure = True - LOG.exception("Error processing message locally") - return Response(self.ctxt, self.routing_path, resp_value, failure) - - def _setup_response_queue(self): - """Shortcut to creating a response queue in the MessageRunner.""" - self.resp_queue = self.msg_runner._setup_response_queue(self) - - def _cleanup_response_queue(self): - """Shortcut to deleting a response queue in the MessageRunner.""" - if self.resp_queue: - self.msg_runner._cleanup_response_queue(self) - self.resp_queue = None - - def _wait_for_json_responses(self, num_responses=1): - """Wait for response(s) to be put into the eventlet queue. Since - each queue entry actually contains a list of JSON-ified responses, - combine them all into a single list to return. - - Destroy the eventlet queue when done. 
- """ - if not self.resp_queue: - # Source is not actually expecting a response - return - responses = [] - wait_time = CONF.cells.call_timeout - try: - for x in range(num_responses): - json_responses = self.resp_queue.get(timeout=wait_time) - responses.extend(json_responses) - except queue.Empty: - raise exception.CellTimeout() - finally: - self._cleanup_response_queue() - return responses - - def _send_json_responses(self, json_responses, neighbor_only=False, - fanout=False): - """Send list of responses to this message. Responses passed here - are JSON-ified. Targeted messages have a single response while - Broadcast messages may have multiple responses. - - If this cell was the source of the message, these responses will - be returned from self.process(). - - Otherwise, we will route the response to the source of the - request. If 'neighbor_only' is True, the response will be sent - to the neighbor cell, not the original requester. Broadcast - messages get aggregated at each hop, so neighbor_only will be - True for those messages. - """ - if not self.need_response: - return - if self.source_is_us(): - responses = [] - for json_response in json_responses: - responses.append(Response.from_json(self.ctxt, json_response)) - return responses - direction = self.direction == 'up' and 'down' or 'up' - response_kwargs = {'orig_message': self.to_json(), - 'responses': json_responses} - target_cell = _response_cell_name_from_path(self.routing_path, - neighbor_only=neighbor_only) - response = self.msg_runner._create_response_message(self.ctxt, - direction, target_cell, self.uuid, response_kwargs, - fanout=fanout) - response.process() - - def _send_response(self, response, neighbor_only=False): - """Send a response to this message. If the source of the - request was ourselves, just return the response. It'll be - passed back to the caller of self.process(). See DocString for - _send_json_responses() as it handles most of the real work for - this method. - - 'response' is an instance of Response class. - """ - if not self.need_response: - return - if self.source_is_us(): - return response - self._send_json_responses([response.to_json()], - neighbor_only=neighbor_only) - - def _send_response_from_exception(self, exc_info): - """Take an exception as returned from sys.exc_info(), encode - it in a Response, and send it. - """ - response = Response(self.ctxt, self.routing_path, exc_info, True) - return self._send_response(response) - - def _to_dict(self): - """Convert a message to a dictionary. Only used internally.""" - _dict = {} - for key in self.base_attrs_to_json: - _dict[key] = getattr(self, key) - return _dict - - def to_json(self): - """Convert a message into JSON for sending to a sibling cell.""" - _dict = self._to_dict() - # Convert context to dict. - _dict['ctxt'] = _dict['ctxt'].to_dict() - # NOTE(comstud): 'method_kwargs' needs special serialization - # because it may contain objects. - method_kwargs = _dict['method_kwargs'] - for k, v in method_kwargs.items(): - method_kwargs[k] = self.serializer.serialize_entity(self.ctxt, v) - return jsonutils.dumps(_dict) - - def source_is_us(self): - """Did this cell create this message?""" - return self.routing_path == self.our_path_part - - def process(self): - """Process a message. Deal with it locally and/or forward it to a - sibling cell. - - Override in a subclass. - """ - raise NotImplementedError() - - -class _TargetedMessage(_BaseMessage): - """A targeted message is a message that is destined for a specific - single cell. 
- - 'target_cell' can be a full cell name like 'api!child-cell' or it can - be an instance of the CellState class if the target is a neighbor cell. - """ - message_type = 'targeted' - - def __init__(self, msg_runner, ctxt, method_name, method_kwargs, - direction, target_cell, **kwargs): - super(_TargetedMessage, self).__init__(msg_runner, ctxt, - method_name, method_kwargs, direction, **kwargs) - if isinstance(target_cell, cells_state.CellState): - # Neighbor cell or ourselves. Convert it to a 'full path'. - if target_cell.is_me: - target_cell = self.our_path_part - else: - target_cell = '%s%s%s' % (self.our_path_part, - _PATH_CELL_SEP, - target_cell.name) - # NOTE(alaski): This occurs when hosts are specified with no cells - # routing information. - if target_cell is None: - reason = _('No cell given in routing path.') - raise exception.CellRoutingInconsistency(reason=reason) - self.target_cell = target_cell - self.base_attrs_to_json.append('target_cell') - - def _get_next_hop(self): - """Return the cell name for the next hop. If the next hop is - the current cell, return None. - """ - if self.target_cell == self.routing_path: - return self.state_manager.my_cell_state - target_cell = self.target_cell - routing_path = self.routing_path - current_hops = routing_path.count(_PATH_CELL_SEP) - next_hop_num = current_hops + 1 - dest_hops = target_cell.count(_PATH_CELL_SEP) - if dest_hops < current_hops: - reason_args = {'target_cell': target_cell, - 'routing_path': routing_path} - reason = _("destination is %(target_cell)s but routing_path " - "is %(routing_path)s") % reason_args - raise exception.CellRoutingInconsistency(reason=reason) - dest_name_parts = target_cell.split(_PATH_CELL_SEP) - if (_PATH_CELL_SEP.join(dest_name_parts[:next_hop_num]) != - routing_path): - reason_args = {'target_cell': target_cell, - 'routing_path': routing_path} - reason = _("destination is %(target_cell)s but routing_path " - "is %(routing_path)s") % reason_args - raise exception.CellRoutingInconsistency(reason=reason) - next_hop_name = dest_name_parts[next_hop_num] - if self.direction == 'up': - next_hop = self.state_manager.get_parent_cell(next_hop_name) - else: - next_hop = self.state_manager.get_child_cell(next_hop_name) - if not next_hop: - cell_type = 'parent' if self.direction == 'up' else 'child' - reason_args = {'cell_type': cell_type, - 'target_cell': target_cell} - reason = _("Unknown %(cell_type)s when routing to " - "%(target_cell)s") % reason_args - raise exception.CellRoutingInconsistency(reason=reason) - return next_hop - - def process(self): - """Process a targeted message. This is called for all cells - that touch this message. If the local cell is the one that - created this message, we reply directly with a Response instance. - If the local cell is not the target, an eventlet queue is created - and we wait for the response to show up via another thread - receiving the Response back. - - Responses to targeted messages are routed directly back to the - source. No eventlet queues are created in intermediate hops. - - All exceptions for processing the message across the whole - routing path are caught and encoded within the Response and - returned to the caller. - """ - try: - next_hop = self._get_next_hop() - except Exception: - exc_info = sys.exc_info() - LOG.exception("Error locating next hop for message") - return self._send_response_from_exception(exc_info) - - if next_hop.is_me: - # Final destination. 
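# --- Editor's illustrative sketch (not part of the original patch): the
# essence of _get_next_hop() above -- the next hop's name falls out of
# comparing how far routing_path has come against the full target_cell
# path ('!' is the separator). The cell names here are hypothetical.
SEP = '!'
routing_path = 'api'                   # hops taken so far
target_cell = 'api!child1!grandchild'  # where the message must end up
next_hop_num = routing_path.count(SEP) + 1
next_hop_name = target_cell.split(SEP)[next_hop_num]
assert next_hop_name == 'child1'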
- response = self._process_locally() - return self._send_response(response) - - # Need to forward via neighbor cell. - if self.need_response and self.source_is_us(): - # A response is needed and the source of the message is - # this cell. Create the eventlet queue. - self._setup_response_queue() - wait_for_response = True - else: - wait_for_response = False - - try: - # This is inside the try block, so we can encode the - # exception and return it to the caller. - if self.hop_count >= self.max_hop_count: - raise exception.CellMaxHopCountReached( - hop_count=self.hop_count) - next_hop.send_message(self) - except Exception: - exc_info = sys.exc_info() - err_str = "Failed to send message to cell: %(next_hop)s" - LOG.exception(err_str, {'next_hop': next_hop}) - self._cleanup_response_queue() - return self._send_response_from_exception(exc_info) - - if wait_for_response: - # Targeted messages only have 1 response. - remote_response = self._wait_for_json_responses()[0] - return Response.from_json(self.ctxt, remote_response) - - -class _BroadcastMessage(_BaseMessage): - """A broadcast message. This means to call a method in every single - cell going in a certain direction. - """ - message_type = 'broadcast' - - def __init__(self, msg_runner, ctxt, method_name, method_kwargs, - direction, run_locally=True, **kwargs): - super(_BroadcastMessage, self).__init__(msg_runner, ctxt, - method_name, method_kwargs, direction, **kwargs) - # The local cell creating this message has the option - # to be able to process the message locally or not. - self.run_locally = run_locally - self.is_broadcast = True - - def _get_next_hops(self): - """Set the next hops and return the number of hops. The next - hops may include ourself. - """ - if self.hop_count >= self.max_hop_count: - return [] - if self.direction == 'down': - return self.state_manager.get_child_cells() - else: - return self.state_manager.get_parent_cells() - - def _send_to_cells(self, target_cells): - """Send a message to multiple cells.""" - for cell in target_cells: - cell.send_message(self) - - def _send_json_responses(self, json_responses): - """Responses to broadcast messages always need to go to the - neighbor cell from which we received this message. That - cell aggregates the responses and makes sure to forward them - to the correct source. - """ - return super(_BroadcastMessage, self)._send_json_responses( - json_responses, neighbor_only=True, fanout=True) - - def process(self): - """Process a broadcast message. This is called for all cells - that touch this message. - - The message is sent to all cells in the certain direction and - the creator of this message has the option of whether or not - to process it locally as well. - - If responses from all cells are required, each hop creates an - eventlet queue and waits for responses from its immediate - neighbor cells. All responses are then aggregated into a - single list and are returned to the neighbor cell until the - source is reached. - - When the source is reached, a list of Response instances are - returned to the caller. - - All exceptions for processing the message across the whole - routing path are caught and encoded within the Response and - returned to the caller. It is possible to get a mix of - successful responses and failure responses. The caller is - responsible for dealing with this. 
- """ - try: - next_hops = self._get_next_hops() - except Exception: - exc_info = sys.exc_info() - LOG.exception("Error locating next hops for message") - return self._send_response_from_exception(exc_info) - - # Short circuit if we don't need to respond - if not self.need_response: - if self.run_locally: - self._process_locally() - self._send_to_cells(next_hops) - return - - # We'll need to aggregate all of the responses (from ourself - # and our sibling cells) into 1 response - try: - self._setup_response_queue() - self._send_to_cells(next_hops) - except Exception: - # Error just trying to send to cells. Send a single response - # with the failure. - exc_info = sys.exc_info() - LOG.exception("Error sending message to next hops.") - self._cleanup_response_queue() - return self._send_response_from_exception(exc_info) - - if self.run_locally: - # Run locally and store the Response. - local_response = self._process_locally() - else: - local_response = None - - try: - remote_responses = self._wait_for_json_responses( - num_responses=len(next_hops)) - except Exception: - # Error waiting for responses, most likely a timeout. - # Send a single response back with the failure. - exc_info = sys.exc_info() - LOG.exception("Error waiting for responses from neighbor cells") - return self._send_response_from_exception(exc_info) - - if local_response: - remote_responses.append(local_response.to_json()) - return self._send_json_responses(remote_responses) - - -class _ResponseMessage(_TargetedMessage): - """A response message is really just a special targeted message, - saying to call 'parse_responses' when we reach the source of a 'call'. - - The 'fanout' attribute on this message may be true if we're responding - to a broadcast or if we're about to respond to the source of an - original target message. Because multiple nova-cells services may - be running within a cell, we need to make sure the response gets - back to the correct one, so we have to fanout. - """ - message_type = 'response' - - def __init__(self, msg_runner, ctxt, method_name, method_kwargs, - direction, target_cell, response_uuid, **kwargs): - super(_ResponseMessage, self).__init__(msg_runner, ctxt, - method_name, method_kwargs, direction, target_cell, **kwargs) - self.response_uuid = response_uuid - self.base_attrs_to_json.append('response_uuid') - - def process(self): - """Process a response. If the target is the local cell, process - the response here. Otherwise, forward it to where it needs to - go. - """ - next_hop = self._get_next_hop() - if next_hop.is_me: - self._process_locally() - return - if self.fanout is False: - # Really there's 1 more hop on each of these below, but - # it doesn't matter for this logic. - target_hops = self.target_cell.count(_PATH_CELL_SEP) - current_hops = self.routing_path.count(_PATH_CELL_SEP) - if current_hops + 1 == target_hops: - # Next hop is the target.. so we must fanout. See - # DocString above. - self.fanout = True - next_hop.send_message(self) - - -# -# Methods that may be called when processing messages after reaching -# a target cell. 
-# - - -class _BaseMessageMethods(base.Base): - """Base class for defining methods by message types.""" - def __init__(self, msg_runner): - super(_BaseMessageMethods, self).__init__() - self.msg_runner = msg_runner - self.state_manager = msg_runner.state_manager - self.compute_api = compute.API() - self.compute_rpcapi = compute_rpcapi.ComputeAPI() - self.consoleauth_rpcapi = consoleauth_rpcapi.ConsoleAuthAPI() - self.host_api = compute.HostAPI() - - def task_log_get_all(self, message, task_name, period_beginning, - period_ending, host, state): - """Get task logs from the DB. The message could have - directly targeted this cell, or it could have been a broadcast - message. - - If 'host' is not None, filter by host. - If 'state' is not None, filter by state. - """ - task_logs = self.db.task_log_get_all(message.ctxt, task_name, - period_beginning, - period_ending, - host=host, - state=state) - return jsonutils.to_primitive(task_logs) - - -class _ResponseMessageMethods(_BaseMessageMethods): - """Methods that are called from a ResponseMessage. There's only - 1 method (parse_responses) and it is called when the message reaches - the source of a 'call'. All we do is stuff the response into the - eventlet queue to signal the caller that's waiting. - """ - def parse_responses(self, message, orig_message, responses): - self.msg_runner._put_response(message.response_uuid, - responses) - - -class _TargetedMessageMethods(_BaseMessageMethods): - """These are the methods that can be called when routing a message - to a specific cell. - """ - def __init__(self, *args, **kwargs): - super(_TargetedMessageMethods, self).__init__(*args, **kwargs) - - def build_instances(self, message, build_inst_kwargs): - """Parent cell told us to schedule new instance creation.""" - self.msg_runner.scheduler.build_instances(message, build_inst_kwargs) - - def run_compute_api_method(self, message, method_info): - """Run a method in the compute api class.""" - method = method_info['method'] - fn = getattr(self.compute_api, method, None) - if not fn: - detail = _("Unknown method '%(method)s' in compute API") - raise exception.CellServiceAPIMethodNotFound( - detail=detail % {'method': method}) - args = list(method_info['method_args']) - # 1st arg is instance_uuid that we need to turn into the - # instance object. - instance_uuid = args[0] - # NOTE: compute/api.py loads these when retrieving an instance for an - # API request, so there's a good chance that this is what was loaded. - expected_attrs = ['metadata', 'system_metadata', 'security_groups', - 'info_cache'] - - try: - instance = objects.Instance.get_by_uuid(message.ctxt, - instance_uuid, expected_attrs=expected_attrs) - args[0] = instance - except exception.InstanceNotFound: - with excutils.save_and_reraise_exception(): - # Must be a race condition. Let's try to resolve it by - # telling the top level cells that this instance doesn't - # exist. 
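# --- Editor's illustrative sketch (not part of the original patch): the
# getattr() dispatch used by run_compute_api_method() above -- look a
# method up by name on the compute API and fail cleanly when it does not
# exist. ComputeAPIStub is hypothetical and RuntimeError stands in for the
# real CellServiceAPIMethodNotFound.
class ComputeAPIStub:
    def start(self, ctxt, instance):
        return ('started', instance)

def run_method(api, method_info):
    fn = getattr(api, method_info['method'], None)
    if fn is None:
        raise RuntimeError("Unknown method %r in compute API"
                           % method_info['method'])
    return fn(None, *method_info['method_args'])

assert run_method(ComputeAPIStub(),
                  {'method': 'start', 'method_args': ['uuid-1']})[0] == 'started'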
- instance = objects.Instance(context=message.ctxt, - uuid=instance_uuid) - self.msg_runner.instance_destroy_at_top(message.ctxt, - instance) - return fn(message.ctxt, *args, **method_info['method_kwargs']) - - def update_capabilities(self, message, cell_name, capabilities): - """A child cell told us about their capabilities.""" - LOG.debug("Received capabilities from child cell " - "%(cell_name)s: %(capabilities)s", - {'cell_name': cell_name, 'capabilities': capabilities}) - self.state_manager.update_cell_capabilities(cell_name, - capabilities) - # Go ahead and update our parents now that a child updated us - self.msg_runner.tell_parents_our_capabilities(message.ctxt) - - def update_capacities(self, message, cell_name, capacities): - """A child cell told us about their capacity.""" - LOG.debug("Received capacities from child cell " - "%(cell_name)s: %(capacities)s", - {'cell_name': cell_name, 'capacities': capacities}) - self.state_manager.update_cell_capacities(cell_name, - capacities) - # Go ahead and update our parents now that a child updated us - self.msg_runner.tell_parents_our_capacities(message.ctxt) - - def announce_capabilities(self, message): - """A parent cell has told us to send our capabilities, so let's - do so. - """ - self.msg_runner.tell_parents_our_capabilities(message.ctxt) - - def announce_capacities(self, message): - """A parent cell has told us to send our capacity, so let's - do so. - """ - self.msg_runner.tell_parents_our_capacities(message.ctxt) - - def service_get_by_compute_host(self, message, host_name): - """Return the service entry for a compute host.""" - return objects.Service.get_by_compute_host(message.ctxt, host_name) - - def service_update(self, message, host_name, binary, params_to_update): - """Used to enable/disable a service. For compute services, setting to - disabled stops new builds arriving on that host. - - :param host_name: the name of the host machine that the service is - running - :param binary: The name of the executable that the service runs as - :param params_to_update: eg. {'disabled': True} - """ - return self.host_api._service_update(message.ctxt, host_name, binary, - params_to_update) - - def service_delete(self, message, service_id): - """Deletes the specified service.""" - self.host_api._service_delete(message.ctxt, service_id) - - def proxy_rpc_to_manager(self, message, host_name, rpc_message, - topic, timeout): - """Proxy RPC to the given compute topic.""" - # Check that the host exists. 
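# --- Editor's illustrative sketch (not part of the original patch):
# splitting an RPC topic of the form 'topic.server' with str.partition(),
# as proxy_rpc_to_manager() does just below; an empty server part means
# "no specific server".
topic, _sep, server = 'compute.host1'.partition('.')
assert (topic, server) == ('compute', 'host1')
topic, _sep, server = 'compute'.partition('.')
assert (topic, server or None) == ('compute', None)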
- objects.Service.get_by_compute_host(message.ctxt, host_name) - - topic, _sep, server = topic.partition('.') - - cctxt = rpc.get_client(messaging.Target(topic=topic, - server=server or None)) - method = rpc_message['method'] - kwargs = rpc_message['args'] - - if message.need_response: - cctxt = cctxt.prepare(timeout=timeout) - return cctxt.call(message.ctxt, method, **kwargs) - else: - cctxt.cast(message.ctxt, method, **kwargs) - - def compute_node_get(self, message, compute_id): - """Get compute node by ID or UUID.""" - if uuidutils.is_uuid_like(compute_id): - return objects.ComputeNode.get_by_uuid(message.ctxt, compute_id) - return objects.ComputeNode.get_by_id(message.ctxt, compute_id) - - def actions_get(self, message, instance_uuid): - actions = self.db.actions_get(message.ctxt, instance_uuid) - return jsonutils.to_primitive(actions) - - def action_get_by_request_id(self, message, instance_uuid, request_id): - action = self.db.action_get_by_request_id(message.ctxt, instance_uuid, - request_id) - return jsonutils.to_primitive(action) - - def action_events_get(self, message, action_id): - action_events = self.db.action_events_get(message.ctxt, action_id) - return jsonutils.to_primitive(action_events) - - def validate_console_port(self, message, instance_uuid, console_port, - console_type): - """Validate console port with child cell compute node.""" - # 1st arg is instance_uuid that we need to turn into the - # instance object. - try: - instance = objects.Instance.get_by_uuid(message.ctxt, - instance_uuid) - except exception.InstanceNotFound: - with excutils.save_and_reraise_exception(): - # Must be a race condition. Let's try to resolve it by - # telling the top level cells that this instance doesn't - # exist. - instance = objects.Instance(context=message.ctxt, - uuid=instance_uuid) - self.msg_runner.instance_destroy_at_top(message.ctxt, - instance) - return self.compute_rpcapi.validate_console_port(message.ctxt, - instance, console_port, console_type) - - def get_migrations(self, message, filters): - return self.compute_api.get_migrations(message.ctxt, filters) - - def instance_update_from_api(self, message, instance, - expected_vm_state, - expected_task_state, - admin_state_reset): - """Update an instance in this cell.""" - if not admin_state_reset: - # NOTE(comstud): We don't want to nuke this cell's view - # of vm_state and task_state unless it's a forced reset - # via admin API. - instance.obj_reset_changes(['vm_state', 'task_state']) - # NOTE(alaski): A cell should be authoritative for its system_metadata - # and metadata so we don't want to sync it down from the api. - instance.obj_reset_changes(['metadata', 'system_metadata']) - with instance.skip_cells_sync(): - instance.save(expected_vm_state=expected_vm_state, - expected_task_state=expected_task_state) - - def _call_compute_api_with_obj(self, ctxt, instance, method, *args, - **kwargs): - try: - # NOTE(comstud): We need to refresh the instance from this - # cell's view in the DB. - instance.refresh() - except exception.InstanceNotFound: - with excutils.save_and_reraise_exception(): - # Must be a race condition. Let's try to resolve it by - # telling the top level cells that this instance doesn't - # exist. 
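# --- Editor's illustrative sketch (not part of the original patch): the
# save-and-reraise pattern used in these except blocks -- run cleanup while
# guaranteeing the original exception still propagates. This is a
# simplified stand-in for oslo_utils.excutils.save_and_reraise_exception().
import contextlib
import sys

@contextlib.contextmanager
def save_and_reraise():
    exc = sys.exc_info()[1]  # assumes we are inside an 'except' block
    yield
    raise exc

notified = []
try:
    try:
        raise LookupError('instance not found')
    except LookupError:
        with save_and_reraise():
            notified.append(True)  # e.g. tell the top cell it is gone
except LookupError:
    pass
assert notified == [True]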
- instance = objects.Instance(context=ctxt, - uuid=instance.uuid) - self.msg_runner.instance_destroy_at_top(ctxt, - instance) - except exception.InstanceInfoCacheNotFound: - if method not in ('delete', 'force_delete'): - raise - - fn = getattr(self.compute_api, method, None) - return fn(ctxt, instance, *args, **kwargs) - - def start_instance(self, message, instance): - """Start an instance via compute_api.start().""" - self._call_compute_api_with_obj(message.ctxt, instance, 'start') - - def stop_instance(self, message, instance, clean_shutdown=True): - """Stop an instance via compute_api.stop().""" - do_cast = not message.need_response - return self._call_compute_api_with_obj(message.ctxt, instance, - 'stop', do_cast=do_cast, - clean_shutdown=clean_shutdown) - - def reboot_instance(self, message, instance, reboot_type): - """Reboot an instance via compute_api.reboot().""" - self._call_compute_api_with_obj(message.ctxt, instance, 'reboot', - reboot_type=reboot_type) - - def suspend_instance(self, message, instance): - """Suspend an instance via compute_api.suspend().""" - self._call_compute_api_with_obj(message.ctxt, instance, 'suspend') - - def resume_instance(self, message, instance): - """Resume an instance via compute_api.suspend().""" - self._call_compute_api_with_obj(message.ctxt, instance, 'resume') - - def get_host_uptime(self, message, host_name): - return self.host_api.get_host_uptime(message.ctxt, host_name) - - def terminate_instance(self, message, instance, delete_type='delete'): - self._call_compute_api_with_obj(message.ctxt, instance, delete_type) - - def soft_delete_instance(self, message, instance): - self._call_compute_api_with_obj(message.ctxt, instance, 'soft_delete') - - def pause_instance(self, message, instance): - """Pause an instance via compute_api.pause().""" - self._call_compute_api_with_obj(message.ctxt, instance, 'pause') - - def unpause_instance(self, message, instance): - """Unpause an instance via compute_api.pause().""" - self._call_compute_api_with_obj(message.ctxt, instance, 'unpause') - - def resize_instance(self, message, instance, flavor, - extra_instance_updates, clean_shutdown=True): - """Resize an instance via compute_api.resize().""" - self._call_compute_api_with_obj(message.ctxt, instance, 'resize', - flavor_id=flavor['flavorid'], - clean_shutdown=clean_shutdown, - **extra_instance_updates) - - def live_migrate_instance(self, message, instance, block_migration, - disk_over_commit, host_name): - """Live migrate an instance via compute_api.live_migrate().""" - self._call_compute_api_with_obj(message.ctxt, instance, - 'live_migrate', block_migration, - disk_over_commit, host_name) - - def revert_resize(self, message, instance): - """Revert a resize for an instance in its cell.""" - self._call_compute_api_with_obj(message.ctxt, instance, - 'revert_resize') - - def confirm_resize(self, message, instance): - """Confirm a resize for an instance in its cell.""" - self._call_compute_api_with_obj(message.ctxt, instance, - 'confirm_resize') - - def reset_network(self, message, instance): - """Reset networking for an instance in its cell.""" - self._call_compute_api_with_obj(message.ctxt, instance, - 'reset_network') - - def inject_network_info(self, message, instance): - """Inject networking for an instance in its cell.""" - self._call_compute_api_with_obj(message.ctxt, instance, - 'inject_network_info') - - def snapshot_instance(self, message, instance, image_id): - """Snapshot an instance in its cell.""" - instance.refresh() - instance.task_state = 
task_states.IMAGE_SNAPSHOT_PENDING - instance.save(expected_task_state=[None]) - - objects.InstanceAction.action_start( - message.ctxt, instance.uuid, instance_actions.CREATE_IMAGE, - want_result=False) - - self.compute_rpcapi.snapshot_instance(message.ctxt, - instance, - image_id) - - def backup_instance(self, message, instance, image_id, - backup_type, rotation): - """Backup an instance in its cell.""" - instance.refresh() - instance.task_state = task_states.IMAGE_BACKUP - instance.save(expected_task_state=[None]) - - objects.InstanceAction.action_start( - message.ctxt, instance.uuid, instance_actions.BACKUP, - want_result=False) - - self.compute_rpcapi.backup_instance(message.ctxt, - instance, - image_id, - backup_type, - rotation) - - def rebuild_instance(self, message, instance, image_href, admin_password, - files_to_inject, preserve_ephemeral, kwargs): - kwargs['preserve_ephemeral'] = preserve_ephemeral - self._call_compute_api_with_obj(message.ctxt, instance, 'rebuild', - image_href, admin_password, - files_to_inject, **kwargs) - - def set_admin_password(self, message, instance, new_pass): - self._call_compute_api_with_obj(message.ctxt, instance, - 'set_admin_password', new_pass) - - -class _BroadcastMessageMethods(_BaseMessageMethods): - """These are the methods that can be called as a part of a broadcast - message. - """ - def _at_the_top(self): - """Are we the API level?""" - return not self.state_manager.get_parent_cells() - - def _get_expected_vm_state(self, instance): - """To attempt to address out-of-order messages, do some sanity - checking on the VM states. Add some requirements for - vm_state to the instance.save() call if necessary. - """ - expected_vm_state_map = { - # For updates containing 'vm_state' of 'building', - # only allow them to occur if the DB already says - # 'building' or if the vm_state is None. None - # really shouldn't be possible as instances always - # start out in 'building' anyway.. but just in case. - vm_states.BUILDING: [vm_states.BUILDING, None]} - - if instance.obj_attr_is_set('vm_state'): - return expected_vm_state_map.get(instance.vm_state) - - def _get_expected_task_state(self, instance): - """To attempt to address out-of-order messages, do some sanity - checking on the task states. Add some requirements for - task_state to the instance.save() call if necessary. - """ - expected_task_state_map = { - # Always allow updates when task_state doesn't change, - # but also make sure we don't set resize/rebuild task - # states for old messages when we've potentially already - # processed the ACTIVE/None messages. Ie, these checks - # will prevent stomping on any ACTIVE/None messages - # we already processed. 
- task_states.REBUILD_BLOCK_DEVICE_MAPPING: - [task_states.REBUILD_BLOCK_DEVICE_MAPPING, - task_states.REBUILDING], - task_states.REBUILD_SPAWNING: - [task_states.REBUILD_SPAWNING, - task_states.REBUILD_BLOCK_DEVICE_MAPPING, - task_states.REBUILDING], - task_states.RESIZE_MIGRATING: - [task_states.RESIZE_MIGRATING, - task_states.RESIZE_PREP], - task_states.RESIZE_MIGRATED: - [task_states.RESIZE_MIGRATED, - task_states.RESIZE_MIGRATING, - task_states.RESIZE_PREP], - task_states.RESIZE_FINISH: - [task_states.RESIZE_FINISH, - task_states.RESIZE_MIGRATED, - task_states.RESIZE_MIGRATING, - task_states.RESIZE_PREP]} - - if instance.obj_attr_is_set('task_state'): - return expected_task_state_map.get(instance.task_state) - - def instance_update_at_top(self, message, instance, **kwargs): - """Update an instance in the DB if we're a top level cell.""" - if not self._at_the_top(): - return - - # Remove things that we can't update in the top level cells. - # 'metadata' is only updated in the API cell, so don't overwrite - # it based on what child cells say. Make sure to update - # 'cell_name' based on the routing path. - items_to_remove = ['id', 'security_groups', 'volumes', 'cell_name', - 'name', 'metadata'] - instance.obj_reset_changes(items_to_remove) - instance.cell_name = _reverse_path(message.routing_path) - - # instance.display_name could be unicode - instance_repr = utils.get_obj_repr_unicode(instance) - LOG.debug("Got update for instance: %(instance)s", - {'instance': instance_repr}, instance_uuid=instance.uuid) - - expected_vm_state = self._get_expected_vm_state(instance) - expected_task_state = self._get_expected_task_state(instance) - - # It's possible due to some weird condition that the instance - # was already set as deleted... so we'll attempt to update - # it with permissions that allows us to read deleted. - with utils.temporary_mutation(message.ctxt, read_deleted="yes"): - try: - with instance.skip_cells_sync(): - instance.save(expected_vm_state=expected_vm_state, - expected_task_state=expected_task_state) - except exception.InstanceNotFound: - # FIXME(comstud): Strange. Need to handle quotas here, - # if we actually want this code to remain.. - instance.create() - except exception.NotFound: - # Can happen if we try to update a deleted instance's - # network information, for example. - pass - - def instance_destroy_at_top(self, message, instance, **kwargs): - """Destroy an instance from the DB if we're a top level cell.""" - if not self._at_the_top(): - return - LOG.debug("Got update to delete instance", - instance_uuid=instance.uuid) - try: - instance.destroy() - except exception.InstanceNotFound: - pass - except exception.ObjectActionError: - # NOTE(alaski): instance_destroy_at_top will sometimes be called - # when an instance does not exist in a cell but does in the parent. - # In that case instance.id is not set which causes instance.destroy - # to fail thinking that the object has already been destroyed. - # That's the right assumption for it to make because without cells - # that would be true. But for cells we'll try to pull the actual - # instance and try to delete it again. - try: - instance = objects.Instance.get_by_uuid(message.ctxt, - instance.uuid) - instance.destroy() - except exception.InstanceNotFound: - pass - - def instance_delete_everywhere(self, message, instance, delete_type, - **kwargs): - """Call compute API delete() or soft_delete() in every cell. 
- This is used when the API cell doesn't know what cell an instance - belongs to but the instance was requested to be deleted or - soft-deleted. So, we'll run it everywhere. - """ - LOG.debug("Got broadcast to %(delete_type)s delete instance", - {'delete_type': delete_type}, instance=instance) - if delete_type == 'soft': - self.compute_api.soft_delete(message.ctxt, instance) - else: - self.compute_api.delete(message.ctxt, instance) - - def instance_fault_create_at_top(self, message, instance_fault, **kwargs): - """Destroy an instance from the DB if we're a top level cell.""" - if not self._at_the_top(): - return - items_to_remove = ['id'] - for key in items_to_remove: - instance_fault.pop(key, None) - LOG.debug("Got message to create instance fault: %s", instance_fault) - fault = objects.InstanceFault(context=message.ctxt) - fault.update(instance_fault) - fault.create() - - def bw_usage_update_at_top(self, message, bw_update_info, **kwargs): - """Update Bandwidth usage in the DB if we're a top level cell.""" - if not self._at_the_top(): - return - self.db.bw_usage_update(message.ctxt, **bw_update_info) - - def _sync_instance(self, ctxt, instance): - if instance.deleted: - self.msg_runner.instance_destroy_at_top(ctxt, instance) - else: - self.msg_runner.instance_update_at_top(ctxt, instance) - - def sync_instances(self, message, project_id, updated_since, deleted, - **kwargs): - projid_str = project_id is None and "" or project_id - since_str = updated_since is None and "" or updated_since - LOG.info("Forcing a sync of instances, project_id=" - "%(projid_str)s, updated_since=%(since_str)s", - {'projid_str': projid_str, 'since_str': since_str}) - if updated_since is not None: - updated_since = timeutils.parse_isotime(updated_since) - instances = cells_utils.get_instances_to_sync(message.ctxt, - updated_since=updated_since, project_id=project_id, - deleted=deleted) - for instance in instances: - self._sync_instance(message.ctxt, instance) - - def service_get_all(self, message, filters): - if filters is None: - filters = {} - disabled = filters.pop('disabled', None) - services = objects.ServiceList.get_all(message.ctxt, disabled=disabled) - ret_services = [] - for service in services: - for key, val in filters.items(): - if getattr(service, key) != val: - break - else: - ret_services.append(service) - return ret_services - - def compute_node_get_all(self, message, hypervisor_match): - """Return compute nodes in this cell.""" - if hypervisor_match is not None: - return objects.ComputeNodeList.get_by_hypervisor(message.ctxt, - hypervisor_match) - return objects.ComputeNodeList.get_all(message.ctxt) - - def compute_node_stats(self, message): - """Return compute node stats from this cell.""" - return self.db.compute_node_statistics(message.ctxt) - - def consoleauth_delete_tokens(self, message, instance_uuid): - """Delete consoleauth tokens for an instance in API cells.""" - if not self._at_the_top(): - return - self.consoleauth_rpcapi.delete_tokens_for_instance(message.ctxt, - instance_uuid) - - def bdm_update_or_create_at_top(self, message, bdm, create): - """Create or update a block device mapping in API cells. If - create is True, only try to create. If create is None, try to - update but fall back to create. If create is False, only attempt - to update. This maps to nova-conductor's behavior. 
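The ``create`` tri-state described in that docstring collapses to a three-way dispatch. A minimal sketch under the same semantics; the db_* callables are hypothetical stand-ins for the real nova.db functions:

def bdm_update_or_create(db_create, db_update_or_create, db_update,
                         ctxt, bdm, create):
    # create=True: only create; create=None: update, falling back to
    # create; create=False: only update (nova-conductor's behavior).
    if create is None:
        return db_update_or_create(ctxt, bdm)
    if create:
        return db_create(ctxt, bdm)
    return db_update(ctxt, bdm)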
- """ - if not self._at_the_top(): - return - items_to_remove = ['id'] - for key in items_to_remove: - bdm.pop(key, None) - if create is None: - LOG.debug('Calling db.block_device_mapping_update_or_create from ' - 'API cell with values: %s', bdm) - self.db.block_device_mapping_update_or_create(message.ctxt, - bdm, - legacy=False) - return - elif create is True: - LOG.debug('Calling db.block_device_mapping_create from API ' - 'cell with values: %s', bdm) - self.db.block_device_mapping_create(message.ctxt, bdm, - legacy=False) - return - # Unfortunately this update call wants BDM ID... but we don't know - # what it is in this cell. Search for it.. try matching either - # device_name or volume_id. - dev_name = bdm['device_name'] - vol_id = bdm['volume_id'] - instance_bdms = self.db.block_device_mapping_get_all_by_instance( - message.ctxt, bdm['instance_uuid']) - for instance_bdm in instance_bdms: - if dev_name and instance_bdm['device_name'] == dev_name: - break - if vol_id and instance_bdm['volume_id'] == vol_id: - break - else: - LOG.warning("No match when trying to update BDM: %(bdm)s", - dict(bdm=bdm)) - return - LOG.debug('Calling db.block_device_mapping_update from API cell with ' - 'bdm id %s and values: %s', instance_bdm['id'], bdm) - self.db.block_device_mapping_update(message.ctxt, - instance_bdm['id'], bdm, - legacy=False) - - def bdm_destroy_at_top(self, message, instance_uuid, device_name, - volume_id): - """Destroy a block device mapping in API cells by device name - or volume_id. device_name or volume_id can be None, but not both. - """ - if not self._at_the_top(): - return - if device_name: - self.db.block_device_mapping_destroy_by_instance_and_device( - message.ctxt, instance_uuid, device_name) - elif volume_id: - self.db.block_device_mapping_destroy_by_instance_and_volume( - message.ctxt, instance_uuid, volume_id) - - def get_migrations(self, message, filters): - return self.compute_api.get_migrations(message.ctxt, filters) - - def get_keypair_at_top(self, message, user_id, name): - """Get keypair in API cells by name. Just return None if there is - no match keypair. - """ - if not self._at_the_top(): - return - - try: - return objects.KeyPair.get_by_name(message.ctxt, user_id, name) - except exception.KeypairNotFound: - pass - - -_CELL_MESSAGE_TYPE_TO_MESSAGE_CLS = {'targeted': _TargetedMessage, - 'broadcast': _BroadcastMessage, - 'response': _ResponseMessage} -_CELL_MESSAGE_TYPE_TO_METHODS_CLS = {'targeted': _TargetedMessageMethods, - 'broadcast': _BroadcastMessageMethods, - 'response': _ResponseMessageMethods} - - -# -# Below are the public interfaces into this module. -# - - -class MessageRunner(object): - """This class is the main interface into creating messages and - processing them. - - Public methods in this class are typically called by the CellsManager - to create a new message and process it with the exception of - 'message_from_json' which should be used by CellsDrivers to convert - a JSONified message it has received back into the appropriate Message - class. - - Private methods are used internally when we need to keep some - 'global' state. For instance, eventlet queues used for responses are - held in this class. Also, when a Message is process()ed above and - it's determined we should take action locally, - _process_message_locally() will be called. - - When needing to add a new method to call in a Cell2Cell message, - define the new method below and also add it to the appropriate - MessageMethods class where the real work will be done. 
- """ - - def __init__(self, state_manager): - self.state_manager = state_manager - cells_scheduler_cls = importutils.import_class( - CONF.cells.scheduler) - self.scheduler = cells_scheduler_cls(self) - self.response_queues = {} - self.methods_by_type = {} - self.our_name = CONF.cells.name - for msg_type, cls in _CELL_MESSAGE_TYPE_TO_METHODS_CLS.items(): - self.methods_by_type[msg_type] = cls(self) - self.serializer = objects_base.NovaObjectSerializer() - - def _process_message_locally(self, message): - """Message processing will call this when its determined that - the message should be processed within this cell. Find the - method to call based on the message type, and call it. The - caller is responsible for catching exceptions and returning - results to cells, if needed. - """ - methods = self.methods_by_type[message.message_type] - fn = getattr(methods, message.method_name) - return fn(message, **message.method_kwargs) - - def _put_response(self, response_uuid, response): - """Put a response into a response queue. This is called when - a _ResponseMessage is processed in the cell that initiated a - 'call' to another cell. - """ - resp_queue = self.response_queues.get(response_uuid) - if not resp_queue: - # Response queue is gone. We must have restarted or we - # received a response after our timeout period. - return - resp_queue.put(response) - - def _setup_response_queue(self, message): - """Set up an eventlet queue to use to wait for replies. - - Replies come back from the target cell as a _ResponseMessage - being sent back to the source. - """ - resp_queue = queue.Queue() - self.response_queues[message.uuid] = resp_queue - return resp_queue - - def _cleanup_response_queue(self, message): - """Stop tracking the response queue either because we're - done receiving responses, or we've timed out. - """ - try: - del self.response_queues[message.uuid] - except KeyError: - # Ignore if queue is gone already somehow. - pass - - def _create_response_message(self, ctxt, direction, target_cell, - response_uuid, response_kwargs, **kwargs): - """Create a ResponseMessage. This is used internally within - the nova.cells.messaging module. - """ - return _ResponseMessage(self, ctxt, 'parse_responses', - response_kwargs, direction, target_cell, - response_uuid, **kwargs) - - def _get_migrations_for_cell(self, ctxt, cell_name, filters): - method_kwargs = dict(filters=filters) - message = _TargetedMessage(self, ctxt, 'get_migrations', - method_kwargs, 'down', cell_name, - need_response=True) - - response = message.process() - if response.failure and isinstance(response.value[1], - exception.CellRoutingInconsistency): - return [] - - return [response] - - def message_from_json(self, json_message): - """Turns a message in JSON format into an appropriate Message - instance. This is called when cells receive a message from - another cell. - """ - message_dict = jsonutils.loads(json_message) - # Need to convert context back. - ctxt = message_dict['ctxt'] - message_dict['ctxt'] = context.RequestContext.from_dict(ctxt) - # NOTE(comstud): We also need to re-serialize any objects that - # exist in 'method_kwargs'. 
- method_kwargs = message_dict['method_kwargs'] - for k, v in method_kwargs.items(): - method_kwargs[k] = self.serializer.deserialize_entity( - message_dict['ctxt'], v) - message_type = message_dict.pop('message_type') - message_cls = _CELL_MESSAGE_TYPE_TO_MESSAGE_CLS[message_type] - return message_cls(self, **message_dict) - - def ask_children_for_capabilities(self, ctxt): - """Tell child cells to send us capabilities. This is typically - called on startup of the nova-cells service. - """ - child_cells = self.state_manager.get_child_cells() - for child_cell in child_cells: - message = _TargetedMessage(self, ctxt, - 'announce_capabilities', - dict(), 'down', child_cell) - message.process() - - def ask_children_for_capacities(self, ctxt): - """Tell child cells to send us capacities. This is typically - called on startup of the nova-cells service. - """ - child_cells = self.state_manager.get_child_cells() - for child_cell in child_cells: - message = _TargetedMessage(self, ctxt, 'announce_capacities', - dict(), 'down', child_cell) - message.process() - - def tell_parents_our_capabilities(self, ctxt): - """Send our capabilities to parent cells.""" - parent_cells = self.state_manager.get_parent_cells() - if not parent_cells: - return - my_cell_info = self.state_manager.get_my_state() - capabs = self.state_manager.get_our_capabilities() - parent_cell_names = ','.join(x.name for x in parent_cells) - LOG.debug("Updating parents [%(parent_cell_names)s] with " - "our capabilities: %(capabs)s", - {'parent_cell_names': parent_cell_names, - 'capabs': capabs}) - # We have to turn the sets into lists so they can potentially - # be json encoded when the raw message is sent. - for key, values in capabs.items(): - capabs[key] = list(values) - method_kwargs = {'cell_name': my_cell_info.name, - 'capabilities': capabs} - for cell in parent_cells: - message = _TargetedMessage(self, ctxt, 'update_capabilities', - method_kwargs, 'up', cell, fanout=True) - message.process() - - def tell_parents_our_capacities(self, ctxt): - """Send our capacities to parent cells.""" - parent_cells = self.state_manager.get_parent_cells() - if not parent_cells: - return - my_cell_info = self.state_manager.get_my_state() - capacities = self.state_manager.get_our_capacities() - parent_cell_names = ','.join(x.name for x in parent_cells) - LOG.debug("Updating parents [%(parent_cell_names)s] with " - "our capacities: %(capacities)s", - {'parent_cell_names': parent_cell_names, - 'capacities': capacities}) - method_kwargs = {'cell_name': my_cell_info.name, - 'capacities': capacities} - for cell in parent_cells: - message = _TargetedMessage(self, ctxt, 'update_capacities', - method_kwargs, 'up', cell, fanout=True) - message.process() - - def build_instances(self, ctxt, target_cell, build_inst_kwargs): - """Called by the cell scheduler to tell a child cell to build - instance(s). 
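tell_parents_our_capabilities() has to flatten sets into lists before the raw message is JSON-encoded, because sets are not JSON-serializable. The conversion in isolation:

import json

def capabilities_to_json(capabs):
    # json.dumps() rejects sets, so turn each value into a sorted list
    # before putting the capabilities on the wire.
    return json.dumps({key: sorted(values) for key, values in capabs.items()})

capabilities_to_json({'hypervisors': {'kvm', 'qemu'}})
# -> '{"hypervisors": ["kvm", "qemu"]}'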
- """ - method_kwargs = dict(build_inst_kwargs=build_inst_kwargs) - message = _TargetedMessage(self, ctxt, 'build_instances', - method_kwargs, 'down', target_cell) - message.process() - - def run_compute_api_method(self, ctxt, cell_name, method_info, call): - """Call a compute API method in a specific cell.""" - message = _TargetedMessage(self, ctxt, 'run_compute_api_method', - dict(method_info=method_info), 'down', - cell_name, need_response=call) - return message.process() - - def instance_update_at_top(self, ctxt, instance): - """Update an instance at the top level cell.""" - message = _BroadcastMessage(self, ctxt, 'instance_update_at_top', - dict(instance=instance), 'up', - run_locally=False) - message.process() - - def instance_destroy_at_top(self, ctxt, instance): - """Destroy an instance at the top level cell.""" - message = _BroadcastMessage(self, ctxt, 'instance_destroy_at_top', - dict(instance=instance), 'up', - run_locally=False) - message.process() - - def instance_delete_everywhere(self, ctxt, instance, delete_type): - """This is used by API cell when it didn't know what cell - an instance was in, but the instance was requested to be - deleted or soft_deleted. So, we'll broadcast this everywhere. - """ - method_kwargs = dict(instance=instance, delete_type=delete_type) - message = _BroadcastMessage(self, ctxt, - 'instance_delete_everywhere', - method_kwargs, 'down', - run_locally=False) - message.process() - - def instance_fault_create_at_top(self, ctxt, instance_fault): - """Create an instance fault at the top level cell.""" - message = _BroadcastMessage(self, ctxt, - 'instance_fault_create_at_top', - dict(instance_fault=instance_fault), - 'up', run_locally=False) - message.process() - - def bw_usage_update_at_top(self, ctxt, bw_update_info): - """Update bandwidth usage at top level cell.""" - message = _BroadcastMessage(self, ctxt, 'bw_usage_update_at_top', - dict(bw_update_info=bw_update_info), - 'up', run_locally=False) - message.process() - - def sync_instances(self, ctxt, project_id, updated_since, deleted): - """Force a sync of all instances, potentially by project_id, - and potentially since a certain date/time. - """ - method_kwargs = dict(project_id=project_id, - updated_since=updated_since, - deleted=deleted) - message = _BroadcastMessage(self, ctxt, 'sync_instances', - method_kwargs, 'down', - run_locally=False) - message.process() - - def service_get_all(self, ctxt, filters=None): - method_kwargs = dict(filters=filters) - message = _BroadcastMessage(self, ctxt, 'service_get_all', - method_kwargs, 'down', - run_locally=True, need_response=True) - return message.process() - - def service_get_by_compute_host(self, ctxt, cell_name, host_name): - method_kwargs = dict(host_name=host_name) - message = _TargetedMessage(self, ctxt, - 'service_get_by_compute_host', - method_kwargs, 'down', cell_name, - need_response=True) - return message.process() - - def get_host_uptime(self, ctxt, cell_name, host_name): - method_kwargs = dict(host_name=host_name) - message = _TargetedMessage(self, ctxt, - 'get_host_uptime', - method_kwargs, 'down', cell_name, - need_response=True) - return message.process() - - def service_update(self, ctxt, cell_name, host_name, binary, - params_to_update): - """Used to enable/disable a service. For compute services, setting to - disabled stops new builds arriving on that host. 
- - :param host_name: the name of the host machine that the service is - running - :param binary: The name of the executable that the service runs as - :param params_to_update: eg. {'disabled': True} - :returns: the update service object - """ - method_kwargs = dict(host_name=host_name, binary=binary, - params_to_update=params_to_update) - message = _TargetedMessage(self, ctxt, - 'service_update', - method_kwargs, 'down', cell_name, - need_response=True) - return message.process() - - def service_delete(self, ctxt, cell_name, service_id): - """Deletes the specified service.""" - method_kwargs = {'service_id': service_id} - message = _TargetedMessage(self, ctxt, - 'service_delete', - method_kwargs, 'down', cell_name, - need_response=True) - message.process() - - def proxy_rpc_to_manager(self, ctxt, cell_name, host_name, topic, - rpc_message, call, timeout): - method_kwargs = {'host_name': host_name, - 'topic': topic, - 'rpc_message': rpc_message, - 'timeout': timeout} - message = _TargetedMessage(self, ctxt, - 'proxy_rpc_to_manager', - method_kwargs, 'down', cell_name, - need_response=call) - return message.process() - - def task_log_get_all(self, ctxt, cell_name, task_name, - period_beginning, period_ending, - host=None, state=None): - """Get task logs from the DB from all cells or a particular - cell. - - If 'cell_name' is None or '', get responses from all cells. - If 'host' is not None, filter by host. - If 'state' is not None, filter by state. - - Return a list of Response objects. - """ - method_kwargs = dict(task_name=task_name, - period_beginning=period_beginning, - period_ending=period_ending, - host=host, state=state) - if cell_name: - message = _TargetedMessage(self, ctxt, 'task_log_get_all', - method_kwargs, 'down', - cell_name, need_response=True) - # Caller should get a list of Responses. 
- return [message.process()] - message = _BroadcastMessage(self, ctxt, 'task_log_get_all', - method_kwargs, 'down', - run_locally=True, need_response=True) - return message.process() - - def compute_node_get_all(self, ctxt, hypervisor_match=None): - """Return list of compute nodes in all child cells.""" - method_kwargs = dict(hypervisor_match=hypervisor_match) - message = _BroadcastMessage(self, ctxt, 'compute_node_get_all', - method_kwargs, 'down', - run_locally=True, need_response=True) - return message.process() - - def compute_node_stats(self, ctxt): - """Return compute node stats from all child cells.""" - method_kwargs = dict() - message = _BroadcastMessage(self, ctxt, 'compute_node_stats', - method_kwargs, 'down', - run_locally=True, need_response=True) - return message.process() - - def compute_node_get(self, ctxt, cell_name, compute_id): - """Return compute node entry from a specific cell by ID or UUID.""" - method_kwargs = dict(compute_id=compute_id) - message = _TargetedMessage(self, ctxt, 'compute_node_get', - method_kwargs, 'down', - cell_name, need_response=True) - return message.process() - - def actions_get(self, ctxt, cell_name, instance_uuid): - method_kwargs = dict(instance_uuid=instance_uuid) - message = _TargetedMessage(self, ctxt, 'actions_get', - method_kwargs, 'down', - cell_name, need_response=True) - return message.process() - - def action_get_by_request_id(self, ctxt, cell_name, instance_uuid, - request_id): - method_kwargs = dict(instance_uuid=instance_uuid, - request_id=request_id) - message = _TargetedMessage(self, ctxt, 'action_get_by_request_id', - method_kwargs, 'down', - cell_name, need_response=True) - return message.process() - - def action_events_get(self, ctxt, cell_name, action_id): - method_kwargs = dict(action_id=action_id) - message = _TargetedMessage(self, ctxt, 'action_events_get', - method_kwargs, 'down', - cell_name, need_response=True) - return message.process() - - def consoleauth_delete_tokens(self, ctxt, instance_uuid): - """Delete consoleauth tokens for an instance in API cells.""" - message = _BroadcastMessage(self, ctxt, 'consoleauth_delete_tokens', - dict(instance_uuid=instance_uuid), - 'up', run_locally=False) - message.process() - - def validate_console_port(self, ctxt, cell_name, instance_uuid, - console_port, console_type): - """Validate console port with child cell compute node.""" - method_kwargs = {'instance_uuid': instance_uuid, - 'console_port': console_port, - 'console_type': console_type} - message = _TargetedMessage(self, ctxt, 'validate_console_port', - method_kwargs, 'down', - cell_name, need_response=True) - return message.process() - - def bdm_update_or_create_at_top(self, ctxt, bdm, create=None): - """Update/Create a BDM at top level cell.""" - message = _BroadcastMessage(self, ctxt, - 'bdm_update_or_create_at_top', - dict(bdm=bdm, create=create), - 'up', run_locally=False) - message.process() - - def bdm_destroy_at_top(self, ctxt, instance_uuid, device_name=None, - volume_id=None): - """Destroy a BDM at top level cell.""" - method_kwargs = dict(instance_uuid=instance_uuid, - device_name=device_name, - volume_id=volume_id) - message = _BroadcastMessage(self, ctxt, 'bdm_destroy_at_top', - method_kwargs, - 'up', run_locally=False) - message.process() - - def get_migrations(self, ctxt, cell_name, run_locally, filters): - """Fetch all migrations applying the filters for a given cell or all - cells. 
- """ - method_kwargs = dict(filters=filters) - if cell_name: - return self._get_migrations_for_cell(ctxt, cell_name, filters) - - message = _BroadcastMessage(self, ctxt, 'get_migrations', - method_kwargs, 'down', - run_locally=run_locally, - need_response=True) - return message.process() - - def _instance_action(self, ctxt, instance, method, extra_kwargs=None, - need_response=False): - """Call instance_ in correct cell for instance.""" - cell_name = instance.cell_name - if not cell_name: - LOG.warning("No cell_name for %(method)s() from API", - dict(method=method), instance=instance) - return - method_kwargs = {'instance': instance} - if extra_kwargs: - method_kwargs.update(extra_kwargs) - message = _TargetedMessage(self, ctxt, method, method_kwargs, - 'down', cell_name, - need_response=need_response) - return message.process() - - def instance_update_from_api(self, ctxt, instance, - expected_vm_state, expected_task_state, - admin_state_reset): - """Update an instance object in its cell.""" - cell_name = instance.cell_name - if not cell_name: - LOG.warning("No cell_name for instance update from API", - instance=instance) - return - method_kwargs = {'instance': instance, - 'expected_vm_state': expected_vm_state, - 'expected_task_state': expected_task_state, - 'admin_state_reset': admin_state_reset} - message = _TargetedMessage(self, ctxt, 'instance_update_from_api', - method_kwargs, 'down', - cell_name) - message.process() - - def start_instance(self, ctxt, instance): - """Start an instance in its cell.""" - self._instance_action(ctxt, instance, 'start_instance') - - def stop_instance(self, ctxt, instance, do_cast=True, clean_shutdown=True): - """Stop an instance in its cell.""" - extra_kwargs = dict(clean_shutdown=clean_shutdown) - if do_cast: - self._instance_action(ctxt, instance, 'stop_instance', - extra_kwargs=extra_kwargs) - else: - return self._instance_action(ctxt, instance, 'stop_instance', - extra_kwargs=extra_kwargs, - need_response=True) - - def reboot_instance(self, ctxt, instance, reboot_type): - """Reboot an instance in its cell.""" - extra_kwargs = dict(reboot_type=reboot_type) - self._instance_action(ctxt, instance, 'reboot_instance', - extra_kwargs=extra_kwargs) - - def suspend_instance(self, ctxt, instance): - """Suspend an instance in its cell.""" - self._instance_action(ctxt, instance, 'suspend_instance') - - def resume_instance(self, ctxt, instance): - """Resume an instance in its cell.""" - self._instance_action(ctxt, instance, 'resume_instance') - - def terminate_instance(self, ctxt, instance, delete_type='delete'): - extra_kwargs = dict(delete_type=delete_type) - self._instance_action(ctxt, instance, 'terminate_instance', - extra_kwargs=extra_kwargs) - - def soft_delete_instance(self, ctxt, instance): - self._instance_action(ctxt, instance, 'soft_delete_instance') - - def pause_instance(self, ctxt, instance): - """Pause an instance in its cell.""" - self._instance_action(ctxt, instance, 'pause_instance') - - def unpause_instance(self, ctxt, instance): - """Unpause an instance in its cell.""" - self._instance_action(ctxt, instance, 'unpause_instance') - - def resize_instance(self, ctxt, instance, flavor, - extra_instance_updates, - clean_shutdown=True): - """Resize an instance in its cell.""" - extra_kwargs = dict(flavor=flavor, - extra_instance_updates=extra_instance_updates, - clean_shutdown=clean_shutdown) - self._instance_action(ctxt, instance, 'resize_instance', - extra_kwargs=extra_kwargs) - - def live_migrate_instance(self, ctxt, instance, block_migration, - 
disk_over_commit, host_name): - """Live migrate an instance in its cell.""" - extra_kwargs = dict(block_migration=block_migration, - disk_over_commit=disk_over_commit, - host_name=host_name) - self._instance_action(ctxt, instance, 'live_migrate_instance', - extra_kwargs=extra_kwargs) - - def revert_resize(self, ctxt, instance): - """Revert a resize for an instance in its cell.""" - self._instance_action(ctxt, instance, 'revert_resize') - - def confirm_resize(self, ctxt, instance): - """Confirm a resize for an instance in its cell.""" - self._instance_action(ctxt, instance, 'confirm_resize') - - def reset_network(self, ctxt, instance): - """Reset networking for an instance in its cell.""" - self._instance_action(ctxt, instance, 'reset_network') - - def inject_network_info(self, ctxt, instance): - """Inject networking for an instance in its cell.""" - self._instance_action(ctxt, instance, 'inject_network_info') - - def snapshot_instance(self, ctxt, instance, image_id): - """Snapshot an instance in its cell.""" - extra_kwargs = dict(image_id=image_id) - self._instance_action(ctxt, instance, 'snapshot_instance', - extra_kwargs=extra_kwargs) - - def backup_instance(self, ctxt, instance, image_id, backup_type, - rotation): - """Backup an instance in its cell.""" - extra_kwargs = dict(image_id=image_id, backup_type=backup_type, - rotation=rotation) - self._instance_action(ctxt, instance, 'backup_instance', - extra_kwargs=extra_kwargs) - - def rebuild_instance(self, ctxt, instance, image_href, admin_password, - files_to_inject, preserve_ephemeral, kwargs): - extra_kwargs = dict(image_href=image_href, - admin_password=admin_password, - files_to_inject=files_to_inject, - preserve_ephemeral=preserve_ephemeral, - kwargs=kwargs) - self._instance_action(ctxt, instance, 'rebuild_instance', - extra_kwargs=extra_kwargs) - - def set_admin_password(self, ctxt, instance, new_pass): - self._instance_action(ctxt, instance, 'set_admin_password', - extra_kwargs={'new_pass': new_pass}) - - def get_keypair_at_top(self, ctxt, user_id, name): - """Get Key Pair by name at top level cell.""" - message = _BroadcastMessage(self, ctxt, - 'get_keypair_at_top', - dict(user_id=user_id, name=name), - 'up', - need_response=True, run_locally=False) - return message.process() - - @staticmethod - def get_message_types(): - return _CELL_MESSAGE_TYPE_TO_MESSAGE_CLS.keys() - - -class Response(object): - """Holds a response from a cell. If there was a failure, 'failure' - will be True and 'response' will contain an encoded Exception. 
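Every per-instance verb above (start, stop, reboot, resize, and so on) funnels through _instance_action(), which routes on instance.cell_name. A condensed sketch; send_to_cell is a hypothetical stand-in for building and processing a _TargetedMessage:

import logging

LOG = logging.getLogger(__name__)

def instance_action(send_to_cell, ctxt, instance, method,
                    extra_kwargs=None, need_response=False):
    cell_name = getattr(instance, 'cell_name', None)
    if not cell_name:
        # Without a cell_name there is nowhere to route the message.
        LOG.warning("No cell_name for %s() from API", method)
        return
    method_kwargs = {'instance': instance}
    method_kwargs.update(extra_kwargs or {})
    return send_to_cell(ctxt, cell_name, method, method_kwargs, need_response)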
- """ - def __init__(self, ctxt, cell_name, value, failure): - self.failure = failure - self.cell_name = cell_name - self.value = value - self.ctxt = ctxt - self.serializer = objects_base.NovaObjectSerializer() - - def to_json(self): - resp_value = self.serializer.serialize_entity(self.ctxt, self.value) - if self.failure: - resp_value = serialize_remote_exception(resp_value, - log_failure=False) - _dict = {'cell_name': self.cell_name, - 'value': resp_value, - 'failure': self.failure} - return jsonutils.dumps(_dict) - - @classmethod - def from_json(cls, ctxt, json_message): - _dict = jsonutils.loads(json_message) - if _dict['failure']: - resp_value = deserialize_remote_exception(_dict['value'], - rpc.get_allowed_exmods()) - _dict['value'] = resp_value - response = cls(ctxt, **_dict) - response.value = response.serializer.deserialize_entity( - response.ctxt, response.value) - return response - - def value_or_raise(self): - if self.failure: - if isinstance(self.value, (tuple, list)): - six.reraise(*self.value) - else: - raise self.value - return self.value - - -_REMOTE_POSTFIX = '_Remote' - - -def serialize_remote_exception(failure_info, log_failure=True): - """Prepares exception data to be sent over rpc. - - Failure_info should be a sys.exc_info() tuple. - - """ - tb = traceback.format_exception(*failure_info) - failure = failure_info[1] - if log_failure: - LOG.error("Returning exception %s to caller", - six.text_type(failure)) - LOG.error(tb) - - kwargs = {} - if hasattr(failure, 'kwargs'): - kwargs = failure.kwargs - - # NOTE(matiu): With cells, it's possible to re-raise remote, remote - # exceptions. Lets turn it back into the original exception type. - cls_name = str(failure.__class__.__name__) - mod_name = str(failure.__class__.__module__) - if (cls_name.endswith(_REMOTE_POSTFIX) and - mod_name.endswith(_REMOTE_POSTFIX)): - cls_name = cls_name[:-len(_REMOTE_POSTFIX)] - mod_name = mod_name[:-len(_REMOTE_POSTFIX)] - - data = { - 'class': cls_name, - 'module': mod_name, - 'message': six.text_type(failure), - 'tb': tb, - 'args': failure.args, - 'kwargs': kwargs - } - - json_data = jsonutils.dumps(data) - - return json_data - - -def deserialize_remote_exception(data, allowed_remote_exmods): - failure = jsonutils.loads(str(data)) - - trace = failure.get('tb', []) - message = failure.get('message', "") + "\n" + "\n".join(trace) - name = failure.get('class') - module = failure.get('module') - - # NOTE(ameade): We DO NOT want to allow just any module to be imported, in - # order to prevent arbitrary code execution. - if module != 'exceptions' and module not in allowed_remote_exmods: - return messaging.RemoteError(name, failure.get('message'), trace) - - try: - mod = importutils.import_module(module) - klass = getattr(mod, name) - if not issubclass(klass, Exception): - raise TypeError("Can only deserialize Exceptions") - - failure = klass(*failure.get('args', []), **failure.get('kwargs', {})) - except (AttributeError, TypeError, ImportError): - return messaging.RemoteError(name, failure.get('message'), trace) - - ex_type = type(failure) - str_override = lambda self: message - new_ex_type = type(ex_type.__name__ + _REMOTE_POSTFIX, (ex_type,), - {'__str__': str_override, '__unicode__': str_override}) - new_ex_type.__module__ = '%s%s' % (module, _REMOTE_POSTFIX) - try: - # NOTE(ameade): Dynamically create a new exception type and swap it in - # as the new type for the exception. This only works on user defined - # Exceptions and not core python exceptions. 
This is important because - # we cannot necessarily change an exception message so we must override - # the __str__ method. - failure.__class__ = new_ex_type - except TypeError: - # NOTE(ameade): If a core exception then just add the traceback to the - # first exception argument. - failure.args = (message,) + failure.args[1:] - return failure diff --git a/nova/cells/opts.py b/nova/cells/opts.py deleted file mode 100644 index f9d122d5e46..00000000000 --- a/nova/cells/opts.py +++ /dev/null @@ -1,28 +0,0 @@ -# Copyright (c) 2012 Rackspace Hosting -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -""" -Global cells config options -""" -import nova.conf - -CONF = nova.conf.CONF - - -def get_cell_type(): - """Return the cell type, 'api', 'compute', or None (if cells is disabled). - """ - if not CONF.cells.enable: - return - return CONF.cells.cell_type diff --git a/nova/cells/rpc_driver.py b/nova/cells/rpc_driver.py deleted file mode 100644 index 3f45af6e32f..00000000000 --- a/nova/cells/rpc_driver.py +++ /dev/null @@ -1,168 +0,0 @@ -# Copyright (c) 2012 Rackspace Hosting -# All Rights Reserved. -# Copyright 2013 Red Hat, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -""" -Cells RPC Communication Driver -""" -import oslo_messaging as messaging - -from nova.cells import driver -import nova.conf -from nova import rpc - - -CONF = nova.conf.CONF - - -class CellsRPCDriver(driver.BaseCellsDriver): - """Driver for cell<->cell communication via RPC. This is used to - setup the RPC consumers as well as to send a message to another cell. - - One instance of this class will be created for every neighbor cell - that we find in the DB and it will be associated with the cell in - its CellState. - - One instance is also created by the cells manager for setting up - the consumers. - """ - - def __init__(self, *args, **kwargs): - super(CellsRPCDriver, self).__init__(*args, **kwargs) - self.rpc_servers = [] - self.intercell_rpcapi = InterCellRPCAPI() - - def start_servers(self, msg_runner): - """Start RPC servers. - - Start up 2 separate servers for handling inter-cell - communication via RPC. Both handle the same types of - messages, but requests/replies are separated to solve - potential deadlocks. (If we used the same queue for both, - it's possible to exhaust the RPC thread pool while we wait - for replies.. such that we'd never consume a reply.) 
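serialize_remote_exception()/deserialize_remote_exception() above round-trip a sys.exc_info() tuple as JSON. A stripped-down sketch of the serializing half, under the same assumptions:

import json
import sys
import traceback

def exc_to_json(exc_info):
    etype, value, _tb = exc_info
    return json.dumps({'class': etype.__name__,
                       'module': etype.__module__,
                       'message': str(value),
                       'tb': traceback.format_exception(*exc_info),
                       'args': list(value.args)})

class BoomError(Exception):
    pass

try:
    raise BoomError('boom')
except BoomError:
    payload = exc_to_json(sys.exc_info())  # safe to ship over RPC

On the receiving side the real code only rebuilds the exception class when its module is allow-listed (allowed_remote_exmods), which is what prevents a malicious cell from triggering arbitrary imports.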
- """ - topic_base = CONF.cells.rpc_driver_queue_base - proxy_manager = InterCellRPCDispatcher(msg_runner) - for msg_type in msg_runner.get_message_types(): - target = messaging.Target(topic='%s.%s' % (topic_base, msg_type), - server=CONF.host) - # NOTE(comstud): We do not need to use the object serializer - # on this because object serialization is taken care for us in - # the nova.cells.messaging module. - server = rpc.get_server(target, endpoints=[proxy_manager]) - server.start() - self.rpc_servers.append(server) - - def stop_servers(self): - """Stop RPC servers. - - NOTE: Currently there's no hooks when stopping services - to have managers cleanup, so this is not currently called. - """ - for server in self.rpc_servers: - server.stop() - - def send_message_to_cell(self, cell_state, message): - """Use the IntercellRPCAPI to send a message to a cell.""" - self.intercell_rpcapi.send_message_to_cell(cell_state, message) - - -class InterCellRPCAPI(object): - """Client side of the Cell<->Cell RPC API. - - The CellsRPCDriver uses this to make calls to another cell. - - API version history: - 1.0 - Initial version. - - ... Grizzly supports message version 1.0. So, any changes to existing - methods in 2.x after that point should be done such that they can - handle the version_cap being set to 1.0. - """ - - VERSION_ALIASES = { - 'grizzly': '1.0', - } - - def __init__(self): - super(InterCellRPCAPI, self).__init__() - self.version_cap = ( - self.VERSION_ALIASES.get(CONF.upgrade_levels.intercell, - CONF.upgrade_levels.intercell)) - self.transports = {} - - def _get_client(self, next_hop, topic): - """Turn the DB information for a cell into a messaging.RPCClient.""" - transport = self._get_transport(next_hop) - target = messaging.Target(topic=topic, version='1.0') - serializer = rpc.RequestContextSerializer(None) - return messaging.RPCClient(transport, - target, - version_cap=self.version_cap, - serializer=serializer) - - def _get_transport(self, next_hop): - """NOTE(belliott) Each Transport object contains connection pool - state. Maintain references to them to avoid continual reconnects - to the message broker. - """ - transport_url = next_hop.db_info['transport_url'] - if transport_url not in self.transports: - transport = messaging.get_rpc_transport( - nova.conf.CONF, transport_url) - self.transports[transport_url] = transport - else: - transport = self.transports[transport_url] - - return transport - - def send_message_to_cell(self, cell_state, message): - """Send a message to another cell by JSON-ifying the message and - making an RPC cast to 'process_message'. If the message says to - fanout, do it. The topic that is used will be - 'CONF.rpc_driver_queue_base.'. - """ - topic_base = CONF.cells.rpc_driver_queue_base - topic = '%s.%s' % (topic_base, message.message_type) - cctxt = self._get_client(cell_state, topic) - if message.fanout: - cctxt = cctxt.prepare(fanout=message.fanout) - return cctxt.cast(message.ctxt, 'process_message', - message=message.to_json()) - - -class InterCellRPCDispatcher(object): - """RPC Dispatcher to handle messages received from other cells. - - All messages received here have come from a sibling cell. Depending - on the ultimate target and type of message, we may process the message - in this cell, relay the message to another sibling cell, or both. This - logic is defined by the message class in the nova.cells.messaging module. 
- """ - - target = messaging.Target(version='1.0') - - def __init__(self, msg_runner): - """Init the Intercell RPC Dispatcher.""" - self.msg_runner = msg_runner - - def process_message(self, _ctxt, message): - """We received a message from another cell. Use the MessageRunner - to turn this from JSON back into an instance of the correct - Message class. Then process it! - """ - message = self.msg_runner.message_from_json(message) - message.process() diff --git a/nova/cells/rpcapi.py b/nova/cells/rpcapi.py deleted file mode 100644 index f2f0adc211e..00000000000 --- a/nova/cells/rpcapi.py +++ /dev/null @@ -1,676 +0,0 @@ -# Copyright (c) 2012 Rackspace Hosting -# All Rights Reserved. -# Copyright 2013 Red Hat, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -""" -Client side of nova-cells RPC API (for talking to the nova-cells service -within a cell). - -This is different than communication between child and parent nova-cells -services. That communication is handled by the cells driver via the -messaging module. -""" - -from oslo_log import log as logging -import oslo_messaging as messaging -from oslo_serialization import jsonutils -from oslo_utils import uuidutils - -from nova import cells -import nova.conf -from nova import exception -from nova import objects -from nova.objects import base as objects_base -from nova import profiler -from nova import rpc - -LOG = logging.getLogger(__name__) - -CONF = nova.conf.CONF - - -@profiler.trace_cls("rpc") -class CellsAPI(object): - '''Cells client-side RPC API - - API version history: - - * 1.0 - Initial version. - * 1.1 - Adds get_cell_info_for_neighbors() and sync_instances() - * 1.2 - Adds service_get_all(), service_get_by_compute_host(), - and proxy_rpc_to_compute_manager() - * 1.3 - Adds task_log_get_all() - * 1.4 - Adds compute_node_get(), compute_node_get_all(), and - compute_node_stats() - * 1.5 - Adds actions_get(), action_get_by_request_id(), and - action_events_get() - * 1.6 - Adds consoleauth_delete_tokens() and validate_console_port() - - ... Grizzly supports message version 1.6. So, any changes to existing - methods in 2.x after that point should be done such that they can - handle the version_cap being set to 1.6. 
- - * 1.7 - Adds service_update() - * 1.8 - Adds build_instances(), deprecates schedule_run_instance() - * 1.9 - Adds get_capacities() - * 1.10 - Adds bdm_update_or_create_at_top(), and bdm_destroy_at_top() - * 1.11 - Adds get_migrations() - * 1.12 - Adds instance_start() and instance_stop() - * 1.13 - Adds cell_create(), cell_update(), cell_delete(), and - cell_get() - * 1.14 - Adds reboot_instance() - * 1.15 - Adds suspend_instance() and resume_instance() - * 1.16 - Adds instance_update_from_api() - * 1.17 - Adds get_host_uptime() - * 1.18 - Adds terminate_instance() and soft_delete_instance() - * 1.19 - Adds pause_instance() and unpause_instance() - * 1.20 - Adds resize_instance() and live_migrate_instance() - * 1.21 - Adds revert_resize() and confirm_resize() - * 1.22 - Adds reset_network() - * 1.23 - Adds inject_network_info() - * 1.24 - Adds backup_instance() and snapshot_instance() - - ... Havana supports message version 1.24. So, any changes to existing - methods in 1.x after that point should be done such that they can - handle the version_cap being set to 1.24. - - * 1.25 - Adds rebuild_instance() - * 1.26 - Adds service_delete() - * 1.27 - Updates instance_delete_everywhere() for instance objects - - ... Icehouse supports message version 1.27. So, any changes to - existing methods in 1.x after that point should be done such that they - can handle the version_cap being set to 1.27. - - * 1.28 - Make bdm_update_or_create_at_top and use bdm objects - * 1.29 - Adds set_admin_password() - - ... Juno supports message version 1.29. So, any changes to - existing methods in 1.x after that point should be done such that they - can handle the version_cap being set to 1.29. - - * 1.30 - Make build_instances() use flavor object - * 1.31 - Add clean_shutdown to stop, resize, rescue, and shelve - * 1.32 - Send objects for instances in build_instances() - * 1.33 - Add clean_shutdown to resize_instance() - * 1.34 - build_instances uses BlockDeviceMapping objects, drops - legacy_bdm argument - - ... Kilo supports message version 1.34. So, any changes to - existing methods in 1.x after that point should be done such that they - can handle the version_cap being set to 1.34. - - * 1.35 - Make instance_update_at_top, instance_destroy_at_top - and instance_info_cache_update_at_top use instance objects - * 1.36 - Added 'delete_type' parameter to terminate_instance() - * 1.37 - Add get_keypair_at_top to fetch keypair from api cell - - ... Liberty, Mitaka, Newton, and Ocata support message version 1.37. - So, any changes to existing methods in 1.x after that point should be - done such that they can handle the version_cap being set to - 1.37. - - * 1.38 - Handle uuid parameter in compute_node_get() method. - ''' - - VERSION_ALIASES = { - 'grizzly': '1.6', - 'havana': '1.24', - 'icehouse': '1.27', - 'juno': '1.29', - 'kilo': '1.34', - 'liberty': '1.37', - 'mitaka': '1.37', - 'newton': '1.37', - 'ocata': '1.37', - } - - def __init__(self): - super(CellsAPI, self).__init__() - target = messaging.Target(topic=cells.TOPIC, version='1.0') - version_cap = self.VERSION_ALIASES.get(CONF.upgrade_levels.cells, - CONF.upgrade_levels.cells) - # NOTE(sbauza): Yes, this is ugly but cells_utils is calling cells.db - # which itself calls cells.rpcapi... You meant import cycling ? Gah. 
- from nova.cells import utils as cells_utils - serializer = cells_utils.ProxyObjectSerializer() - self.client = rpc.get_client(target, - version_cap=version_cap, - serializer=serializer) - - def cast_compute_api_method(self, ctxt, cell_name, method, - *args, **kwargs): - """Make a cast to a compute API method in a certain cell.""" - method_info = {'method': method, - 'method_args': args, - 'method_kwargs': kwargs} - self.client.cast(ctxt, 'run_compute_api_method', - cell_name=cell_name, - method_info=method_info, - call=False) - - def call_compute_api_method(self, ctxt, cell_name, method, - *args, **kwargs): - """Make a call to a compute API method in a certain cell.""" - method_info = {'method': method, - 'method_args': args, - 'method_kwargs': kwargs} - return self.client.call(ctxt, 'run_compute_api_method', - cell_name=cell_name, - method_info=method_info, - call=True) - - def build_instances(self, ctxt, **kwargs): - """Build instances.""" - build_inst_kwargs = kwargs - instances = build_inst_kwargs['instances'] - build_inst_kwargs['image'] = jsonutils.to_primitive( - build_inst_kwargs['image']) - - version = '1.34' - if self.client.can_send_version('1.34'): - build_inst_kwargs.pop('legacy_bdm', None) - else: - bdm_p = objects_base.obj_to_primitive( - build_inst_kwargs['block_device_mapping']) - build_inst_kwargs['block_device_mapping'] = bdm_p - version = '1.32' - if not self.client.can_send_version('1.32'): - instances_p = [jsonutils.to_primitive(inst) for inst in instances] - build_inst_kwargs['instances'] = instances_p - version = '1.30' - if not self.client.can_send_version('1.30'): - if 'filter_properties' in build_inst_kwargs: - filter_properties = build_inst_kwargs['filter_properties'] - flavor = filter_properties['instance_type'] - flavor_p = objects_base.obj_to_primitive(flavor) - filter_properties['instance_type'] = flavor_p - version = '1.8' - cctxt = self.client.prepare(version=version) - cctxt.cast(ctxt, 'build_instances', - build_inst_kwargs=build_inst_kwargs) - - def instance_update_at_top(self, ctxt, instance): - """Update instance at API level.""" - version = '1.35' - if not self.client.can_send_version('1.35'): - instance = objects_base.obj_to_primitive(instance) - version = '1.34' - cctxt = self.client.prepare(version=version) - cctxt.cast(ctxt, 'instance_update_at_top', instance=instance) - - def instance_destroy_at_top(self, ctxt, instance): - """Destroy instance at API level.""" - version = '1.35' - if not self.client.can_send_version('1.35'): - instance = objects_base.obj_to_primitive(instance) - version = '1.34' - cctxt = self.client.prepare(version=version) - cctxt.cast(ctxt, 'instance_destroy_at_top', instance=instance) - - def instance_delete_everywhere(self, ctxt, instance, delete_type): - """Delete instance everywhere. delete_type may be 'soft' - or 'hard'. This is generally only used to resolve races - when API cell doesn't know to what cell an instance belongs. 
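build_instances() above shows the standard version-negotiation ladder: try the newest message version and progressively downgrade the payload for older peers. The control flow in isolation; can_send_version and to_primitive are parameters standing in for the RPC client and serializer, and the real method has one more rung down to 1.8 for the legacy flavor format:

def negotiate_build_payload(can_send_version, to_primitive, payload):
    version = '1.34'
    if not can_send_version('1.34'):
        # Older peers want primitive block device mappings.
        payload['block_device_mapping'] = to_primitive(
            payload['block_device_mapping'])
        version = '1.32'
    if not can_send_version('1.32'):
        # Still older peers want primitive instances too.
        payload['instances'] = [to_primitive(i) for i in payload['instances']]
        version = '1.30'
    return version, payload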
- """ - if self.client.can_send_version('1.27'): - version = '1.27' - else: - version = '1.0' - instance = jsonutils.to_primitive(instance) - cctxt = self.client.prepare(version=version) - cctxt.cast(ctxt, 'instance_delete_everywhere', instance=instance, - delete_type=delete_type) - - def instance_fault_create_at_top(self, ctxt, instance_fault): - """Create an instance fault at the top.""" - instance_fault_p = jsonutils.to_primitive(instance_fault) - self.client.cast(ctxt, 'instance_fault_create_at_top', - instance_fault=instance_fault_p) - - def bw_usage_update_at_top(self, ctxt, uuid, mac, start_period, - bw_in, bw_out, last_ctr_in, last_ctr_out, last_refreshed=None): - """Broadcast upwards that bw_usage was updated.""" - bw_update_info = {'uuid': uuid, - 'mac': mac, - 'start_period': start_period, - 'bw_in': bw_in, - 'bw_out': bw_out, - 'last_ctr_in': last_ctr_in, - 'last_ctr_out': last_ctr_out, - 'last_refreshed': last_refreshed} - self.client.cast(ctxt, 'bw_usage_update_at_top', - bw_update_info=bw_update_info) - - def instance_info_cache_update_at_top(self, ctxt, instance_info_cache): - """Broadcast up that an instance's info_cache has changed.""" - version = '1.35' - instance = objects.Instance(uuid=instance_info_cache.instance_uuid, - info_cache=instance_info_cache) - if not self.client.can_send_version('1.35'): - instance = objects_base.obj_to_primitive(instance) - version = '1.34' - cctxt = self.client.prepare(version=version) - cctxt.cast(ctxt, 'instance_update_at_top', instance=instance) - - def get_cell_info_for_neighbors(self, ctxt): - """Get information about our neighbor cells from the manager.""" - if not CONF.cells.enable: - return [] - cctxt = self.client.prepare(version='1.1') - return cctxt.call(ctxt, 'get_cell_info_for_neighbors') - - def sync_instances(self, ctxt, project_id=None, updated_since=None, - deleted=False): - """Ask all cells to sync instance data.""" - cctxt = self.client.prepare(version='1.1') - return cctxt.cast(ctxt, 'sync_instances', - project_id=project_id, - updated_since=updated_since, - deleted=deleted) - - def service_get_all(self, ctxt, filters=None): - """Ask all cells for their list of services.""" - cctxt = self.client.prepare(version='1.2') - return cctxt.call(ctxt, 'service_get_all', filters=filters) - - def service_get_by_compute_host(self, ctxt, host_name): - """Get the service entry for a host in a particular cell. The - cell name should be encoded within the host_name. - """ - cctxt = self.client.prepare(version='1.2') - return cctxt.call(ctxt, 'service_get_by_compute_host', - host_name=host_name) - - def get_host_uptime(self, context, host_name): - """Gets the host uptime in a particular cell. The cell name should - be encoded within the host_name - """ - cctxt = self.client.prepare(version='1.17') - return cctxt.call(context, 'get_host_uptime', host_name=host_name) - - def service_update(self, ctxt, host_name, binary, params_to_update): - """Used to enable/disable a service. For compute services, setting to - disabled stops new builds arriving on that host. - - :param host_name: the name of the host machine that the service is - running - :param binary: The name of the executable that the service runs as - :param params_to_update: eg. 
{'disabled': True} - """ - cctxt = self.client.prepare(version='1.7') - return cctxt.call(ctxt, 'service_update', - host_name=host_name, - binary=binary, - params_to_update=params_to_update) - - def service_delete(self, ctxt, cell_service_id): - """Deletes the specified service.""" - cctxt = self.client.prepare(version='1.26') - cctxt.call(ctxt, 'service_delete', - cell_service_id=cell_service_id) - - def proxy_rpc_to_manager(self, ctxt, rpc_message, topic, call=False, - timeout=None): - """Proxy RPC to a compute manager. The host in the topic - should be encoded with the target cell name. - """ - cctxt = self.client.prepare(version='1.2', timeout=timeout) - return cctxt.call(ctxt, 'proxy_rpc_to_manager', - topic=topic, - rpc_message=rpc_message, - call=call) - - def task_log_get_all(self, ctxt, task_name, period_beginning, - period_ending, host=None, state=None): - """Get the task logs from the DB in child cells.""" - cctxt = self.client.prepare(version='1.3') - return cctxt.call(ctxt, 'task_log_get_all', - task_name=task_name, - period_beginning=period_beginning, - period_ending=period_ending, - host=host, state=state) - - def compute_node_get(self, ctxt, compute_id): - """Get a compute node by ID or UUID in a specific cell.""" - version = '1.38' - if uuidutils.is_uuid_like(compute_id): - if not self.client.can_send_version(version): - LOG.warning('Unable to get compute node by UUID %s; service ' - 'is too old or the version is capped.', compute_id) - raise exception.ComputeHostNotFound(host=compute_id) - else: - version = '1.4' - cctxt = self.client.prepare(version=version) - return cctxt.call(ctxt, 'compute_node_get', compute_id=compute_id) - - def compute_node_get_all(self, ctxt, hypervisor_match=None): - """Return list of compute nodes in all cells, optionally - filtering by hypervisor host. 
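compute_node_get() above only gained UUID lookups at message version 1.38, so a UUID argument against an older, version-capped peer must fail fast instead of being sent. The guard on its own; LookupError stands in for the real ComputeHostNotFound:

from oslo_utils import uuidutils

def compute_node_get_version(can_send_version, compute_id):
    if uuidutils.is_uuid_like(compute_id):
        if not can_send_version('1.38'):
            # The remote side predates UUID support; don't send a call
            # it cannot interpret.
            raise LookupError('service too old for UUID %s' % compute_id)
        return '1.38'
    return '1.4'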
- """ - cctxt = self.client.prepare(version='1.4') - return cctxt.call(ctxt, 'compute_node_get_all', - hypervisor_match=hypervisor_match) - - def compute_node_stats(self, ctxt): - """Return compute node stats from all cells.""" - cctxt = self.client.prepare(version='1.4') - return cctxt.call(ctxt, 'compute_node_stats') - - def actions_get(self, ctxt, instance): - if not instance['cell_name']: - raise exception.InstanceUnknownCell(instance_uuid=instance['uuid']) - cctxt = self.client.prepare(version='1.5') - return cctxt.call(ctxt, 'actions_get', - cell_name=instance['cell_name'], - instance_uuid=instance['uuid']) - - def action_get_by_request_id(self, ctxt, instance, request_id): - if not instance['cell_name']: - raise exception.InstanceUnknownCell(instance_uuid=instance['uuid']) - cctxt = self.client.prepare(version='1.5') - return cctxt.call(ctxt, 'action_get_by_request_id', - cell_name=instance['cell_name'], - instance_uuid=instance['uuid'], - request_id=request_id) - - def action_events_get(self, ctxt, instance, action_id): - if not instance['cell_name']: - raise exception.InstanceUnknownCell(instance_uuid=instance['uuid']) - cctxt = self.client.prepare(version='1.5') - return cctxt.call(ctxt, 'action_events_get', - cell_name=instance['cell_name'], - action_id=action_id) - - def consoleauth_delete_tokens(self, ctxt, instance_uuid): - """Delete consoleauth tokens for an instance in API cells.""" - cctxt = self.client.prepare(version='1.6') - cctxt.cast(ctxt, 'consoleauth_delete_tokens', - instance_uuid=instance_uuid) - - def validate_console_port(self, ctxt, instance_uuid, console_port, - console_type): - """Validate console port with child cell compute node.""" - cctxt = self.client.prepare(version='1.6') - return cctxt.call(ctxt, 'validate_console_port', - instance_uuid=instance_uuid, - console_port=console_port, - console_type=console_type) - - def get_capacities(self, ctxt, cell_name=None): - cctxt = self.client.prepare(version='1.9') - return cctxt.call(ctxt, 'get_capacities', cell_name=cell_name) - - def bdm_update_or_create_at_top(self, ctxt, bdm, create=None): - """Create or update a block device mapping in API cells. If - create is True, only try to create. If create is None, try to - update but fall back to create. If create is False, only attempt - to update. This maps to nova-conductor's behavior. - """ - if self.client.can_send_version('1.28'): - version = '1.28' - else: - version = '1.10' - bdm = objects_base.obj_to_primitive(bdm) - cctxt = self.client.prepare(version=version) - - try: - cctxt.cast(ctxt, 'bdm_update_or_create_at_top', - bdm=bdm, create=create) - except Exception: - LOG.exception("Failed to notify cells of BDM update/create.") - - def bdm_destroy_at_top(self, ctxt, instance_uuid, device_name=None, - volume_id=None): - """Broadcast upwards that a block device mapping was destroyed. - One of device_name or volume_id should be specified. - """ - cctxt = self.client.prepare(version='1.10') - try: - cctxt.cast(ctxt, 'bdm_destroy_at_top', - instance_uuid=instance_uuid, - device_name=device_name, - volume_id=volume_id) - except Exception: - LOG.exception("Failed to notify cells of BDM destroy.") - - def get_migrations(self, ctxt, filters): - """Get all migrations applying the filters.""" - cctxt = self.client.prepare(version='1.11') - return cctxt.call(ctxt, 'get_migrations', filters=filters) - - def instance_update_from_api(self, ctxt, instance, expected_vm_state, - expected_task_state, admin_state_reset): - """Update an instance in its cell. 
- - This method takes a new-world instance object. - """ - cctxt = self.client.prepare(version='1.16') - cctxt.cast(ctxt, 'instance_update_from_api', - instance=instance, - expected_vm_state=expected_vm_state, - expected_task_state=expected_task_state, - admin_state_reset=admin_state_reset) - - def start_instance(self, ctxt, instance): - """Start an instance in its cell. - - This method takes a new-world instance object. - """ - cctxt = self.client.prepare(version='1.12') - cctxt.cast(ctxt, 'start_instance', instance=instance) - - def stop_instance(self, ctxt, instance, do_cast=True, clean_shutdown=True): - """Stop an instance in its cell. - - This method takes a new-world instance object. - """ - msg_args = {'instance': instance, - 'do_cast': do_cast} - if self.client.can_send_version('1.31'): - version = '1.31' - msg_args['clean_shutdown'] = clean_shutdown - else: - version = '1.12' - cctxt = self.client.prepare(version=version) - method = do_cast and cctxt.cast or cctxt.call - return method(ctxt, 'stop_instance', **msg_args) - - def cell_create(self, ctxt, values): - cctxt = self.client.prepare(version='1.13') - return cctxt.call(ctxt, 'cell_create', values=values) - - def cell_update(self, ctxt, cell_name, values): - cctxt = self.client.prepare(version='1.13') - return cctxt.call(ctxt, 'cell_update', - cell_name=cell_name, values=values) - - def cell_delete(self, ctxt, cell_name): - cctxt = self.client.prepare(version='1.13') - return cctxt.call(ctxt, 'cell_delete', cell_name=cell_name) - - def cell_get(self, ctxt, cell_name): - cctxt = self.client.prepare(version='1.13') - return cctxt.call(ctxt, 'cell_get', cell_name=cell_name) - - def reboot_instance(self, ctxt, instance, block_device_info, - reboot_type): - """Reboot an instance in its cell. - - This method takes a new-world instance object. - """ - cctxt = self.client.prepare(version='1.14') - cctxt.cast(ctxt, 'reboot_instance', instance=instance, - reboot_type=reboot_type) - - def pause_instance(self, ctxt, instance): - """Pause an instance in its cell. - - This method takes a new-world instance object. - """ - cctxt = self.client.prepare(version='1.19') - cctxt.cast(ctxt, 'pause_instance', instance=instance) - - def unpause_instance(self, ctxt, instance): - """Unpause an instance in its cell. - - This method takes a new-world instance object. - """ - cctxt = self.client.prepare(version='1.19') - cctxt.cast(ctxt, 'unpause_instance', instance=instance) - - def suspend_instance(self, ctxt, instance): - """Suspend an instance in its cell. - - This method takes a new-world instance object. - """ - cctxt = self.client.prepare(version='1.15') - cctxt.cast(ctxt, 'suspend_instance', instance=instance) - - def resume_instance(self, ctxt, instance): - """Resume an instance in its cell. - - This method takes a new-world instance object. - """ - cctxt = self.client.prepare(version='1.15') - cctxt.cast(ctxt, 'resume_instance', instance=instance) - - def terminate_instance(self, ctxt, instance, bdms, reservations=None, - delete_type='delete'): - """Delete an instance in its cell. - - This method takes a new-world instance object. - """ - msg_kwargs = {'instance': instance} - if self.client.can_send_version('1.36'): - version = '1.36' - msg_kwargs['delete_type'] = delete_type - else: - version = '1.18' - cctxt = self.client.prepare(version=version) - cctxt.cast(ctxt, 'terminate_instance', **msg_kwargs) - - def soft_delete_instance(self, ctxt, instance, reservations=None): - """Soft-delete an instance in its cell. 
-
-        This method takes a new-world instance object.
-        """
-        cctxt = self.client.prepare(version='1.18')
-        cctxt.cast(ctxt, 'soft_delete_instance', instance=instance)
-
-    def resize_instance(self, ctxt, instance, extra_instance_updates,
-                        scheduler_hint, flavor, reservations=None,
-                        clean_shutdown=True,
-                        request_spec=None):
-        # NOTE(sbauza): Since Cells v1 is quite feature-frozen, we don't want
-        # to pass down request_spec to the manager and rather keep the
-        # cell conductor providing a new RequestSpec like the original
-        # behaviour
-        flavor_p = jsonutils.to_primitive(flavor)
-        version = '1.33'
-        msg_args = {'instance': instance,
-                    'flavor': flavor_p,
-                    'extra_instance_updates': extra_instance_updates,
-                    'clean_shutdown': clean_shutdown}
-        if not self.client.can_send_version(version):
-            del msg_args['clean_shutdown']
-            version = '1.20'
-
-        cctxt = self.client.prepare(version=version)
-        cctxt.cast(ctxt, 'resize_instance', **msg_args)
-
-    def live_migrate_instance(self, ctxt, instance, host_name,
-                              block_migration, disk_over_commit,
-                              request_spec=None):
-        # NOTE(sbauza): Since Cells v1 is quite feature-frozen, we don't want
-        # to pass down request_spec to the manager and rather keep the
-        # cell conductor providing a new RequestSpec like the original
-        # behaviour
-        cctxt = self.client.prepare(version='1.20')
-        cctxt.cast(ctxt, 'live_migrate_instance',
-                   instance=instance,
-                   block_migration=block_migration,
-                   disk_over_commit=disk_over_commit,
-                   host_name=host_name)
-
-    def revert_resize(self, ctxt, instance, migration, host,
-                      reservations=None):
-        cctxt = self.client.prepare(version='1.21')
-        cctxt.cast(ctxt, 'revert_resize', instance=instance)
-
-    def confirm_resize(self, ctxt, instance, migration, host,
-                       reservations=None, cast=True):
-        # NOTE(comstud): This is only used in the API cell where we should
-        # always cast and ignore the 'cast' kwarg.
-        # Also, the compute api method normally takes an optional
-        # 'migration_ref' argument. But this is only used from the manager
-        # back to the API... which would happen in the child cell.
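# The send paths above -- stop_instance, terminate_instance and
# resize_instance -- all negotiate the RPC version the same way: try the
# newest version and, if the transport is pinned below it, drop the newer
# kwargs and fall back. A self-contained sketch of that pattern; the
# FakeClient stub below stands in for oslo.messaging's RPCClient and is
# not part of the original code.


class FakeClient(object):
    """Stub exposing the two RPCClient methods the pattern relies on."""

    def __init__(self, pinned_version):
        self.pinned = pinned_version

    def can_send_version(self, version):
        # oslo.messaging compares dotted versions properly; float
        # comparison is good enough for this sketch.
        return float(self.pinned) >= float(version)

    def prepare(self, version):
        print('preparing context at version %s' % version)
        return self


def stop_instance_sketch(client, instance, clean_shutdown=True):
    msg_args = {'instance': instance}
    if client.can_send_version('1.31'):
        # New enough: the remote side understands clean_shutdown.
        version = '1.31'
        msg_args['clean_shutdown'] = clean_shutdown
    else:
        # Pinned older: silently drop the newer argument and downgrade.
        version = '1.12'
    client.prepare(version=version)
    return version, msg_args


assert stop_instance_sketch(FakeClient('1.31'), 'uuid')[0] == '1.31'
assert stop_instance_sketch(FakeClient('1.12'), 'uuid')[0] == '1.12'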
-        cctxt = self.client.prepare(version='1.21')
-        cctxt.cast(ctxt, 'confirm_resize', instance=instance)
-
-    def reset_network(self, ctxt, instance):
-        """Reset networking for an instance."""
-        cctxt = self.client.prepare(version='1.22')
-        cctxt.cast(ctxt, 'reset_network', instance=instance)
-
-    def inject_network_info(self, ctxt, instance):
-        """Inject networking for an instance."""
-        cctxt = self.client.prepare(version='1.23')
-        cctxt.cast(ctxt, 'inject_network_info', instance=instance)
-
-    def snapshot_instance(self, ctxt, instance, image_id):
-        cctxt = self.client.prepare(version='1.24')
-        cctxt.cast(ctxt, 'snapshot_instance',
-                   instance=instance, image_id=image_id)
-
-    def backup_instance(self, ctxt, instance, image_id, backup_type, rotation):
-        cctxt = self.client.prepare(version='1.24')
-        cctxt.cast(ctxt, 'backup_instance',
-                   instance=instance,
-                   image_id=image_id,
-                   backup_type=backup_type,
-                   rotation=rotation)
-
-    def rebuild_instance(self, ctxt, instance, new_pass, injected_files,
-                         image_ref, orig_image_ref, orig_sys_metadata, bdms,
-                         recreate=False, on_shared_storage=False, host=None,
-                         preserve_ephemeral=False, request_spec=None,
-                         kwargs=None):
-        # NOTE(sbauza): Since Cells v1 is quite feature-frozen, we don't want
-        # to pass down request_spec to the manager and rather keep the
-        # cell conductor providing a new RequestSpec like the original
-        # behaviour
-        cctxt = self.client.prepare(version='1.25')
-        cctxt.cast(ctxt, 'rebuild_instance',
-                   instance=instance, image_href=image_ref,
-                   admin_password=new_pass, files_to_inject=injected_files,
-                   preserve_ephemeral=preserve_ephemeral, kwargs=kwargs)
-
-    def set_admin_password(self, ctxt, instance, new_pass):
-        cctxt = self.client.prepare(version='1.29')
-        cctxt.cast(ctxt, 'set_admin_password', instance=instance,
-                   new_pass=new_pass)
-
-    def get_keypair_at_top(self, ctxt, user_id, name):
-        if not CONF.cells.enable:
-            return
-
-        cctxt = self.client.prepare(version='1.37')
-        keypair = cctxt.call(ctxt, 'get_keypair_at_top', user_id=user_id,
-                             name=name)
-        if keypair is None:
-            raise exception.KeypairNotFound(user_id=user_id,
-                                            name=name)
-        return keypair
diff --git a/nova/cells/scheduler.py b/nova/cells/scheduler.py
deleted file mode 100644
index 752d7e4d89b..00000000000
--- a/nova/cells/scheduler.py
+++ /dev/null
@@ -1,246 +0,0 @@
-# Copyright (c) 2012 Rackspace Hosting
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
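Elsewhere in the client removed above, query-style methods use cctxt.call
(block for a result) while state-changing ones use cctxt.cast
(fire-and-forget); stop_instance even picks between the two at runtime with
the pre-ternary `and/or` idiom. A toy illustration of that dispatch,
independent of oslo.messaging (ToyContext is a made-up stand-in):

class ToyContext(object):
    """Made-up stand-in for an oslo.messaging prepared context."""

    def cast(self, ctxt, method, **kwargs):
        # Fire-and-forget: no return value travels back.
        print('cast %s(%s)' % (method, sorted(kwargs)))

    def call(self, ctxt, method, **kwargs):
        # Blocks until the remote side answers.
        print('call %s(%s)' % (method, sorted(kwargs)))
        return 'stopped'


def stop(cctxt, do_cast=True):
    # The deleted code writes `do_cast and cctxt.cast or cctxt.call`,
    # which predates conditional expressions; both forms pick the same
    # bound method.
    method = cctxt.cast if do_cast else cctxt.call
    return method(None, 'stop_instance', instance='uuid')


stop(ToyContext())                 # asynchronous, returns None
print(stop(ToyContext(), False))   # synchronous, prints 'stopped'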
- -""" -Cells Scheduler -""" -import copy -import time - -from oslo_log import log as logging -from six.moves import range - -from nova.cells import filters -from nova.cells import weights -from nova import compute -from nova.compute import instance_actions -from nova.compute import vm_states -from nova import conductor -import nova.conf -from nova.db import base -from nova import exception -from nova import objects -from nova.objects import base as obj_base -from nova.scheduler import utils as scheduler_utils -from nova import utils - -LOG = logging.getLogger(__name__) - -CONF = nova.conf.CONF - - -class CellsScheduler(base.Base): - """The cells scheduler.""" - - def __init__(self, msg_runner): - super(CellsScheduler, self).__init__() - self.msg_runner = msg_runner - self.state_manager = msg_runner.state_manager - self.compute_api = compute.API() - self.compute_task_api = conductor.ComputeTaskAPI() - self.filter_handler = filters.CellFilterHandler() - filter_classes = self.filter_handler.get_matching_classes( - CONF.cells.scheduler_filter_classes) - self.filters = [cls() for cls in filter_classes] - self.weight_handler = weights.CellWeightHandler() - weigher_classes = self.weight_handler.get_matching_classes( - CONF.cells.scheduler_weight_classes) - self.weighers = [cls() for cls in weigher_classes] - - def _create_instances_here(self, ctxt, instance_uuids, instance_properties, - instance_type, image, security_groups, block_device_mapping): - instance_values = copy.copy(instance_properties) - # The parent may pass these metadata values as lists, and the - # create call expects it to be a dict. - instance_values['metadata'] = utils.instance_meta(instance_values) - # Pop out things that will get set properly when re-creating the - # instance record. - instance_values.pop('id') - instance_values.pop('name') - instance_values.pop('info_cache') - instance_values.pop('security_groups') - instance_values.pop('flavor') - - # FIXME(danms): The instance was brutally serialized before being - # sent over RPC to us. Thus, the pci_requests value wasn't really - # sent in a useful form. Since it was getting ignored for cells - # before it was part of the Instance, skip it now until cells RPC - # is sending proper instance objects. 
- instance_values.pop('pci_requests', None) - - # FIXME(danms): Same for ec2_ids - instance_values.pop('ec2_ids', None) - - # FIXME(danms): Same for keypairs - instance_values.pop('keypairs', None) - - instances = [] - num_instances = len(instance_uuids) - security_groups = ( - self.compute_api.security_group_api.populate_security_groups( - security_groups)) - for i, instance_uuid in enumerate(instance_uuids): - instance = objects.Instance(context=ctxt) - instance.update(instance_values) - instance.uuid = instance_uuid - instance.flavor = instance_type - instance.old_flavor = None - instance.new_flavor = None - instance = self.compute_api.create_db_entry_for_new_instance( - ctxt, - instance_type, - image, - instance, - security_groups, - block_device_mapping, - num_instances, i) - block_device_mapping = ( - self.compute_api._bdm_validate_set_size_and_instance( - ctxt, instance, instance_type, block_device_mapping)) - self.compute_api._create_block_device_mapping(block_device_mapping) - - instances.append(instance) - self.msg_runner.instance_update_at_top(ctxt, instance) - return instances - - def _create_action_here(self, ctxt, instance_uuids): - for instance_uuid in instance_uuids: - objects.InstanceAction.action_start( - ctxt, - instance_uuid, - instance_actions.CREATE, - want_result=False) - - def _get_possible_cells(self): - cells = self.state_manager.get_child_cells() - our_cell = self.state_manager.get_my_state() - # Include our cell in the list, if we have any capacity info - if not cells or our_cell.capacities: - cells.append(our_cell) - return cells - - def _grab_target_cells(self, filter_properties): - cells = self._get_possible_cells() - cells = self.filter_handler.get_filtered_objects(self.filters, cells, - filter_properties) - # NOTE(comstud): I know this reads weird, but the 'if's are nested - # this way to optimize for the common case where 'cells' is a list - # containing at least 1 entry. - if not cells: - if cells is None: - # None means to bypass further scheduling as a filter - # took care of everything. - return - raise exception.NoCellsAvailable() - - weighted_cells = self.weight_handler.get_weighed_objects( - self.weighers, cells, filter_properties) - LOG.debug("Weighted cells: %(weighted_cells)s", - {'weighted_cells': weighted_cells}) - target_cells = [cell.obj for cell in weighted_cells] - return target_cells - - def _build_instances(self, message, target_cells, instance_uuids, - build_inst_kwargs): - """Attempt to build instance(s) or send msg to child cell.""" - ctxt = message.ctxt - instance_properties = obj_base.obj_to_primitive( - build_inst_kwargs['instances'][0]) - filter_properties = build_inst_kwargs['filter_properties'] - instance_type = filter_properties['instance_type'] - image = build_inst_kwargs['image'] - security_groups = build_inst_kwargs['security_groups'] - block_device_mapping = build_inst_kwargs['block_device_mapping'] - - LOG.debug("Building instances with routing_path=%(routing_path)s", - {'routing_path': message.routing_path}) - - for target_cell in target_cells: - try: - if target_cell.is_me: - # Need to create instance DB entries as the conductor - # expects that the instance(s) already exists. - instances = self._create_instances_here(ctxt, - instance_uuids, instance_properties, instance_type, - image, security_groups, block_device_mapping) - build_inst_kwargs['instances'] = instances - # Need to record the create action in the db as the - # conductor expects it to already exist. 
- self._create_action_here(ctxt, instance_uuids) - self.compute_task_api.build_instances(ctxt, - **build_inst_kwargs) - return - self.msg_runner.build_instances(ctxt, target_cell, - build_inst_kwargs) - return - except Exception: - LOG.exception("Couldn't communicate with cell '%s'", - target_cell.name) - # FIXME(comstud): Would be nice to kick this back up so that - # the parent cell could retry, if we had a parent. - LOG.error("Couldn't communicate with any cells") - raise exception.NoCellsAvailable() - - def build_instances(self, message, build_inst_kwargs): - image = build_inst_kwargs['image'] - instance_uuids = [inst['uuid'] for inst in - build_inst_kwargs['instances']] - instances = build_inst_kwargs['instances'] - request_spec = scheduler_utils.build_request_spec(image, instances) - filter_properties = copy.copy(build_inst_kwargs['filter_properties']) - filter_properties.update({'context': message.ctxt, - 'scheduler': self, - 'routing_path': message.routing_path, - 'host_sched_kwargs': build_inst_kwargs, - 'request_spec': request_spec}) - - self._schedule_build_to_cells(message, instance_uuids, - filter_properties, self._build_instances, build_inst_kwargs) - - def _schedule_build_to_cells(self, message, instance_uuids, - filter_properties, method, method_kwargs): - """Pick a cell where we should create a new instance(s).""" - try: - for i in range(max(0, CONF.cells.scheduler_retries) + 1): - try: - target_cells = self._grab_target_cells(filter_properties) - if target_cells is None: - # a filter took care of scheduling. skip. - return - - return method(message, target_cells, instance_uuids, - method_kwargs) - except exception.NoCellsAvailable: - if i == max(0, CONF.cells.scheduler_retries): - raise - sleep_time = max(1, CONF.cells.scheduler_retry_delay) - LOG.info("No cells available when scheduling. Will " - "retry in %(sleep_time)s second(s)", - {'sleep_time': sleep_time}) - time.sleep(sleep_time) - continue - except Exception: - LOG.exception("Error scheduling instances %(instance_uuids)s", - {'instance_uuids': instance_uuids}) - ctxt = message.ctxt - for instance_uuid in instance_uuids: - instance = objects.Instance(context=ctxt, uuid=instance_uuid, - vm_state=vm_states.ERROR) - self.msg_runner.instance_update_at_top(ctxt, instance) - try: - instance.vm_state = vm_states.ERROR - instance.save() - except Exception: - pass diff --git a/nova/cells/state.py b/nova/cells/state.py deleted file mode 100644 index 1d471220f2d..00000000000 --- a/nova/cells/state.py +++ /dev/null @@ -1,499 +0,0 @@ -# Copyright (c) 2012 Rackspace Hosting -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
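The _schedule_build_to_cells loop removed above retries the whole
filter-and-weigh pass a configurable number of times before giving up. A
minimal standalone sketch of that bounded-retry shape, with LookupError
standing in for exception.NoCellsAvailable and plain ints for the
CONF.cells options:

import time


def schedule_with_retries(pick_cells, retries=1, retry_delay=1):
    for attempt in range(max(0, retries) + 1):
        try:
            return pick_cells()
        except LookupError:
            # Out of attempts: propagate, as the deleted code re-raises
            # NoCellsAvailable on the final try.
            if attempt == max(0, retries):
                raise
            sleep_time = max(1, retry_delay)
            print('No cells available; retrying in %d second(s)' % sleep_time)
            time.sleep(sleep_time)


calls = []


def flaky_pick():
    calls.append(1)
    if len(calls) < 2:
        raise LookupError()
    return 'cell1'


print(schedule_with_retries(flaky_pick))  # one retry, then 'cell1'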
- -""" -CellState Manager -""" -import collections -import copy -import datetime -import functools -import time - -from oslo_config import cfg -from oslo_db import exception as db_exc -from oslo_log import log as logging -from oslo_serialization import jsonutils -from oslo_utils import timeutils -from oslo_utils import units -import six - -from nova.cells import rpc_driver -import nova.conf -from nova import context -from nova.db import base -from nova import exception -from nova import objects -from nova import rpc -from nova import servicegroup -from nova import utils - - -LOG = logging.getLogger(__name__) - -CONF = nova.conf.CONF - - -class CellState(object): - """Holds information for a particular cell.""" - def __init__(self, cell_name, is_me=False): - self.name = cell_name - self.is_me = is_me - self.last_seen = datetime.datetime.min - self.capabilities = {} - self.capacities = {} - self.db_info = {} - # TODO(comstud): The DB will specify the driver to use to talk - # to this cell, but there's no column for this yet. The only - # available driver is the rpc driver. - self.driver = rpc_driver.CellsRPCDriver() - - def update_db_info(self, cell_db_info): - """Update cell credentials from db.""" - self.db_info = {k: v for k, v in cell_db_info.items() - if k != 'name'} - - def update_capabilities(self, cell_metadata): - """Update cell capabilities for a cell.""" - self.last_seen = timeutils.utcnow() - self.capabilities = cell_metadata - - def update_capacities(self, capacities): - """Update capacity information for a cell.""" - self.last_seen = timeutils.utcnow() - self.capacities = capacities - - def get_cell_info(self): - """Return subset of cell information for OS API use.""" - db_fields_to_return = ['is_parent', 'weight_scale', 'weight_offset'] - url_fields_to_return = { - 'username': 'username', - 'hostname': 'rpc_host', - 'port': 'rpc_port', - } - cell_info = dict(name=self.name, capabilities=self.capabilities) - if self.db_info: - for field in db_fields_to_return: - cell_info[field] = self.db_info[field] - - url = rpc.get_transport_url(self.db_info['transport_url']) - if url.hosts: - for field, canonical in url_fields_to_return.items(): - cell_info[canonical] = getattr(url.hosts[0], field) - return cell_info - - def send_message(self, message): - """Send a message to a cell. Just forward this to the driver, - passing ourselves and the message as arguments. - """ - self.driver.send_message_to_cell(self, message) - - def __repr__(self): - me = "me" if self.is_me else "not_me" - return "Cell '%s' (%s)" % (self.name, me) - - -def sync_before(f): - """Use as a decorator to wrap methods that use cell information to - make sure they sync the latest information from the DB periodically. - """ - @functools.wraps(f) - def wrapper(self, *args, **kwargs): - self._cell_data_sync() - return f(self, *args, **kwargs) - return wrapper - - -def sync_after(f): - """Use as a decorator to wrap methods that update cell information - in the database to make sure the data is synchronized immediately. 
- """ - @functools.wraps(f) - def wrapper(self, *args, **kwargs): - result = f(self, *args, **kwargs) - self._cell_data_sync(force=True) - return result - return wrapper - - -_unset = object() - - -class CellStateManager(base.Base): - def __new__(cls, cell_state_cls=None, cells_config=_unset): - if cls is not CellStateManager: - return super(CellStateManager, cls).__new__(cls) - - if cells_config is _unset: - cells_config = CONF.cells.cells_config - - if cells_config: - return CellStateManagerFile(cell_state_cls) - - return CellStateManagerDB(cell_state_cls) - - def __init__(self, cell_state_cls=None): - super(CellStateManager, self).__init__() - if not cell_state_cls: - cell_state_cls = CellState - self.cell_state_cls = cell_state_cls - self.my_cell_state = cell_state_cls(CONF.cells.name, is_me=True) - self.parent_cells = {} - self.child_cells = {} - self.last_cell_db_check = datetime.datetime.min - self.servicegroup_api = servicegroup.API() - - attempts = 0 - while True: - try: - self._cell_data_sync(force=True) - break - except db_exc.DBError: - attempts += 1 - if attempts > 120: - raise - LOG.exception('DB error') - time.sleep(30) - - my_cell_capabs = {} - for cap in CONF.cells.capabilities: - name, value = cap.split('=', 1) - if ';' in value: - values = set(value.split(';')) - else: - values = set([value]) - my_cell_capabs[name] = values - self.my_cell_state.update_capabilities(my_cell_capabs) - - def _refresh_cells_from_dict(self, db_cells_dict): - """Make our cell info map match the db.""" - - # Update current cells. Delete ones that disappeared - for cells_dict in (self.parent_cells, self.child_cells): - for cell_name, cell_info in cells_dict.items(): - is_parent = cell_info.db_info['is_parent'] - db_dict = db_cells_dict.get(cell_name) - if db_dict and is_parent == db_dict['is_parent']: - cell_info.update_db_info(db_dict) - else: - del cells_dict[cell_name] - - # Add new cells - for cell_name, db_info in db_cells_dict.items(): - if db_info['is_parent']: - cells_dict = self.parent_cells - else: - cells_dict = self.child_cells - if cell_name not in cells_dict: - cells_dict[cell_name] = self.cell_state_cls(cell_name) - cells_dict[cell_name].update_db_info(db_info) - - def _time_to_sync(self): - """Is it time to sync the DB against our memory cache?""" - diff = timeutils.utcnow() - self.last_cell_db_check - return diff.seconds >= CONF.cells.db_check_interval - - def _update_our_capacity(self, ctxt=None): - """Update our capacity in the self.my_cell_state CellState. - - This will add/update 2 entries in our CellState.capacities, - 'ram_free' and 'disk_free'. - - The values of these are both dictionaries with the following - format: - - {'total_mb': , - 'units_by_mb: } - - contains the number of units that we can build for - every distinct memory or disk requirement that we have based on - instance types. This number is computed by looking at room available - on every compute_node. - - Take the following instance_types as an example: - - [{'memory_mb': 1024, 'root_gb': 10, 'ephemeral_gb': 100}, - {'memory_mb': 2048, 'root_gb': 20, 'ephemeral_gb': 200}] - - capacities['ram_free']['units_by_mb'] would contain the following: - - {'1024': , - '2048': } - - capacities['disk_free']['units_by_mb'] would contain the following: - - {'122880': , - '225280': } - - Units are in MB, so 122880 = (10 + 100) * 1024. - - NOTE(comstud): Perhaps we should only report a single number - available per instance_type. 
- """ - - if not ctxt: - ctxt = context.get_admin_context() - - reserve_level = CONF.cells.reserve_percent / 100.0 - - def _defaultdict_int(): - return collections.defaultdict(int) - compute_hosts = collections.defaultdict(_defaultdict_int) - - def _get_compute_hosts(): - service_refs = {service.host: service - for service in objects.ServiceList.get_by_binary( - ctxt, 'nova-compute')} - - compute_nodes = objects.ComputeNodeList.get_all(ctxt) - for compute in compute_nodes: - host = compute.host - service = service_refs.get(host) - if not service or service['disabled']: - continue - - # NOTE: This works because it is only used for computes found - # in the cell this is run in. It can not be used to check on - # computes in a child cell from the api cell. If this is run - # in the api cell objects.ComputeNodeList.get_all() above will - # return an empty list. - alive = self.servicegroup_api.service_is_up(service) - if not alive: - continue - - chost = compute_hosts[host] - chost['free_ram_mb'] += max(0, compute.free_ram_mb) - chost['free_disk_mb'] += max(0, compute.free_disk_gb) * 1024 - chost['total_ram_mb'] += max(0, compute.memory_mb) - chost['total_disk_mb'] += max(0, compute.local_gb) * 1024 - - _get_compute_hosts() - if not compute_hosts: - self.my_cell_state.update_capacities({}) - return - - ram_mb_free_units = {} - disk_mb_free_units = {} - total_ram_mb_free = 0 - total_disk_mb_free = 0 - - def _free_units(total, free, per_inst): - if per_inst: - min_free = total * reserve_level - free = max(0, free - min_free) - return int(free / per_inst) - else: - return 0 - - flavors = objects.FlavorList.get_all(ctxt) - memory_mb_slots = frozenset( - [flavor.memory_mb for flavor in flavors]) - disk_mb_slots = frozenset( - [(flavor.root_gb + flavor.ephemeral_gb) * units.Ki - for flavor in flavors]) - - for compute_values in compute_hosts.values(): - total_ram_mb_free += compute_values['free_ram_mb'] - total_disk_mb_free += compute_values['free_disk_mb'] - for memory_mb_slot in memory_mb_slots: - ram_mb_free_units.setdefault(str(memory_mb_slot), 0) - free_units = _free_units(compute_values['total_ram_mb'], - compute_values['free_ram_mb'], memory_mb_slot) - ram_mb_free_units[str(memory_mb_slot)] += free_units - for disk_mb_slot in disk_mb_slots: - disk_mb_free_units.setdefault(str(disk_mb_slot), 0) - free_units = _free_units(compute_values['total_disk_mb'], - compute_values['free_disk_mb'], disk_mb_slot) - disk_mb_free_units[str(disk_mb_slot)] += free_units - - capacities = {'ram_free': {'total_mb': total_ram_mb_free, - 'units_by_mb': ram_mb_free_units}, - 'disk_free': {'total_mb': total_disk_mb_free, - 'units_by_mb': disk_mb_free_units}} - self.my_cell_state.update_capacities(capacities) - - @sync_before - def get_cell_info_for_neighbors(self): - """Return cell information for all neighbor cells.""" - cell_list = [cell.get_cell_info() - for cell in six.itervalues(self.child_cells)] - cell_list.extend([cell.get_cell_info() - for cell in six.itervalues(self.parent_cells)]) - return cell_list - - @sync_before - def get_my_state(self): - """Return information for my (this) cell.""" - return self.my_cell_state - - @sync_before - def get_child_cells(self): - """Return list of child cell_infos.""" - return list(self.child_cells.values()) - - @sync_before - def get_parent_cells(self): - """Return list of parent cell_infos.""" - return list(self.parent_cells.values()) - - @sync_before - def get_parent_cell(self, cell_name): - return self.parent_cells.get(cell_name) - - @sync_before - def 
get_child_cell(self, cell_name): - return self.child_cells.get(cell_name) - - @sync_before - def update_cell_capabilities(self, cell_name, capabilities): - """Update capabilities for a cell.""" - cell = (self.child_cells.get(cell_name) or - self.parent_cells.get(cell_name)) - if not cell: - LOG.error("Unknown cell '%(cell_name)s' when trying to " - "update capabilities", - {'cell_name': cell_name}) - return - # Make sure capabilities are sets. - for capab_name, values in capabilities.items(): - capabilities[capab_name] = set(values) - cell.update_capabilities(capabilities) - - @sync_before - def update_cell_capacities(self, cell_name, capacities): - """Update capacities for a cell.""" - cell = (self.child_cells.get(cell_name) or - self.parent_cells.get(cell_name)) - if not cell: - LOG.error("Unknown cell '%(cell_name)s' when trying to " - "update capacities", - {'cell_name': cell_name}) - return - cell.update_capacities(capacities) - - @sync_before - def get_our_capabilities(self, include_children=True): - capabs = copy.deepcopy(self.my_cell_state.capabilities) - if include_children: - for cell in self.child_cells.values(): - if timeutils.is_older_than(cell.last_seen, - CONF.cells.mute_child_interval): - continue - for capab_name, values in cell.capabilities.items(): - if capab_name not in capabs: - capabs[capab_name] = set([]) - capabs[capab_name] |= values - return capabs - - def _add_to_dict(self, target, src): - for key, value in src.items(): - if isinstance(value, dict): - target.setdefault(key, {}) - self._add_to_dict(target[key], value) - continue - target.setdefault(key, 0) - target[key] += value - - @sync_before - def get_our_capacities(self, include_children=True): - capacities = copy.deepcopy(self.my_cell_state.capacities) - if include_children: - for cell in self.child_cells.values(): - self._add_to_dict(capacities, cell.capacities) - return capacities - - @sync_before - def get_capacities(self, cell_name=None): - if not cell_name or cell_name == self.my_cell_state.name: - return self.get_our_capacities() - if cell_name in self.child_cells: - return self.child_cells[cell_name].capacities - raise exception.CellNotFound(cell_name=cell_name) - - @sync_before - def cell_get(self, ctxt, cell_name): - for cells_dict in (self.parent_cells, self.child_cells): - if cell_name in cells_dict: - return cells_dict[cell_name] - - raise exception.CellNotFound(cell_name=cell_name) - - -class CellStateManagerDB(CellStateManager): - @utils.synchronized('cell-db-sync') - def _cell_data_sync(self, force=False): - """Update cell status for all cells from the backing data store - when necessary. - - :param force: If True, cell status will be updated regardless - of whether it's time to do so. 
- """ - if force or self._time_to_sync(): - LOG.debug("Updating cell cache from db.") - self.last_cell_db_check = timeutils.utcnow() - ctxt = context.get_admin_context() - db_cells = self.db.cell_get_all(ctxt) - db_cells_dict = {cell['name']: cell for cell in db_cells} - self._refresh_cells_from_dict(db_cells_dict) - self._update_our_capacity(ctxt) - - @sync_after - def cell_create(self, ctxt, values): - return self.db.cell_create(ctxt, values) - - @sync_after - def cell_update(self, ctxt, cell_name, values): - return self.db.cell_update(ctxt, cell_name, values) - - @sync_after - def cell_delete(self, ctxt, cell_name): - return self.db.cell_delete(ctxt, cell_name) - - -class CellStateManagerFile(CellStateManager): - def __init__(self, cell_state_cls=None): - cells_config = CONF.cells.cells_config - self.cells_config_path = CONF.find_file(cells_config) - if not self.cells_config_path: - raise cfg.ConfigFilesNotFoundError(config_files=[cells_config]) - super(CellStateManagerFile, self).__init__(cell_state_cls) - - def _cell_data_sync(self, force=False): - """Update cell status for all cells from the backing data store - when necessary. - - :param force: If True, cell status will be updated regardless - of whether it's time to do so. - """ - reloaded, data = utils.read_cached_file(self.cells_config_path, - force_reload=force) - - if reloaded: - LOG.debug("Updating cell cache from config file.") - self.cells_config_data = jsonutils.loads(data) - self._refresh_cells_from_dict(self.cells_config_data) - - if force or self._time_to_sync(): - self.last_cell_db_check = timeutils.utcnow() - self._update_our_capacity() - - def cell_create(self, ctxt, values): - raise exception.CellsUpdateUnsupported() - - def cell_update(self, ctxt, cell_name, values): - raise exception.CellsUpdateUnsupported() - - def cell_delete(self, ctxt, cell_name): - raise exception.CellsUpdateUnsupported() diff --git a/nova/cells/utils.py b/nova/cells/utils.py deleted file mode 100644 index 8c3b4563fbd..00000000000 --- a/nova/cells/utils.py +++ /dev/null @@ -1,232 +0,0 @@ -# Copyright (c) 2012 Rackspace Hosting -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -""" -Cells Utility Methods -""" -import random -import sys - -import six - -import nova.conf -from nova import objects -from nova.objects import base as obj_base - - -# Separator used between cell names for the 'full cell name' and routing -# path -PATH_CELL_SEP = '!' -# Flag prepended to a cell name to indicate data shouldn't be synced during -# an instance save. There are no illegal chars in a cell name so using the -# meaningful PATH_CELL_SEP in an invalid way will need to suffice. -BLOCK_SYNC_FLAG = '!!' 
-# Separator used between cell name and item -CELL_ITEM_SEP = '@' - -CONF = nova.conf.CONF - - -class ProxyObjectSerializer(obj_base.NovaObjectSerializer): - def __init__(self): - super(ProxyObjectSerializer, self).__init__() - self.serializer = super(ProxyObjectSerializer, self) - - def _process_object(self, context, objprim): - return _CellProxy.obj_from_primitive(self.serializer, objprim, context) - - -class _CellProxy(object): - def __init__(self, obj, cell_path): - self._obj = obj - self._cell_path = cell_path - - @property - def id(self): - return cell_with_item(self._cell_path, self._obj.id) - - @property - def host(self): - return cell_with_item(self._cell_path, self._obj.host) - - def __getitem__(self, key): - if key == 'id': - return self.id - if key == 'host': - return self.host - - return getattr(self._obj, key) - - def __contains__(self, key): - """Pass-through "in" check to the wrapped object. - - This is needed to proxy any types of checks in the calling code - like:: - - if 'availability_zone' in service: - ... - - :param key: They key to look for in the wrapped object. - :returns: True if key is in the wrapped object, False otherwise. - """ - return key in self._obj - - def obj_to_primitive(self): - obj_p = self._obj.obj_to_primitive() - obj_p['cell_proxy.class_name'] = self.__class__.__name__ - obj_p['cell_proxy.cell_path'] = self._cell_path - return obj_p - - @classmethod - def obj_from_primitive(cls, serializer, primitive, context=None): - obj_primitive = primitive.copy() - cell_path = obj_primitive.pop('cell_proxy.cell_path', None) - klass_name = obj_primitive.pop('cell_proxy.class_name', None) - obj = serializer._process_object(context, obj_primitive) - if klass_name is not None and cell_path is not None: - klass = getattr(sys.modules[__name__], klass_name) - return klass(obj, cell_path) - else: - return obj - - # dict-ish syntax sugar - def _iteritems(self): - """For backwards-compatibility with dict-based objects. - - NOTE(sbauza): May be removed in the future. - """ - for name in self._obj.obj_fields: - if (self._obj.obj_attr_is_set(name) or - name in self._obj.obj_extra_fields): - if name == 'id': - yield name, self.id - elif name == 'host': - yield name, self.host - else: - yield name, getattr(self._obj, name) - - if six.PY2: - iteritems = _iteritems - else: - items = _iteritems - - def __getattr__(self, key): - return getattr(self._obj, key) - - -class ComputeNodeProxy(_CellProxy): - pass - - -class ServiceProxy(_CellProxy): - def __getattr__(self, key): - if key == 'compute_node': - # NOTE(sbauza): As the Service object is still having a nested - # ComputeNode object that consumers of this Proxy don't use, we can - # safely remove it from what's returned - raise AttributeError - # NOTE(claudiub): needed for py34 compatibility. - # get self._obj first, without ending into an infinite recursion. - return getattr(self.__getattribute__("_obj"), key) - - -def get_instances_to_sync(context, updated_since=None, project_id=None, - deleted=True, shuffle=False, uuids_only=False): - """Return a generator that will return a list of active and - deleted instances to sync with parent cells. The list may - optionally be shuffled for periodic updates so that multiple - cells services aren't self-healing the same instances in nearly - lockstep. 
- """ - def _get_paginated_instances(context, filters, shuffle, limit, marker): - instances = objects.InstanceList.get_by_filters( - context, filters, sort_key='deleted', sort_dir='asc', - limit=limit, marker=marker) - if len(instances) > 0: - marker = instances[-1]['uuid'] - # NOTE(melwitt/alaski): Need a list that supports assignment for - # shuffle. And pop() on the returned result. - instances = list(instances) - if shuffle: - random.shuffle(instances) - return instances, marker - - filters = {} - if updated_since is not None: - filters['changes-since'] = updated_since - if project_id is not None: - filters['project_id'] = project_id - if not deleted: - filters['deleted'] = False - # Active instances first. - limit = CONF.cells.instance_update_sync_database_limit - marker = None - - instances = [] - while True: - if not instances: - instances, marker = _get_paginated_instances(context, filters, - shuffle, limit, marker) - if not instances: - break - instance = instances.pop(0) - if uuids_only: - yield instance.uuid - else: - yield instance - - -def cell_with_item(cell_name, item): - """Turn cell_name and item into @.""" - if cell_name is None: - return item - return cell_name + CELL_ITEM_SEP + str(item) - - -def split_cell_and_item(cell_and_item): - """Split a combined cell@item and return them.""" - result = cell_and_item.rsplit(CELL_ITEM_SEP, 1) - if len(result) == 1: - return (None, cell_and_item) - else: - return result - - -def add_cell_to_compute_node(compute_node, cell_name): - """Fix compute_node attributes that should be unique. Allows - API cell to query the 'id' by cell@id. - """ - # NOTE(sbauza): As compute_node is a ComputeNode object, we need to wrap it - # for adding the cell_path information - compute_proxy = ComputeNodeProxy(compute_node, cell_name) - return compute_proxy - - -def add_cell_to_service(service, cell_name): - """Fix service attributes that should be unique. Allows - API cell to query the 'id' or 'host' by cell@id/host. - """ - # NOTE(sbauza): As service is a Service object, we need to wrap it - # for adding the cell_path information - service_proxy = ServiceProxy(service, cell_name) - return service_proxy - - -def add_cell_to_task_log(task_log, cell_name): - """Fix task_log attributes that should be unique. In particular, - the 'id' and 'host' fields should be prepended with cell name. - """ - task_log['id'] = cell_with_item(cell_name, task_log['id']) - task_log['host'] = cell_with_item(cell_name, task_log['host']) diff --git a/nova/cells/weights/__init__.py b/nova/cells/weights/__init__.py deleted file mode 100644 index 202a6a31a89..00000000000 --- a/nova/cells/weights/__init__.py +++ /dev/null @@ -1,43 +0,0 @@ -# Copyright (c) 2012-2013 Rackspace Hosting -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -""" -Cell Scheduler weights -""" - -from nova import weights - - -class WeightedCell(weights.WeighedObject): - def __repr__(self): - return "WeightedCell [cell: %s, weight: %s]" % ( - self.obj.name, self.weight) - - -class BaseCellWeigher(weights.BaseWeigher): - """Base class for cell weights.""" - pass - - -class CellWeightHandler(weights.BaseWeightHandler): - object_class = WeightedCell - - def __init__(self): - super(CellWeightHandler, self).__init__(BaseCellWeigher) - - -def all_weighers(): - """Return a list of weight plugin classes found in this directory.""" - return CellWeightHandler().get_all_classes() diff --git a/nova/cells/weights/mute_child.py b/nova/cells/weights/mute_child.py deleted file mode 100644 index 6cf7f46817c..00000000000 --- a/nova/cells/weights/mute_child.py +++ /dev/null @@ -1,59 +0,0 @@ -# Copyright (c) 2013 Rackspace Hosting -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -""" -If a child cell hasn't sent capacity or capability updates in a while, -downgrade its likelihood of being chosen for scheduling requests. -""" - -from oslo_log import log as logging -from oslo_utils import timeutils - -from nova.cells import weights -import nova.conf - -LOG = logging.getLogger(__name__) - -CONF = nova.conf.CONF - - -class MuteChildWeigher(weights.BaseCellWeigher): - """If a child cell hasn't been heard from, greatly lower its selection - weight. - """ - - MUTE_WEIGH_VALUE = 1.0 - - def weight_multiplier(self): - # negative multiplier => lower weight - return CONF.cells.mute_weight_multiplier - - def _weigh_object(self, cell, weight_properties): - """Check cell against the last_seen timestamp that indicates the time - that the most recent capability or capacity update was received from - the given cell. - """ - - last_seen = cell.last_seen - secs = CONF.cells.mute_child_interval - - if timeutils.is_older_than(last_seen, secs): - # yep, that's a mute child; recommend highly that it be skipped! - LOG.warning("%(cell)s has not been seen since %(last_seen)s " - "and is being treated as mute.", - {'cell': cell, 'last_seen': last_seen}) - return self.MUTE_WEIGH_VALUE - else: - return 0 diff --git a/nova/cells/weights/ram_by_instance_type.py b/nova/cells/weights/ram_by_instance_type.py deleted file mode 100644 index 90052be0e66..00000000000 --- a/nova/cells/weights/ram_by_instance_type.py +++ /dev/null @@ -1,46 +0,0 @@ -# Copyright (c) 2012-2013 Rackspace Hosting -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -""" -Weigh cells by memory needed in a way that spreads instances. -""" - -from nova.cells import weights -import nova.conf - - -CONF = nova.conf.CONF - - -class RamByInstanceTypeWeigher(weights.BaseCellWeigher): - """Weigh cells by instance_type requested.""" - - def weight_multiplier(self): - return CONF.cells.ram_weight_multiplier - - def _weigh_object(self, cell, weight_properties): - """Use the 'ram_free' for a particular instance_type advertised from a - child cell's capacity to compute a weight. We want to direct the - build to a cell with a higher capacity. Since higher weights win, - we just return the number of units available for the instance_type. - """ - request_spec = weight_properties['request_spec'] - instance_type = request_spec['instance_type'] - memory_needed = instance_type['memory_mb'] - - ram_free = cell.capacities.get('ram_free', {}) - units_by_mb = ram_free.get('units_by_mb', {}) - - return units_by_mb.get(str(memory_needed), 0) diff --git a/nova/cells/weights/weight_offset.py b/nova/cells/weights/weight_offset.py deleted file mode 100644 index adbb0c57308..00000000000 --- a/nova/cells/weights/weight_offset.py +++ /dev/null @@ -1,39 +0,0 @@ -# Copyright (c) 2012-2013 Rackspace Hosting -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -""" -Weigh cells by their weight_offset in the DB. Cells with higher -weight_offsets in the DB will be preferred. -""" - -from nova.cells import weights -import nova.conf - - -CONF = nova.conf.CONF - - -class WeightOffsetWeigher(weights.BaseCellWeigher): - """Weight cell by weight_offset db field. - Originally designed so you can set a default cell by putting - its weight_offset to 999999999999999 (highest weight wins) - """ - - def weight_multiplier(self): - return CONF.cells.offset_weight_multiplier - - def _weigh_object(self, cell, weight_properties): - """Returns whatever was in the DB for weight_offset.""" - return cell.db_info.get('weight_offset', 0) diff --git a/nova/cmd/__init__.py b/nova/cmd/__init__.py index 1b1ddf772c1..726e0c3da54 100644 --- a/nova/cmd/__init__.py +++ b/nova/cmd/__init__.py @@ -1,4 +1,4 @@ -# Copyright (c) 2013 Hewlett-Packard Development Company, L.P. +# Copyright (c) 2019 Red Hat, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may @@ -13,20 +13,4 @@ # License for the specific language governing permissions and limitations # under the License. -import eventlet -from oslo_utils import importutils -from six.moves import reload_module - -from nova import debugger - -if debugger.enabled(): - # turn off thread patching to enable the remote debugger - eventlet.monkey_patch(os=False, thread=False) -else: - eventlet.monkey_patch(os=False) - -# NOTE(rgerganov): oslo.context is storing a global thread-local variable -# which keeps the request context for the current thread. If oslo.context is -# imported before calling monkey_patch(), then this thread-local won't be -# green. 
To workaround this, reload the module after calling monkey_patch() -reload_module(importutils.import_module('oslo_context.context')) +import nova.monkey_patch # noqa diff --git a/nova/cmd/api.py b/nova/cmd/api.py index 2080b0d5e81..b6a9a2e284c 100644 --- a/nova/cmd/api.py +++ b/nova/cmd/api.py @@ -27,13 +27,16 @@ from oslo_reports import opts as gmr_opts import nova.conf +from nova.conf import remote_debug from nova import config from nova import exception from nova import objects +from nova import quota from nova import service from nova import version CONF = nova.conf.CONF +remote_debug.register_cli_opts(CONF) def main(): @@ -51,6 +54,7 @@ def main(): launcher = service.process_launcher() started = 0 + quota.QUOTAS.initialize() for api in CONF.enabled_apis: should_use_ssl = api in CONF.enabled_ssl_apis try: diff --git a/nova/cmd/api_metadata.py b/nova/cmd/api_metadata.py index 493f3e0c8e9..0d95de2f78e 100644 --- a/nova/cmd/api_metadata.py +++ b/nova/cmd/api_metadata.py @@ -24,6 +24,7 @@ from nova.conductor import rpcapi as conductor_rpcapi import nova.conf +from nova.conf import remote_debug from nova import config from nova import objects from nova.objects import base as objects_base @@ -32,6 +33,7 @@ CONF = nova.conf.CONF +remote_debug.register_cli_opts(CONF) def main(): diff --git a/nova/cmd/api_os_compute.py b/nova/cmd/api_os_compute.py index f7d3beb10c6..643c3ed7b10 100644 --- a/nova/cmd/api_os_compute.py +++ b/nova/cmd/api_os_compute.py @@ -23,6 +23,7 @@ from oslo_reports import opts as gmr_opts import nova.conf +from nova.conf import remote_debug from nova import config from nova import objects from nova import service @@ -30,6 +31,7 @@ CONF = nova.conf.CONF +remote_debug.register_cli_opts(CONF) def main(): diff --git a/nova/cmd/baseproxy.py b/nova/cmd/baseproxy.py index 020d0aadf78..fdd4f28892b 100644 --- a/nova/cmd/baseproxy.py +++ b/nova/cmd/baseproxy.py @@ -26,13 +26,16 @@ import nova.conf from nova.conf import novnc +from nova.conf import remote_debug from nova.console import websocketproxy from nova import objects from nova import version CONF = nova.conf.CONF +remote_debug.register_cli_opts(CONF) novnc.register_cli_opts(CONF) + gmr_opts.set_defaults(CONF) objects.register_all() @@ -72,6 +75,8 @@ def proxy(host, port, security_proxy=None): cert=CONF.cert, key=CONF.key, ssl_only=CONF.ssl_only, + ssl_ciphers=CONF.console.ssl_ciphers, + ssl_minimum_version=CONF.console.ssl_minimum_version, daemon=CONF.daemon, record=CONF.record, traffic=not CONF.daemon, diff --git a/nova/cmd/bigvm.py b/nova/cmd/bigvm.py new file mode 100644 index 00000000000..0d39516d4d8 --- /dev/null +++ b/nova/cmd/bigvm.py @@ -0,0 +1,44 @@ +# Copyright 2019 SAP SE +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
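The recurring `remote_debug.register_cli_opts(CONF)` hunks above all follow
the same rule: CLI options must be registered on the config object before
the command line is parsed in config.parse_args(). A small sketch of that
ordering with a hypothetical option (the real option set lives in
nova/conf/remote_debug.py):

from oslo_config import cfg

# Hypothetical CLI option; not the actual remote-debug option set.
_cli_opts = [cfg.StrOpt('debug_host', help='Debugger host to connect to.')]


def register_cli_opts(conf):
    conf.register_cli_opts(_cli_opts)


CONF = cfg.ConfigOpts()
register_cli_opts(CONF)  # before parsing, as in each nova.cmd module
CONF([])                 # parse an (empty) command line
print(CONF.debug_host)   # None until set on the command line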
+ +"""Starter script for BigVM manager.""" + +import sys + +from oslo_log import log as logging +from oslo_reports import guru_meditation_report as gmr +from oslo_reports import opts as gmr_opts + +import nova.conf +from nova import config +from nova import objects +from nova import service +from nova import version + +CONF = nova.conf.CONF + + +def main(): + config.parse_args(sys.argv) + logging.setup(CONF, "nova") + objects.register_all() + gmr_opts.set_defaults(CONF) + objects.Service.enable_min_version_cache() + + gmr.TextGuruMeditation.setup_autorun(version, conf=CONF) + + server = service.Service.create(binary='nova-bigvm') + service.serve(server) + service.wait() diff --git a/nova/cmd/cells.py b/nova/cmd/cells.py deleted file mode 100644 index 87734babef0..00000000000 --- a/nova/cmd/cells.py +++ /dev/null @@ -1,49 +0,0 @@ -# Copyright (c) 2012 Rackspace Hosting -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -"""Starter script for Nova Cells Service.""" - -import sys - -from oslo_log import log as logging -from oslo_reports import guru_meditation_report as gmr -from oslo_reports import opts as gmr_opts - -from nova import cells -import nova.conf -from nova import config -from nova import objects -from nova import service -from nova import version - -CONF = nova.conf.CONF -LOG = logging.getLogger('nova.cells') - - -def main(): - config.parse_args(sys.argv) - logging.setup(CONF, 'nova') - objects.register_all() - gmr_opts.set_defaults(CONF) - - gmr.TextGuruMeditation.setup_autorun(version, conf=CONF) - - LOG.warning('Cells v1 is deprecated in favor of Cells v2 and will be ' - 'removed in the future.') - server = service.Service.create(binary='nova-cells', - topic=cells.TOPIC, - manager='nova.cells.manager.CellsManager') - service.serve(server) - service.wait() diff --git a/nova/cmd/common.py b/nova/cmd/common.py index e65f78b735c..88dabc2c454 100644 --- a/nova/cmd/common.py +++ b/nova/cmd/common.py @@ -17,41 +17,20 @@ Common functions used by different CLI interfaces. """ -from __future__ import print_function - import argparse -import traceback +import inspect from oslo_log import log as logging -import six import nova.conf -import nova.db.api +import nova.db.main.api from nova import exception from nova.i18n import _ -from nova import utils CONF = nova.conf.CONF LOG = logging.getLogger(__name__) -def block_db_access(service_name): - """Blocks Nova DB access.""" - - class NoDB(object): - def __getattr__(self, attr): - return self - - def __call__(self, *args, **kwargs): - stacktrace = "".join(traceback.format_stack()) - LOG.error('No db access allowed in %(service_name)s: ' - '%(stacktrace)s', - dict(service_name=service_name, stacktrace=stacktrace)) - raise exception.DBNotAllowed(service_name) - - nova.db.api.IMPL = NoDB() - - def validate_args(fn, *args, **kwargs): """Check that the supplied args are sufficient for calling a function. 
@@ -68,12 +47,12 @@ def validate_args(fn, *args, **kwargs): :param arg: the positional arguments supplied :param kwargs: the keyword arguments supplied """ - argspec = utils.getargspec(fn) + argspec = inspect.getfullargspec(fn) num_defaults = len(argspec.defaults or []) required_args = argspec.args[:len(argspec.args) - num_defaults] - if six.get_method_self(fn) is not None: + if fn.__self__ is not None: required_args.pop(0) missing = [arg for arg in required_args if arg not in kwargs] @@ -124,6 +103,7 @@ def add_command_parsers(subparsers, categories): parser.set_defaults(command_object=command_object) category_subparsers = parser.add_subparsers(dest='action') + category_subparsers.required = True for (action, action_fn) in methods_of(command_object): parser = category_subparsers.add_parser( @@ -169,7 +149,7 @@ def get_action_fn(): fn = CONF.category.action_fn fn_args = [] for arg in CONF.category.action_args: - if isinstance(arg, six.binary_type): + if isinstance(arg, bytes): arg = arg.decode('utf-8') fn_args.append(arg) @@ -178,7 +158,7 @@ def get_action_fn(): v = getattr(CONF.category, 'action_kwarg_' + k) if v is None: continue - if isinstance(v, six.binary_type): + if isinstance(v, bytes): v = v.decode('utf-8') fn_kwargs[k] = v diff --git a/nova/cmd/compute.py b/nova/cmd/compute.py index 01fd20de2e5..a433aeb6931 100644 --- a/nova/cmd/compute.py +++ b/nova/cmd/compute.py @@ -25,11 +25,12 @@ from oslo_reports import guru_meditation_report as gmr from oslo_reports import opts as gmr_opts -from nova.cmd import common as cmd_common from nova.compute import rpcapi as compute_rpcapi from nova.conductor import rpcapi as conductor_rpcapi import nova.conf +from nova.conf import remote_debug from nova import config +import nova.db.main.api from nova import objects from nova.objects import base as objects_base from nova import service @@ -37,6 +38,7 @@ from nova import version CONF = nova.conf.CONF +remote_debug.register_cli_opts(CONF) def main(): @@ -50,7 +52,8 @@ def main(): gmr.TextGuruMeditation.setup_autorun(version, conf=CONF) - cmd_common.block_db_access('nova-compute') + # disable database access for this service + nova.db.main.api.DISABLE_DB_ACCESS = True objects_base.NovaObject.indirection_api = conductor_rpcapi.ConductorAPI() objects.Service.enable_min_version_cache() server = service.Service.create(binary='nova-compute', diff --git a/nova/cmd/conductor.py b/nova/cmd/conductor.py index 16847c2e561..f7ae1f6b8f8 100644 --- a/nova/cmd/conductor.py +++ b/nova/cmd/conductor.py @@ -23,12 +23,14 @@ from nova.conductor import rpcapi import nova.conf +from nova.conf import remote_debug from nova import config from nova import objects from nova import service from nova import version CONF = nova.conf.CONF +remote_debug.register_cli_opts(CONF) def main(): diff --git a/nova/cmd/console.py b/nova/cmd/console.py deleted file mode 100644 index e25e2114ba3..00000000000 --- a/nova/cmd/console.py +++ /dev/null @@ -1,45 +0,0 @@ -# Copyright (c) 2010 OpenStack Foundation -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the -# License for the specific language governing permissions and limitations -# under the License. - -"""Starter script for Nova Console Proxy.""" - -import sys - -from oslo_config import cfg -from oslo_log import log as logging -from oslo_reports import guru_meditation_report as gmr -from oslo_reports import opts as gmr_opts - -from nova import config -from nova.console import rpcapi as console_rpcapi -from nova import objects -from nova import service -from nova import version - -CONF = cfg.CONF - - -def main(): - config.parse_args(sys.argv) - logging.setup(CONF, "nova") - objects.register_all() - gmr_opts.set_defaults(CONF) - - gmr.TextGuruMeditation.setup_autorun(version, conf=CONF) - - server = service.Service.create(binary='nova-console', - topic=console_rpcapi.RPC_TOPIC) - service.serve(server) - service.wait() diff --git a/nova/cmd/consoleauth.py b/nova/cmd/consoleauth.py deleted file mode 100644 index e28d56a10b0..00000000000 --- a/nova/cmd/consoleauth.py +++ /dev/null @@ -1,50 +0,0 @@ -# Copyright (c) 2012 OpenStack Foundation -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -"""VNC Console Proxy Server.""" - -import sys - -from oslo_log import log as logging -from oslo_reports import guru_meditation_report as gmr -from oslo_reports import opts as gmr_opts - -import nova.conf -from nova import config -from nova.consoleauth import rpcapi -from nova import objects -from nova import service -from nova import version - -CONF = nova.conf.CONF -LOG = logging.getLogger('nova.consoleauth') - - -def main(): - config.parse_args(sys.argv) - logging.setup(CONF, "nova") - objects.register_all() - gmr_opts.set_defaults(CONF) - - gmr.TextGuruMeditation.setup_autorun(version, conf=CONF) - - LOG.warning('The nova-consoleauth service is deprecated as console token ' - 'authorization storage has moved from the nova-consoleauth ' - 'service backend to the database backend.') - - server = service.Service.create(binary='nova-consoleauth', - topic=rpcapi.RPC_TOPIC) - service.serve(server) - service.wait() diff --git a/nova/cmd/dhcpbridge.py b/nova/cmd/dhcpbridge.py deleted file mode 100644 index 3d3ab9dbad5..00000000000 --- a/nova/cmd/dhcpbridge.py +++ /dev/null @@ -1,137 +0,0 @@ -# Copyright 2010 United States Government as represented by the -# Administrator of the National Aeronautics and Space Administration. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -""" -Handle lease database updates from DHCP servers. 
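The deleted console.py and consoleauth.py, like the new bigvm.py earlier in this patch, are variations on one entry-point skeleton. Condensed here with each step annotated; the `nova-myservice` binary name is hypothetical, while the calls mirror the ones visible throughout this diff:

```python
"""Skeleton shared by nova's service entry points (illustrative)."""

import sys

from oslo_log import log as logging
from oslo_reports import guru_meditation_report as gmr
from oslo_reports import opts as gmr_opts

import nova.conf
from nova import config
from nova import objects
from nova import service
from nova import version

CONF = nova.conf.CONF


def main():
    # Parse CLI and config-file options into the global CONF object.
    config.parse_args(sys.argv)
    # Wire oslo.log up to the parsed configuration.
    logging.setup(CONF, 'nova')
    # Register all versioned objects so RPC (de)serialization works.
    objects.register_all()
    # Enable the Guru Meditation Report: a SIGUSR2 diagnostics dump.
    gmr_opts.set_defaults(CONF)
    gmr.TextGuruMeditation.setup_autorun(version, conf=CONF)
    # Build the service wrapper and block until the service stops.
    server = service.Service.create(binary='nova-myservice')
    service.serve(server)
    service.wait()
```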
-""" - -from __future__ import print_function - -import os -import sys - -from oslo_config import cfg -from oslo_log import log as logging -from oslo_serialization import jsonutils -from oslo_utils import importutils - -from nova.cmd import common as cmd_common -from nova.conductor import rpcapi as conductor_rpcapi -import nova.conf -from nova import config -from nova import context -from nova.network import rpcapi as network_rpcapi -from nova import objects -from nova.objects import base as objects_base -from nova import rpc - -CONF = nova.conf.CONF -LOG = logging.getLogger(__name__) - - -def add_lease(mac, ip_address): - """Set the IP that was assigned by the DHCP server.""" - api = network_rpcapi.NetworkAPI() - api.lease_fixed_ip(context.get_admin_context(), ip_address, CONF.host) - - -def old_lease(mac, ip_address): - """Called when an old lease is recognized.""" - # NOTE(vish): We assume we heard about this lease the first time. - # If not, we will get it the next time the lease is - # renewed. - pass - - -def del_lease(mac, ip_address): - """Called when a lease expires.""" - api = network_rpcapi.NetworkAPI() - api.release_fixed_ip(context.get_admin_context(), ip_address, - CONF.host, mac) - - -def init_leases(network_id): - """Get the list of hosts for a network.""" - ctxt = context.get_admin_context() - network = objects.Network.get_by_id(ctxt, network_id) - network_manager = importutils.import_object(CONF.network_manager) - return network_manager.get_dhcp_leases(ctxt, network) - - -def add_action_parsers(subparsers): - subparsers.add_parser('init') - - # NOTE(cfb): dnsmasq always passes mac, and ip. hostname - # is passed if known. We don't care about - # hostname, but argparse will complain if we - # do not accept it. - actions = { - 'add': add_lease, - 'del': del_lease, - 'old': old_lease, - } - for action, func in actions.items(): - parser = subparsers.add_parser(action) - parser.add_argument('mac') - parser.add_argument('ip') - parser.add_argument('hostname', nargs='?', default='') - parser.set_defaults(func=func) - - -CONF.register_cli_opt( - cfg.SubCommandOpt('action', - title='Action options', - help='Available dhcpbridge options', - handler=add_action_parsers)) - - -def main(): - """Parse environment and arguments and call the appropriate action.""" - config.parse_args(sys.argv, - default_config_files=jsonutils.loads(os.environ['CONFIG_FILE'])) - - logging.setup(CONF, "nova") - global LOG - LOG = logging.getLogger('nova.dhcpbridge') - - if CONF.action.name == 'old': - # NOTE(sdague): old is the most frequent message sent, and - # it's a noop. We should just exit immediately otherwise we - # can stack up a bunch of requests in dnsmasq. A SIGHUP seems - # to dump this list, so actions queued up get lost. 
- return - - objects.register_all() - - cmd_common.block_db_access('nova-dhcpbridge') - objects_base.NovaObject.indirection_api = conductor_rpcapi.ConductorAPI() - - if CONF.action.name in ['add', 'del']: - LOG.debug("Called '%(action)s' for mac '%(mac)s' with IP '%(ip)s'", - {"action": CONF.action.name, - "mac": CONF.action.mac, - "ip": CONF.action.ip}) - CONF.action.func(CONF.action.mac, CONF.action.ip) - else: - try: - network_id = int(os.environ.get('NETWORK_ID')) - except TypeError: - LOG.error("Environment variable 'NETWORK_ID' must be set.") - return 1 - - print(init_leases(network_id)) - - rpc.cleanup() diff --git a/nova/cmd/manage.py b/nova/cmd/manage.py index c08bee87fe5..e3c61a26a38 100644 --- a/nova/cmd/manage.py +++ b/nova/cmd/manage.py @@ -21,67 +21,80 @@ CLI interface for nova management. """ -from __future__ import print_function - -import argparse +import collections import functools +import os import re import sys +import time import traceback +from urllib import parse as urlparse from dateutil import parser as dateutil_parser -import decorator -import netaddr +from keystoneauth1 import exceptions as ks_exc +from neutronclient.common import exceptions as neutron_client_exc +from os_brick.initiator import connector +import os_resource_classes as orc from oslo_config import cfg from oslo_db import exception as db_exc from oslo_log import log as logging import oslo_messaging as messaging +from oslo_serialization import jsonutils from oslo_utils import encodeutils -from oslo_utils import importutils from oslo_utils import uuidutils import prettytable -import six -import six.moves.urllib.parse as urlparse from sqlalchemy.engine import url as sqla_url -from nova.api.openstack.placement import db_api as placement_db -from nova.api.openstack.placement.objects import consumer as consumer_obj from nova.cmd import common as cmd_common -from nova.compute import api as compute_api +from nova.compute import api +from nova.compute import instance_actions +from nova.compute import rpcapi +from nova.compute import vm_states import nova.conf from nova import config from nova import context -from nova.db import api as db +from nova.db import constants as db_const +from nova.db.main import api as db from nova.db import migration -from nova.db.sqlalchemy import api as sa_db from nova import exception from nova.i18n import _ +from nova.network import constants +from nova.network import neutron as neutron_api from nova import objects from nova.objects import block_device as block_device_obj -from nova.objects import build_request as build_request_obj +from nova.objects import compute_node as compute_node_obj +from nova.objects import fields as obj_fields from nova.objects import host_mapping as host_mapping_obj from nova.objects import instance as instance_obj from nova.objects import instance_mapping as instance_mapping_obj -from nova.objects import keypair as keypair_obj +from nova.objects import pci_device as pci_device_obj from nova.objects import quotas as quotas_obj -from nova.objects import request_spec -from nova import quota +from nova.objects import virtual_interface as virtual_interface_obj from nova import rpc from nova.scheduler.client import report from nova.scheduler import utils as scheduler_utils from nova import utils from nova import version -from nova.virt import ironic +from nova.virt.libvirt import machine_type_utils +from nova.volume import cinder CONF = nova.conf.CONF - -QUOTAS = quota.QUOTAS +LOG = logging.getLogger(__name__) # Keep this list sorted and one entry per 
line for readability. -_EXTRA_DEFAULT_LOG_LEVELS = ['oslo_concurrency=INFO', - 'oslo_db=INFO', - 'oslo_policy=INFO'] - +_EXTRA_DEFAULT_LOG_LEVELS = [ + 'nova=ERROR', + 'oslo_concurrency=INFO', + 'oslo_db=INFO', + 'oslo_policy=INFO', + 'oslo.privsep=ERROR', + 'os_brick=ERROR', +] + +# Consts indicating whether allocations need to be healed by creating them or +# by updating existing allocations. +_CREATE = 'create' +_UPDATE = 'update' # Decorators for actions args = cmd_common.args @@ -98,274 +111,33 @@ def mask_passwd_in_url(url): return urlparse.urlunparse(new_parsed) -def _db_error(caught_exception): - print(caught_exception) - print(_("The above error may show that the database has not " - "been created.\nPlease create a database using " - "'nova-manage db sync' before running this command.")) - sys.exit(1) - - -class FloatingIpCommands(object): - """Class for managing floating IP.""" - - # TODO(stephenfin): Remove these when we remove cells v1 - description = ('DEPRECATED: Floating IP commands are deprecated since ' - 'nova-network is deprecated in favor of Neutron. The ' - 'floating IP commands will be removed in an upcoming ' - 'release.') - - @staticmethod - def address_to_hosts(addresses): - """Iterate over hosts within an address range. - - If an explicit range specifier is missing, the parameter is - interpreted as a specific individual address. - """ - try: - return [netaddr.IPAddress(addresses)] - except ValueError: - net = netaddr.IPNetwork(addresses) - if net.size < 4: - reason = _("/%s should be specified as single address(es) " - "not in cidr format") % net.prefixlen - raise exception.InvalidInput(reason=reason) - elif net.size >= 1000000: - # NOTE(dripton): If we generate a million IPs and put them in - # the database, the system will slow to a crawl and/or run - # out of memory and crash. This is clearly a misconfiguration. - reason = _("Too many IP addresses will be generated. Please " - "increase /%s to reduce the number generated." - ) % net.prefixlen - raise exception.InvalidInput(reason=reason) - else: - return net.iter_hosts() - - @args('--ip_range', metavar='', help='IP range') - @args('--pool', metavar='', help='Optional pool') - @args('--interface', metavar='', help='Optional interface') - def create(self, ip_range, pool=None, interface=None): - """Creates floating IPs for zone by range.""" - admin_context = context.get_admin_context() - if not pool: - pool = CONF.default_floating_pool - if not interface: - interface = CONF.public_interface - - ips = [{'address': str(address), 'pool': pool, 'interface': interface} - for address in self.address_to_hosts(ip_range)] - try: - db.floating_ip_bulk_create(admin_context, ips, want_result=False) - except exception.FloatingIpExists as exc: - # NOTE(simplylizz): Maybe logging would be better here - # instead of printing, but logging isn't used here and I - # don't know why. - print('error: %s' % exc) - return 1 - - @args('--ip_range', metavar='', help='IP range') - def delete(self, ip_range): - """Deletes floating IPs by range.""" - admin_context = context.get_admin_context() - - ips = ({'address': str(address)} - for address in self.address_to_hosts(ip_range)) - db.floating_ip_bulk_destroy(admin_context, ips) - - @args('--host', metavar='', help='Host') - def list(self, host=None): - """Lists all floating IPs (optionally by host). 
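The removed `address_to_hosts` is the interesting piece of `FloatingIpCommands`: it accepts either a single address or a CIDR, rejecting ranges too small to make sense as ranges and ranges large enough to flood the database. A standalone sketch using the same netaddr calls; the `ValueError` messages stand in for Nova's `InvalidInput` exception:

```python
import netaddr


def address_to_hosts(addresses):
    """Yield hosts for a range, or a single address if no '/' is given."""
    try:
        # A bare address parses cleanly; treat it as exactly one host.
        return [netaddr.IPAddress(addresses)]
    except ValueError:
        net = netaddr.IPNetwork(addresses)
        if net.size < 4:
            # /31 and /32 must be given as plain addresses instead.
            raise ValueError('/%s should be specified as single '
                             'address(es), not in CIDR format'
                             % net.prefixlen)
        if net.size >= 1000000:
            # Materializing ~a million rows would crush the database.
            raise ValueError('too many addresses would be generated; '
                             'increase /%s' % net.prefixlen)
        return net.iter_hosts()


print(list(address_to_hosts('192.168.0.0/30')))  # the two usable hosts
print(address_to_hosts('192.168.0.1'))           # a single address
```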
- - Note: if host is given, only active floating IPs are returned - """ - ctxt = context.get_admin_context() - try: - if host is None: - floating_ips = db.floating_ip_get_all(ctxt) - else: - floating_ips = db.floating_ip_get_all_by_host(ctxt, host) - except exception.NoFloatingIpsDefined: - print(_("No floating IP addresses have been defined.")) - return - for floating_ip in floating_ips: - instance_uuid = None - if floating_ip['fixed_ip_id']: - fixed_ip = db.fixed_ip_get(ctxt, floating_ip['fixed_ip_id']) - instance_uuid = fixed_ip['instance_uuid'] - - print("%s\t%s\t%s\t%s\t%s" % (floating_ip['project_id'], - floating_ip['address'], - instance_uuid, - floating_ip['pool'], - floating_ip['interface'])) - - -@decorator.decorator -def validate_network_plugin(f, *args, **kwargs): - """Decorator to validate the network plugin.""" - if utils.is_neutron(): - print(_("ERROR: Network commands are not supported when using the " - "Neutron API. Use python-neutronclient instead.")) - return 2 - return f(*args, **kwargs) - - -class NetworkCommands(object): - """Class for managing networks.""" - - # TODO(stephenfin): Remove these when we remove cells v1 - description = ('DEPRECATED: Network commands are deprecated since ' - 'nova-network is deprecated in favor of Neutron. The ' - 'network commands will be removed in an upcoming release.') - - @validate_network_plugin - @args('--label', metavar='
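The `@args(...)` decorators on these command methods come from `cmd_common.args` (aliased near the top of manage.py, as the earlier hunk shows). Each decorator call records an argparse spec on the method, and the parser builder later replays them via `add_argument()` when wiring up subcommands. A minimal reimplementation of that convention, with hypothetical flags and a stub command class:

```python
import argparse


def args(*posargs, **kwargs):
    """Record an argparse spec on the decorated method, oldest first."""
    def _decorator(func):
        func.__dict__.setdefault('args', []).insert(0, (posargs, kwargs))
        return func
    return _decorator


class DemoCommands(object):
    @args('--name', metavar='<name>', help='Name of the thing')
    @args('--count', metavar='<count>', help='How many', default='1')
    def create(self, name=None, count='1'):
        print('would create %s x %s' % (count, name))


# Build a parser for one action by replaying the recorded specs.
cmd = DemoCommands()
parser = argparse.ArgumentParser(prog='demo create')
for posargs, kwargs in getattr(cmd.create, 'args', []):
    parser.add_argument(*posargs, **kwargs)

ns = parser.parse_args(['--name', 'net1', '--count', '2'])
cmd.create(name=ns.name, count=ns.count)
```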