diff --git a/doc/notification_samples/instance-create-error.json b/doc/notification_samples/instance-create-error.json index 095cfbf4f48..cd824eda298 100644 --- a/doc/notification_samples/instance-create-error.json +++ b/doc/notification_samples/instance-create-error.json @@ -18,7 +18,9 @@ "ip_addresses": [], "launched_at": null, "power_state": "pending", - "state": "building" + "state": "building", + "host": null, + "node": null } }, "priority":"ERROR", diff --git a/doc/source/cli/nova-manage.rst b/doc/source/cli/nova-manage.rst index 53152a0a6fd..082f8e88528 100644 --- a/doc/source/cli/nova-manage.rst +++ b/doc/source/cli/nova-manage.rst @@ -1531,7 +1531,9 @@ command. * - 5 - Instance state invalid (must be stopped and unlocked) * - 6 - - Instance is not attached to volume + - Volume is not attached to the instance + * - 7 + - Connector host is not correct Libvirt Commands diff --git a/nova/cmd/manage.py b/nova/cmd/manage.py index 5d4926b3ec1..c2678d73686 100644 --- a/nova/cmd/manage.py +++ b/nova/cmd/manage.py @@ -22,6 +22,7 @@ """ import collections +from contextlib import contextmanager import functools import os import re @@ -144,6 +145,33 @@ def format_dict(dct, dict_property="Property", dict_value='Value', return encodeutils.safe_encode(pt.get_string()).decode() +@contextmanager +def locked_instance(cell_mapping, instance, reason): + """Context manager to lock and unlock instance, + lock state will be restored regardless of the success or failure + of target functionality. + + :param cell_mapping: instance-cell-mapping + :param instance: instance to be lock and unlock + :param reason: reason, why lock is required + """ + + compute_api = api.API() + + initial_state = 'locked' if instance.locked else 'unlocked' + if not instance.locked: + with context.target_cell( + context.get_admin_context(), cell_mapping) as cctxt: + compute_api.lock(cctxt, instance, reason=reason) + try: + yield + finally: + if initial_state == 'unlocked': + with context.target_cell( + context.get_admin_context(), cell_mapping) as cctxt: + compute_api.unlock(cctxt, instance) + + class DbCommands(object): """Class for managing the main database.""" @@ -2998,10 +3026,8 @@ def _refresh(self, instance_uuid, volume_id, connector): :param instance_uuid: UUID of instance :param volume_id: ID of volume attached to the instance :param connector: Connector with which to create the new attachment + :return status_code: volume-refresh status_code 0 on success """ - volume_api = cinder.API() - compute_rpcapi = rpcapi.ComputeAPI() - compute_api = api.API() ctxt = context.get_admin_context() im = objects.InstanceMapping.get_by_instance_uuid(ctxt, instance_uuid) @@ -3017,111 +3043,104 @@ def _refresh(self, instance_uuid, volume_id, connector): state=instance.vm_state, method='refresh connection_info (must be stopped)') - if instance.locked: - raise exception.InstanceInvalidState( - instance_uuid=instance_uuid, attr='locked', state='True', - method='refresh connection_info (must be unlocked)') - - compute_api.lock( - cctxt, instance, - reason=( - f'Refreshing connection_info for BDM {bdm.uuid} ' - f'associated with instance {instance_uuid} and volume ' - f'{volume_id}.')) - - # NOTE(lyarwood): Yes this is weird but we need to recreate the admin - # context here to ensure the lock above uses a unique request-id - # versus the following refresh and eventual unlock. 
- ctxt = context.get_admin_context() - with context.target_cell(ctxt, im.cell_mapping) as cctxt: - instance_action = None - new_attachment_id = None - try: - # Log this as an instance action so operators and users are - # aware that this has happened. - instance_action = objects.InstanceAction.action_start( - cctxt, instance_uuid, - instance_actions.NOVA_MANAGE_REFRESH_VOLUME_ATTACHMENT) - - # Create a blank attachment to keep the volume reserved - new_attachment_id = volume_api.attachment_create( - cctxt, volume_id, instance_uuid)['id'] - - # RPC call to the compute to cleanup the connections, which - # will in turn unmap the volume from the compute host - # TODO(lyarwood): Add delete_attachment as a kwarg to - # remove_volume_connection as is available in the private - # method within the manager. + locking_reason = ( + f'Refreshing connection_info for BDM {bdm.uuid} ' + f'associated with instance {instance_uuid} and volume ' + f'{volume_id}.') + + with locked_instance(im.cell_mapping, instance, locking_reason): + return self._do_refresh( + cctxt, instance, volume_id, bdm, connector) + + def _do_refresh(self, cctxt, instance, + volume_id, bdm, connector): + volume_api = cinder.API() + compute_rpcapi = rpcapi.ComputeAPI() + + new_attachment_id = None + try: + # Log this as an instance action so operators and users are + # aware that this has happened. + instance_action = objects.InstanceAction.action_start( + cctxt, instance.uuid, + instance_actions.NOVA_MANAGE_REFRESH_VOLUME_ATTACHMENT) + + # Create a blank attachment to keep the volume reserved + new_attachment_id = volume_api.attachment_create( + cctxt, volume_id, instance.uuid)['id'] + + # RPC call to the compute to cleanup the connections, which + # will in turn unmap the volume from the compute host + # TODO(lyarwood): Add delete_attachment as a kwarg to + # remove_volume_connection as is available in the private + # method within the manager. + if instance.host == connector['host']: compute_rpcapi.remove_volume_connection( cctxt, instance, volume_id, instance.host) + else: + msg = ( + f"The compute host '{connector['host']}' in the " + f"connector does not match the instance host " + f"'{instance.host}'.") + raise exception.HostConflict(_(msg)) + + # Delete the existing volume attachment if present in the bdm. + # This isn't present when the original attachment was made + # using the legacy cinderv2 APIs before the cinderv3 attachment + # based APIs were present. + if bdm.attachment_id: + volume_api.attachment_delete(cctxt, bdm.attachment_id) + + # Update the attachment with host connector, this regenerates + # the connection_info that we can now stash in the bdm. + new_connection_info = volume_api.attachment_update( + cctxt, new_attachment_id, connector, + bdm.device_name)['connection_info'] + + # Before we save it to the BDM ensure the serial is stashed as + # is done in various other codepaths when attaching volumes. + if 'serial' not in new_connection_info: + new_connection_info['serial'] = bdm.volume_id + + # Save the new attachment id and connection_info to the DB + bdm.attachment_id = new_attachment_id + bdm.connection_info = jsonutils.dumps(new_connection_info) + bdm.save() + + # Finally mark the attachment as complete, moving the volume + # status from attaching to in-use ahead of the instance + # restarting + volume_api.attachment_complete(cctxt, new_attachment_id) + return 0 - # Delete the existing volume attachment if present in the bdm. 
- # This isn't present when the original attachment was made - # using the legacy cinderv2 APIs before the cinderv3 attachment - # based APIs were present. - if bdm.attachment_id: - volume_api.attachment_delete(cctxt, bdm.attachment_id) - - # Update the attachment with host connector, this regenerates - # the connection_info that we can now stash in the bdm. - new_connection_info = volume_api.attachment_update( - cctxt, new_attachment_id, connector, - bdm.device_name)['connection_info'] - - # Before we save it to the BDM ensure the serial is stashed as - # is done in various other codepaths when attaching volumes. - if 'serial' not in new_connection_info: - new_connection_info['serial'] = bdm.volume_id - - # Save the new attachment id and connection_info to the DB - bdm.attachment_id = new_attachment_id - bdm.connection_info = jsonutils.dumps(new_connection_info) + finally: + # If the bdm.attachment_id wasn't updated make sure we clean + # up any attachments created during the run. + bdm = objects.BlockDeviceMapping.get_by_volume_and_instance( + cctxt, volume_id, instance.uuid) + if ( + new_attachment_id and + bdm.attachment_id != new_attachment_id + ): + volume_api.attachment_delete(cctxt, new_attachment_id) + + # If we failed during attachment_update the bdm.attachment_id + # has already been deleted so recreate it now to ensure the + # volume is still associated with the instance and clear the + # now stale connection_info. + try: + volume_api.attachment_get(cctxt, bdm.attachment_id) + except exception.VolumeAttachmentNotFound: + bdm.attachment_id = volume_api.attachment_create( + cctxt, volume_id, instance.uuid)['id'] + bdm.connection_info = None bdm.save() - # Finally mark the attachment as complete, moving the volume - # status from attaching to in-use ahead of the instance - # restarting - volume_api.attachment_complete(cctxt, new_attachment_id) - return 0 - - finally: - # If the bdm.attachment_id wasn't updated make sure we clean - # up any attachments created during the run. - bdm = objects.BlockDeviceMapping.get_by_volume_and_instance( - cctxt, volume_id, instance_uuid) - if ( - new_attachment_id and - bdm.attachment_id != new_attachment_id - ): - volume_api.attachment_delete(cctxt, new_attachment_id) - - # If we failed during attachment_update the bdm.attachment_id - # has already been deleted so recreate it now to ensure the - # volume is still associated with the instance and clear the - # now stale connection_info. - try: - volume_api.attachment_get(cctxt, bdm.attachment_id) - except exception.VolumeAttachmentNotFound: - bdm.attachment_id = volume_api.attachment_create( - cctxt, volume_id, instance_uuid)['id'] - bdm.connection_info = None - bdm.save() - - # Finish the instance action if it was created and started - # TODO(lyarwood): While not really required we should store - # the exec and traceback in here on failure. - if instance_action: - instance_action.finish() - - # NOTE(lyarwood): As above we need to unlock the instance with - # a fresh context and request-id to keep it unique. It's safe - # to assume that the instance is locked as this point as the - # earlier call to lock isn't part of this block. - with context.target_cell( - context.get_admin_context(), - im.cell_mapping - ) as u_cctxt: - compute_api.unlock(u_cctxt, instance) + # Finish the instance action if it was created and started + # TODO(lyarwood): While not really required we should store + # the exec and traceback in here on failure. 
+ if instance_action: + instance_action.finish() @action_description( _("Refresh the connection info for a given volume attachment")) @@ -3145,6 +3164,7 @@ def refresh(self, instance_uuid=None, volume_id=None, connector_path=None): * 4: Instance does not exist. * 5: Instance state invalid. * 6: Volume is not attached to instance. + * 7: Connector host is not correct. """ try: # TODO(lyarwood): Make this optional and provide a rpcapi capable @@ -3160,6 +3180,12 @@ def refresh(self, instance_uuid=None, volume_id=None, connector_path=None): # Refresh the volume attachment return self._refresh(instance_uuid, volume_id, connector) + except exception.HostConflict as e: + print( + f"The command 'nova-manage volume_attachment get_connector' " + f"may have been run on the wrong compute host. Or the " + f"instance host may be wrong and in need of repair.\n{e}") + return 7 except exception.VolumeBDMNotFound as e: print(str(e)) return 6 @@ -3172,12 +3198,15 @@ def refresh(self, instance_uuid=None, volume_id=None, connector_path=None): ) as e: print(str(e)) return 4 - except (ValueError, OSError): + except ValueError as e: print( f'Failed to open {connector_path}. Does it contain valid ' - f'connector_info data?' + f'connector_info data?\nError: {str(e)}' ) return 3 + except OSError as e: + print(str(e)) + return 3 except exception.InvalidInput as e: print(str(e)) return 2 diff --git a/nova/compute/claims.py b/nova/compute/claims.py index 490b418081e..fa93d9ab185 100644 --- a/nova/compute/claims.py +++ b/nova/compute/claims.py @@ -64,6 +64,7 @@ def __init__( super().__init__(migration=migration) # Stash a copy of the instance at the current point of time self.instance = instance.obj_clone() + self.instance_ref = instance self.nodename = nodename self.tracker = tracker self._pci_requests = pci_requests @@ -82,7 +83,7 @@ def abort(self): been aborted. """ LOG.debug("Aborting claim: %s", self, instance=self.instance) - self.tracker.abort_instance_claim(self.context, self.instance, + self.tracker.abort_instance_claim(self.context, self.instance_ref, self.nodename) def _claim_test(self, compute_node, limits=None): diff --git a/nova/exception.py b/nova/exception.py index c3ada86c1e2..85e0a9c85dd 100644 --- a/nova/exception.py +++ b/nova/exception.py @@ -2530,3 +2530,7 @@ class NotSupportedComputeForEvacuateV295(NotSupported): "instance on destination. To evacuate before upgrades are " "complete please use an older microversion. 
Required version " "for compute %(expected), current version %(currently)s") + + +class HostConflict(Exception): + pass diff --git a/nova/pci/manager.py b/nova/pci/manager.py index af6d72521b0..9e3713835ad 100644 --- a/nova/pci/manager.py +++ b/nova/pci/manager.py @@ -407,7 +407,7 @@ def free_instance_allocations( for dev in self.pci_devs: if (dev.status == fields.PciDeviceStatus.ALLOCATED and dev.instance_uuid == instance['uuid']): - self._free_device(dev) + self._free_device(dev, instance) def free_instance_claims( self, context: ctx.RequestContext, instance: 'objects.Instance', @@ -423,7 +423,7 @@ def free_instance_claims( for dev in self.pci_devs: if (dev.status == fields.PciDeviceStatus.CLAIMED and dev.instance_uuid == instance['uuid']): - self._free_device(dev) + self._free_device(dev, instance) def free_instance( self, context: ctx.RequestContext, instance: 'objects.Instance', diff --git a/nova/tests/functional/libvirt/test_pci_sriov_servers.py b/nova/tests/functional/libvirt/test_pci_sriov_servers.py index 903fd5ef16d..54429084e90 100644 --- a/nova/tests/functional/libvirt/test_pci_sriov_servers.py +++ b/nova/tests/functional/libvirt/test_pci_sriov_servers.py @@ -2995,6 +2995,62 @@ def test_multi_create(self): self.assert_no_pci_healing("test_compute0") +class PCIResourceRequestReschedulingTest(_PCIServersTestBase): + + # Needed for networks=none + microversion = '2.37' + + PFS_ALIAS_NAME = 'pfs' + PCI_DEVICE_SPEC = [jsonutils.dumps(x) for x in ( + { + 'vendor_id': fakelibvirt.PCI_VEND_ID, + 'product_id': fakelibvirt.PF_PROD_ID, + }, + )] + PCI_ALIAS = [jsonutils.dumps(x) for x in ( + { + 'vendor_id': fakelibvirt.PCI_VEND_ID, + 'product_id': fakelibvirt.PF_PROD_ID, + 'device_type': fields.PciDeviceType.SRIOV_PF, + 'name': PFS_ALIAS_NAME, + }, + )] + + def test_boot_reschedule_with_proper_pci_device_count(self): + """Verify that in case of rescheduling instance with PCI device request + instance has proper count of pci devices. + """ + pci_info = fakelibvirt.HostPCIDevicesInfo() + self.start_compute(hostname='host1', pci_info=pci_info) + self.start_compute(hostname='host2', pci_info=pci_info) + + extra_spec = {"pci_passthrough:alias": "%s:1" % self.PFS_ALIAS_NAME} + flavor_id = self._create_flavor(extra_spec=extra_spec) + + validate_group_policy_called = False + + def validate_group_policy(manager, instance, *args, **kwargs): + nonlocal validate_group_policy_called + self.assertEqual(1, len(instance.pci_devices)) + if not validate_group_policy_called: + validate_group_policy_called = True + raise exception.RescheduledException( + instance_uuid='fake-uuid', + reason='Tests: affinity validation has to fail once') + + with mock.patch( + ('nova.compute.manager.ComputeManager.' 
+ '_validate_instance_group_policy'), + side_effect=validate_group_policy): + server = self._create_server(flavor_id=flavor_id, networks='none') + + ctxt = context.get_admin_context() + pci_devices = objects.PciDeviceList.get_by_instance_uuid( + ctxt, server['id']) + # an additional check we have proper PCI device count in DB + self.assertEqual(1, len(pci_devices)) + + class PCIServersWithPreferredNUMATest(_PCIServersTestBase): ALIAS_NAME = 'a1' diff --git a/nova/tests/unit/cmd/test_manage.py b/nova/tests/unit/cmd/test_manage.py index 10c1a77c948..fca97180e4d 100644 --- a/nova/tests/unit/cmd/test_manage.py +++ b/nova/tests/unit/cmd/test_manage.py @@ -3468,10 +3468,23 @@ def test_refresh_invalid_connector_path_file(self, mock_exists): output = self.output.getvalue().strip() self.assertIn('Failed to open fake_path', output) + @mock.patch('os.path.exists') + def test_refresh_connector_file_oserr(self, mock_exists): + """Test refresh with connector file having no read permission. + """ + mock_exists.return_value = True + with self.patch_open('fake_path', b'invalid json') as mock_file: + mock_file.side_effect = OSError("Permission denied") + ret = self.commands.refresh( + uuidsentinel.volume, uuidsentinel.instance, 'fake_path' + ) + self.assertEqual(3, ret) + @mock.patch('os.path.exists') def _test_refresh(self, mock_exists): ctxt = context.get_admin_context() cell_ctxt = context.get_admin_context() + cell_ctxt.cell_uuid = '39fd7a1f-db62-45bc-a7b6-8137cef87c9d' fake_connector = self._get_fake_connector_info() mock_exists.return_value = True @@ -3528,11 +3541,14 @@ def test_refresh_invalid_instance_state( output = self.output.getvalue().strip() self.assertIn('must be stopped', output) + @mock.patch.object(objects.InstanceAction, 'action_start') + @mock.patch.object(manage.VolumeAttachmentCommands, '_do_refresh') @mock.patch.object( objects.BlockDeviceMapping, 'get_by_volume_and_instance') @mock.patch.object(objects.Instance, 'get_by_uuid') - def test_refresh_instance_already_locked_failure( - self, mock_get_instance, mock_get_bdm + def test_refresh_instance_already_locked( + self, mock_get_instance, mock_get_bdm, + mock__do_refresh, mock_action_start ): """Test refresh with instance when instance is already locked.""" mock_get_instance.return_value = objects.Instance( @@ -3542,11 +3558,11 @@ def test_refresh_instance_already_locked_failure( mock_get_bdm.return_value = objects.BlockDeviceMapping( uuid=uuidsentinel.bdm, volume_id=uuidsentinel.volume, attachment_id=uuidsentinel.instance) + mock_action = mock.Mock(spec=objects.InstanceAction) + mock_action_start.return_value = mock_action - ret = self._test_refresh() - self.assertEqual(5, ret) - output = self.output.getvalue().strip() - self.assertIn('must be unlocked', output) + self._test_refresh() + mock__do_refresh.assert_called_once() @mock.patch.object( objects.BlockDeviceMapping, 'get_by_volume_and_instance') @@ -3573,9 +3589,9 @@ def test_refresh_invalid_volume_id(self, mock_get_instance, mock_get_bdm): @mock.patch.object(objects.Instance, 'get_by_uuid') @mock.patch.object(objects.InstanceAction, 'action_start') def test_refresh_attachment_unknown_failure( - self, mock_action_start, mock_get_instance, mock_get_bdm, mock_lock, - mock_unlock, mock_attachment_create, mock_attachment_delete, - mock_attachment_get + self, mock_action_start, mock_get_instance, + mock_get_bdm, mock_lock, mock_unlock, mock_attachment_create, + mock_attachment_delete, mock_attachment_get ): """Test refresh with instance when any other error happens. 
""" @@ -3606,6 +3622,50 @@ def test_refresh_attachment_unknown_failure( mock_action_start.assert_called_once() mock_action.finish.assert_called_once() + @mock.patch('nova.compute.rpcapi.ComputeAPI', autospec=True) + @mock.patch('nova.volume.cinder.API', autospec=True) + @mock.patch('nova.compute.api.API', autospec=True) + @mock.patch.object(objects.BlockDeviceMapping, 'save') + @mock.patch.object( + objects.BlockDeviceMapping, 'get_by_volume_and_instance') + @mock.patch.object(objects.Instance, 'get_by_uuid') + @mock.patch.object(objects.InstanceAction, 'action_start') + def test_refresh_invalid_connector_host( + self, mock_action_start, mock_get_instance, + mock_get_bdm, mock_save_bdm, mock_compute_api, mock_volume_api, + mock_compute_rpcapi + ): + """Test refresh with a old host not disconnected properly + and connector host info is not correct, a fake-host is + passed. + """ + + fake_volume_api = mock_volume_api.return_value + device_name = '/dev/vda' + + mock_get_instance.return_value = objects.Instance( + uuid=uuidsentinel.instance, + vm_state=obj_fields.InstanceState.STOPPED, + host='old-host', locked=False) + mock_get_bdm.return_value = objects.BlockDeviceMapping( + uuid=uuidsentinel.bdm, volume_id=uuidsentinel.volume, + attachment_id=uuidsentinel.instance, + device_name=device_name) + mock_action = mock.Mock(spec=objects.InstanceAction) + mock_action_start.return_value = mock_action + + fake_volume_api.attachment_create.return_value = { + 'id': uuidsentinel.new_attachment, + } + # in instance we have host as 'old-host' + # but here 'fake-host' is passed in connector info. + fake_volume_api.attachment_update.return_value = { + 'connection_info': self._get_fake_connector_info(), + } + + ret = self._test_refresh() + self.assertEqual(7, ret) + @mock.patch('nova.compute.rpcapi.ComputeAPI', autospec=True) @mock.patch('nova.volume.cinder.API', autospec=True) @mock.patch('nova.compute.api.API', autospec=True) @@ -3615,8 +3675,9 @@ def test_refresh_attachment_unknown_failure( @mock.patch.object(objects.Instance, 'get_by_uuid') @mock.patch.object(objects.InstanceAction, 'action_start') def test_refresh( - self, mock_action_start, mock_get_instance, mock_get_bdm, - mock_save_bdm, mock_compute_api, mock_volume_api, mock_compute_rpcapi + self, mock_action_start, mock_get_instance, + mock_get_bdm, mock_save_bdm, mock_compute_api, mock_volume_api, + mock_compute_rpcapi ): """Test refresh with a successful code path.""" fake_compute_api = mock_compute_api.return_value @@ -3627,7 +3688,7 @@ def test_refresh( mock_get_instance.return_value = objects.Instance( uuid=uuidsentinel.instance, vm_state=obj_fields.InstanceState.STOPPED, - host='foo', locked=False) + host='fake-host', locked=False) mock_get_bdm.return_value = objects.BlockDeviceMapping( uuid=uuidsentinel.bdm, volume_id=uuidsentinel.volume, attachment_id=uuidsentinel.instance, @@ -3695,6 +3756,44 @@ def test_error_post_mortem(self, mock_conf, mock_parse_args, mock_pm): self.assertEqual(255, manage.main()) self.assertTrue(mock_pm.called) + def _lock_instance(self, ctxt, instance, reason): + instance.locked = True + + def _unlock_instance(self, ctxt, instance): + instance.locked = False + + def test_locked_instance(self): + cm = objects.CellMapping(name='foo', uuid=uuidsentinel.cell) + proj_uuid = uuidutils.generate_uuid() + instance = objects.Instance( + project_id=proj_uuid, uuid=uuidsentinel.instance) + instance.locked = True + + with mock.patch('nova.compute.api.API') as mock_api: + mock_api.return_value.lock.side_effect = 
self._lock_instance + mock_api.return_value.unlock.side_effect = self._unlock_instance + + with manage.locked_instance(cm, instance, 'some'): + self.assertTrue(instance.locked) + + self.assertTrue(instance.locked) + + def test_unlocked_instance(self): + cm = objects.CellMapping(name='foo', uuid=uuidsentinel.cell) + proj_uuid = uuidutils.generate_uuid() + instance = objects.Instance( + project_id=proj_uuid, uuid=uuidsentinel.instance) + instance.locked = False + + with mock.patch('nova.compute.api.API') as mock_api: + mock_api.return_value.lock.side_effect = self._lock_instance + mock_api.return_value.unlock.side_effect = self._unlock_instance + + with manage.locked_instance(cm, instance, 'some'): + self.assertTrue(instance.locked) + + self.assertFalse(instance.locked) + class LibvirtCommandsTestCase(test.NoDBTestCase): diff --git a/nova/tests/unit/compute/test_shelve.py b/nova/tests/unit/compute/test_shelve.py index 0a1e3f54fc3..7a24bc82d98 100644 --- a/nova/tests/unit/compute/test_shelve.py +++ b/nova/tests/unit/compute/test_shelve.py @@ -596,14 +596,6 @@ def check_save(expected_task_state=None): # This is before we've failed. self.assertEqual(task_states.SPAWNING, instance.task_state) tracking['last_state'] = instance.task_state - elif tracking['last_state'] == task_states.SPAWNING: - # This is after we've failed. - self.assertIsNone(instance.host) - self.assertIsNone(instance.node) - self.assertIsNone(instance.task_state) - tracking['last_state'] = instance.task_state - else: - self.fail('Unexpected save!') with mock.patch.object(instance, 'save') as mock_save: mock_save.side_effect = check_save @@ -614,6 +606,10 @@ def check_save(expected_task_state=None): request_spec=objects.RequestSpec(), accel_uuids=[]) + self.assertIsNone(instance.host) + self.assertIsNone(instance.node) + self.assertIsNone(instance.task_state) + mock_notify_instance_action.assert_called_once_with( self.context, instance, 'fake-mini', action='unshelve', phase='start', bdms=mock_bdms) diff --git a/nova/tests/unit/pci/test_manager.py b/nova/tests/unit/pci/test_manager.py index bcd4cecb857..2cc9de253ee 100644 --- a/nova/tests/unit/pci/test_manager.py +++ b/nova/tests/unit/pci/test_manager.py @@ -528,7 +528,9 @@ def test_claim_available_pf_while_child_vf_is_unavailable(self): # Ensure that the claim actually fixes the inconsistency so when the # parent if freed the children become available too. self.tracker.free_instance( - mock.sentinel.context, {'uuid': uuidsentinel.instance1}) + mock.sentinel.context, + {'uuid': uuidsentinel.instance1, + 'pci_devices': [objects.PciDevice(id=pf['id'])]}) pf_dev = self._get_device_by_address(pf['address']) self.assertEqual('available', pf_dev.status) @@ -586,7 +588,9 @@ def test_claim_available_pf_while_children_vfs_are_in_mixed_state(self): # Ensure that the claim actually fixes the inconsistency so when the # parent if freed the children become available too. self.tracker.free_instance( - mock.sentinel.context, {'uuid': uuidsentinel.instance1}) + mock.sentinel.context, + {'uuid': uuidsentinel.instance1, + 'pci_devices': [objects.PciDevice(id=pf['id'])]}) pf_dev = self._get_device_by_address(pf['address']) self.assertEqual('available', pf_dev.status)
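
For context on the new locked_instance() helper added to nova/cmd/manage.py above: it only takes the instance lock when the instance is not already locked, and only releases a lock it took itself, so the caller's original lock state survives both success and failure of the wrapped refresh. Below is a minimal, self-contained sketch of that pattern — illustrative only; the lock/unlock callables stand in for compute_api.lock()/unlock(), and the cell targeting with fresh admin contexts used in the real helper is omitted:

    # Illustrative sketch of the lock-restore pattern; not code from this change.
    from contextlib import contextmanager


    @contextmanager
    def lock_restoring(instance, lock, unlock, reason):
        """Lock the instance if needed and restore its original lock state."""
        was_locked = instance.locked
        if not was_locked:
            lock(instance, reason)      # only take the lock ourselves
        try:
            yield
        finally:
            if not was_locked:
                unlock(instance)        # only release a lock we took


    # Example usage with plain stand-in objects:
    class FakeInstance:
        locked = False


    def fake_lock(inst, reason):
        inst.locked = True


    def fake_unlock(inst):
        inst.locked = False


    instance = FakeInstance()
    with lock_restoring(instance, fake_lock, fake_unlock,
                        'refresh connection_info'):
        assert instance.locked          # locked for the duration of the work
    assert not instance.locked          # original (unlocked) state restored

Restoring the initial state, rather than unconditionally unlocking, means an operator who deliberately locked the instance before running `nova-manage volume_attachment refresh` does not find it silently unlocked afterwards.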