Commit

Merge branch 'main' into 4.20-mgmt-server-peers
borisstoyanov authored Nov 12, 2024
2 parents 9423dcc + f7b7339 commit c8fd0c1
Showing 9 changed files with 98 additions and 26 deletions.
@@ -2078,8 +2078,8 @@ public void copyAsync(Map<VolumeInfo, DataStore> volumeDataStoreMap, VirtualMach
migrateDiskInfo = configureMigrateDiskInfo(srcVolumeInfo, destPath, backingPath);
migrateDiskInfo.setSourceDiskOnStorageFileSystem(isStoragePoolTypeOfFile(sourceStoragePool));
migrateDiskInfoList.add(migrateDiskInfo);
prepareDiskWithSecretConsumerDetail(vmTO, srcVolumeInfo, destVolumeInfo.getPath());
}
prepareDiskWithSecretConsumerDetail(vmTO, srcVolumeInfo, destVolumeInfo.getPath());

migrateStorage.put(srcVolumeInfo.getPath(), migrateDiskInfo);

@@ -2479,7 +2479,8 @@ protected void verifyLiveMigrationForKVM(Map<VolumeInfo, DataStore> volumeDataSt
throw new CloudRuntimeException("Destination storage pool with ID " + dataStore.getId() + " was not located.");
}

if (srcStoragePoolVO.isManaged() && srcStoragePoolVO.getId() != destStoragePoolVO.getId()) {
boolean isSrcAndDestPoolPowerFlexStorage = srcStoragePoolVO.getPoolType().equals(Storage.StoragePoolType.PowerFlex) && destStoragePoolVO.getPoolType().equals(Storage.StoragePoolType.PowerFlex);
if (srcStoragePoolVO.isManaged() && !isSrcAndDestPoolPowerFlexStorage && srcStoragePoolVO.getId() != destStoragePoolVO.getId()) {
throw new CloudRuntimeException("Migrating a volume online with KVM from managed storage is not currently supported.");
}

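Note on the second hunk above: migrating a volume online off managed storage to a different pool is still rejected on KVM, except when both the source and the destination pool are PowerFlex. A minimal sketch of that guard as a standalone helper; the helper name is invented, and StoragePoolVO, Storage.StoragePoolType and CloudRuntimeException are the CloudStack types already used in the hunk, so this fragment assumes their usual imports.

    // Illustrative only; mirrors the condition added in verifyLiveMigrationForKVM.
    private static void checkOnlineMigrationAllowed(StoragePoolVO srcPool, StoragePoolVO destPool) {
        boolean bothPowerFlex = srcPool.getPoolType().equals(Storage.StoragePoolType.PowerFlex)
                && destPool.getPoolType().equals(Storage.StoragePoolType.PowerFlex);
        // Reject managed-to-managed online migration on KVM, unless both pools are PowerFlex.
        if (srcPool.isManaged() && !bothPowerFlex && srcPool.getId() != destPool.getId()) {
            throw new CloudRuntimeException(
                    "Migrating a volume online with KVM from managed storage is not currently supported.");
        }
    }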
@@ -89,7 +89,7 @@ public Ternary<String, String, String> getInterfaceDetails(String interfaceName)

@Override
public Answer execute(final OvsFetchInterfaceCommand command, final LibvirtComputingResource libvirtComputingResource) {
final String label = "'" + command.getLabel() + "'";
final String label = command.getLabel();

logger.debug("Will look for network with name-label:" + label);
try {
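The change above drops the literal single quotes that were baked into the label value itself, so any later use of label, such as the interface lookup in this command wrapper, no longer searches for a name that includes the quote characters. A small self-contained illustration; the class name and the bridge name are examples, not CloudStack API.

    public class LabelQuotingExample {
        public static void main(String[] args) {
            String rawLabel = "cloudbr0";
            String quotedLabel = "'" + rawLabel + "'";          // old behaviour: quotes became part of the value
            System.out.println(quotedLabel.equals(rawLabel));   // false -> a lookup by name-label would miss
            // Quotes belong in the log message only, not in the value used for lookups.
            System.out.println("Will look for network with name-label: '" + rawLabel + "'");
        }
    }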
@@ -17,6 +17,7 @@
package com.cloud.hypervisor.kvm.storage;

import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.HashMap;
import java.util.List;
@@ -48,6 +49,7 @@
import com.linbit.linstor.api.model.Resource;
import com.linbit.linstor.api.model.ResourceConnectionModify;
import com.linbit.linstor.api.model.ResourceDefinition;
import com.linbit.linstor.api.model.ResourceDefinitionModify;
import com.linbit.linstor.api.model.ResourceGroupSpawn;
import com.linbit.linstor.api.model.ResourceMakeAvailable;
import com.linbit.linstor.api.model.ResourceWithVolumes;
@@ -235,6 +237,34 @@ public KVMPhysicalDisk createPhysicalDisk(String name, KVMStoragePool pool, Qemu
}
}

private void setAllowTwoPrimariesOnRD(DevelopersApi api, String rscName) throws ApiException {
ResourceDefinitionModify rdm = new ResourceDefinitionModify();
Properties props = new Properties();
props.put("DrbdOptions/Net/allow-two-primaries", "yes");
props.put("DrbdOptions/Net/protocol", "C");
rdm.setOverrideProps(props);
ApiCallRcList answers = api.resourceDefinitionModify(rscName, rdm);
if (answers.hasError()) {
logger.error(String.format("Unable to set protocol C and 'allow-two-primaries' on %s", rscName));
// do not fail here as adding allow-two-primaries property is only a problem while live migrating
}
}

private void setAllowTwoPrimariesOnRc(DevelopersApi api, String rscName, String inUseNode) throws ApiException {
ResourceConnectionModify rcm = new ResourceConnectionModify();
Properties props = new Properties();
props.put("DrbdOptions/Net/allow-two-primaries", "yes");
props.put("DrbdOptions/Net/protocol", "C");
rcm.setOverrideProps(props);
ApiCallRcList answers = api.resourceConnectionModify(rscName, inUseNode, localNodeName, rcm);
if (answers.hasError()) {
logger.error(String.format(
"Unable to set protocol C and 'allow-two-primaries' on %s/%s/%s",
inUseNode, localNodeName, rscName));
// do not fail here as adding allow-two-primaries property is only a problem while live migrating
}
}

/**
* Checks if the given resource is in use by drbd on any host and
* if so set the drbd option allow-two-primaries
@@ -246,16 +276,13 @@ private void allow2PrimariesIfInUse(DevelopersApi api, String rscName) throws Ap
String inUseNode = LinstorUtil.isResourceInUse(api, rscName);
if (inUseNode != null && !inUseNode.equalsIgnoreCase(localNodeName)) {
// allow 2 primaries for live migration, should be removed by disconnect on the other end
ResourceConnectionModify rcm = new ResourceConnectionModify();
Properties props = new Properties();
props.put("DrbdOptions/Net/allow-two-primaries", "yes");
props.put("DrbdOptions/Net/protocol", "C");
rcm.setOverrideProps(props);
ApiCallRcList answers = api.resourceConnectionModify(rscName, inUseNode, localNodeName, rcm);
if (answers.hasError()) {
logger.error("Unable to set protocol C and 'allow-two-primaries' on {}/{}/{}",
inUseNode, localNodeName, rscName);
// do not fail here as adding allow-two-primaries property is only a problem while live migrating

// in a non-hyperconverged setup we have to set allow-two-primaries on the resource-definition
// as there is no resource connection between diskless nodes.
if (LinstorUtil.areResourcesDiskless(api, rscName, Arrays.asList(inUseNode, localNodeName))) {
setAllowTwoPrimariesOnRD(api, rscName);
} else {
setAllowTwoPrimariesOnRc(api, rscName, inUseNode);
}
}
}
@@ -294,11 +321,22 @@ public boolean connectPhysicalDisk(String volumePath, KVMStoragePool pool, Map<S
return true;
}

private void removeTwoPrimariesRcProps(DevelopersApi api, String inUseNode, String rscName) throws ApiException {
private void removeTwoPrimariesRDProps(DevelopersApi api, String rscName, List<String> deleteProps)
throws ApiException {
ResourceDefinitionModify rdm = new ResourceDefinitionModify();
rdm.deleteProps(deleteProps);
ApiCallRcList answers = api.resourceDefinitionModify(rscName, rdm);
if (answers.hasError()) {
logger.error(
String.format("Failed to remove 'protocol' and 'allow-two-primaries' on %s: %s",
rscName, LinstorUtil.getBestErrorMessage(answers)));
// do not fail here as removing allow-two-primaries property isn't fatal
}
}

private void removeTwoPrimariesRcProps(DevelopersApi api, String rscName, String inUseNode, List<String> deleteProps)
throws ApiException {
ResourceConnectionModify rcm = new ResourceConnectionModify();
List<String> deleteProps = new ArrayList<>();
deleteProps.add("DrbdOptions/Net/allow-two-primaries");
deleteProps.add("DrbdOptions/Net/protocol");
rcm.deleteProps(deleteProps);
ApiCallRcList answers = api.resourceConnectionModify(rscName, localNodeName, inUseNode, rcm);
if (answers.hasError()) {
@@ -310,6 +348,15 @@ private void removeTwoPrimariesRcProps(DevelopersApi api, String inUseNode, Stri
}
}

private void removeTwoPrimariesProps(DevelopersApi api, String inUseNode, String rscName) throws ApiException {
List<String> deleteProps = new ArrayList<>();
deleteProps.add("DrbdOptions/Net/allow-two-primaries");
deleteProps.add("DrbdOptions/Net/protocol");

removeTwoPrimariesRDProps(api, rscName, deleteProps);
removeTwoPrimariesRcProps(api, rscName, inUseNode, deleteProps);
}

private boolean tryDisconnectLinstor(String volumePath, KVMStoragePool pool)
{
if (volumePath == null) {
@@ -343,7 +390,7 @@ private boolean tryDisconnectLinstor(String volumePath, KVMStoragePool pool)
try {
String inUseNode = LinstorUtil.isResourceInUse(api, rsc.getName());
if (inUseNode != null && !inUseNode.equalsIgnoreCase(localNodeName)) {
removeTwoPrimariesRcProps(api, inUseNode, rsc.getName());
removeTwoPrimariesProps(api, inUseNode, rsc.getName());
}
} catch (ApiException apiEx) {
logger.error(apiEx.getBestMessage());
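Taken together, the LINSTOR hunks above set DrbdOptions/Net/allow-two-primaries and protocol C before a live migration and delete the same properties when the source side disconnects; in a non-hyperconverged setup, where both nodes hold the resource diskless and therefore have no resource connection between them, the properties go on the resource definition instead. A rough sketch of that lifecycle, reusing the helper, utility and field names from the diff; the wrapper method below and its wiring are illustrative, not part of the commit.

    // Illustrative wiring only; the helpers referenced here are the ones added in the diff.
    private void aroundLiveMigration(DevelopersApi api, String rscName) throws ApiException {
        String inUseNode = LinstorUtil.isResourceInUse(api, rscName);
        if (inUseNode == null || inUseNode.equalsIgnoreCase(localNodeName)) {
            return; // resource is not primary on another node, nothing to relax
        }
        if (LinstorUtil.areResourcesDiskless(api, rscName, Arrays.asList(inUseNode, localNodeName))) {
            setAllowTwoPrimariesOnRD(api, rscName);        // no resource connection between diskless nodes
        } else {
            setAllowTwoPrimariesOnRc(api, rscName, inUseNode);
        }
        // ... live migration runs; both nodes may briefly be DRBD primary ...
        removeTwoPrimariesProps(api, inUseNode, rscName);  // in the diff this happens in tryDisconnectLinstor
    }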
@@ -17,6 +17,7 @@
package org.apache.cloudstack.storage.datastore.util;

import com.linbit.linstor.api.ApiClient;
import com.linbit.linstor.api.ApiConsts;
import com.linbit.linstor.api.ApiException;
import com.linbit.linstor.api.DevelopersApi;
import com.linbit.linstor.api.model.ApiCallRc;
@@ -33,6 +34,7 @@

import javax.annotation.Nonnull;

import java.util.Collection;
import java.util.Collections;
import java.util.List;
import java.util.stream.Collectors;
@@ -210,6 +212,28 @@ public static String isResourceInUse(DevelopersApi api, String rscName) throws A
return null;
}

/**
* Check if the given resource is diskless on all of the given nodes.
*
* @param api developer api object to use
* @param rscName resource name to check.
* @param nodeNames nodes on which to check for diskless deployments of the resource.
* @return true if the resource is diskless on all given nodes, otherwise false.
* @throws ApiException forwards api errors
*/
public static boolean areResourcesDiskless(DevelopersApi api, String rscName, Collection<String> nodeNames)
throws ApiException {
List<Resource> rscs = api.resourceList(rscName, null, null);
if (rscs != null) {
Collection<String> disklessNodes = rscs.stream()
.filter(rsc -> rsc.getFlags() != null && (rsc.getFlags().contains(ApiConsts.FLAG_DISKLESS) ||
rsc.getFlags().contains(ApiConsts.FLAG_DRBD_DISKLESS)))
.map(rsc -> rsc.getNodeName().toLowerCase())
.collect(Collectors.toList());
return disklessNodes.containsAll(nodeNames.stream().map(String::toLowerCase).collect(Collectors.toList()));
}
return false;
}

/**
* Try to get the device path for the given resource name.
* This could be made a bit more direct after java-linstor api is fixed for layer data subtypes.
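areResourcesDiskless above reports whether the resource is deployed diskless on every node in the given collection, based on the DISKLESS and DRBD_DISKLESS flags. A hypothetical caller; the helper method, node and resource names are made up, only LinstorUtil.areResourcesDiskless, DevelopersApi and ApiException come from the diff, and java.util.Arrays is assumed to be imported.

    // Hypothetical helper; the DevelopersApi instance is assumed to be provided by the caller.
    static boolean isNonHyperconvergedPair(DevelopersApi api, String rscName,
                                           String nodeA, String nodeB) throws ApiException {
        // true only if *every* listed node holds the resource with the DISKLESS or
        // DRBD_DISKLESS flag; node names are compared case-insensitively, and a
        // missing resource list from the API yields false.
        return LinstorUtil.areResourcesDiskless(api, rscName, Arrays.asList(nodeA, nodeB));
    }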
2 changes: 1 addition & 1 deletion systemvm/debian/opt/cloud/bin/cs/CsFile.py
@@ -174,6 +174,6 @@ def deleteLine(self, search):
self.new_config = list(temp_config)

def compare(self, o):
result = (isinstance(o, self.__class__) and set(self.config) == set(o.config))
result = (isinstance(o, self.__class__) and self.config == o.config)
logging.debug("Comparison of CsFiles content is ==> %s" % result)
return result
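The compare() change above switches from set equality to direct list comparison, so two configurations only count as equal when they contain the same lines, in the same order, with the same duplicates. A self-contained Java illustration of the difference between the two comparison semantics; the line values are examples only.

    import java.util.Arrays;
    import java.util.HashSet;
    import java.util.List;

    public class CompareSemantics {
        public static void main(String[] args) {
            List<String> a = Arrays.asList("iface eth0", "iface eth1", "iface eth1");
            List<String> b = Arrays.asList("iface eth1", "iface eth0");

            // Set comparison ignores ordering and duplicate lines.
            System.out.println(new HashSet<>(a).equals(new HashSet<>(b))); // true  (old, set-based check)
            // List comparison requires identical content, order and length.
            System.out.println(a.equals(b));                               // false (new, list-based check)
        }
    }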
2 changes: 1 addition & 1 deletion ui/src/config/section/compute.js
@@ -542,7 +542,7 @@ export default {
if (store.listAllProjects) {
fields.push('project')
}
if (store.apis.scaleKubernetesCluster.params.filter(x => x.name === 'autoscalingenabled').length > 0) {
if (store.apis.scaleKubernetesCluster?.params?.filter(x => x.name === 'autoscalingenabled').length > 0) {
fields.splice(2, 0, 'autoscalingenabled')
}
fields.push('zonename')
2 changes: 1 addition & 1 deletion ui/src/views/compute/KubernetesServiceTab.vue
@@ -284,7 +284,7 @@ export default {
}
},
mounted () {
if (this.$store.getters.apis.scaleKubernetesCluster.params.filter(x => x.name === 'nodeids').length > 0 && this.resource.clustertype === 'CloudManaged') {
if (this.$store.getters.apis.scaleKubernetesCluster?.params?.filter(x => x.name === 'nodeids').length > 0 && this.resource.clustertype === 'CloudManaged') {
this.vmColumns.push({
key: 'actions',
title: this.$t('label.actions'),
4 changes: 2 additions & 2 deletions utils/src/main/java/com/cloud/utils/HttpUtils.java
@@ -117,8 +117,8 @@ public static boolean validateSessionKey(final HttpSession session, final Map<St
return false;
}
final String jsessionidFromCookie = HttpUtils.findCookie(cookies, "JSESSIONID");
if (jsessionidFromCookie == null
|| !(jsessionidFromCookie.startsWith(session.getId() + '.'))) {
if (jsessionidFromCookie != null
&& !(jsessionidFromCookie.equals(session.getId()) || jsessionidFromCookie.startsWith(session.getId() + '.'))) {
LOGGER.error("JSESSIONID from cookie is invalid.");
return false;
}
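The validateSessionKey change above inverts the JSESSIONID cookie check: instead of rejecting whenever the cookie is absent or is not prefixed with the session id plus a dot, the method now rejects only when a JSESSIONID cookie is present and matches neither the plain session id nor the "<sessionId>.<suffix>" form, leaving the sessionkey comparison to the code that follows. A self-contained sketch of just that predicate; the class and method names are invented, only the boolean logic mirrors the diff.

    public class JsessionIdRuleSketch {
        // Mirrors the new condition: reject only when a cookie exists and matches
        // neither the plain session id nor the "<id>.<suffix>" form.
        static boolean jsessionIdAcceptable(String jsessionidFromCookie, String sessionId) {
            if (jsessionidFromCookie == null) {
                return true; // no cookie: the session-key checks later in the method still apply
            }
            return jsessionidFromCookie.equals(sessionId)
                    || jsessionidFromCookie.startsWith(sessionId + '.');
        }

        public static void main(String[] args) {
            System.out.println(jsessionIdAcceptable(null, "ABC"));        // true  (previously rejected)
            System.out.println(jsessionIdAcceptable("ABC", "ABC"));       // true  (previously rejected)
            System.out.println(jsessionIdAcceptable("ABC.node1", "ABC")); // true
            System.out.println(jsessionIdAcceptable("XYZ.node1", "ABC")); // false
        }
    }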
6 changes: 3 additions & 3 deletions utils/src/test/java/com/cloud/utils/HttpUtilsTest.java
@@ -74,7 +74,7 @@ public void validateSessionKeyTest() {
params = null;
cookies = new Cookie[]{new Cookie(sessionKeyString, sessionKeyValue)};
assertFalse(HttpUtils.validateSessionKey(session, params, cookies, "randomString", HttpUtils.ApiSessionKeyCheckOption.CookieOrParameter));
assertFalse(HttpUtils.validateSessionKey(session, params, cookies, sessionKeyString, HttpUtils.ApiSessionKeyCheckOption.CookieOrParameter));
assertTrue(HttpUtils.validateSessionKey(session, params, cookies, sessionKeyString, HttpUtils.ApiSessionKeyCheckOption.CookieOrParameter));

// param null, cookies not null test (JSESSIONID is not null and matches)
cookies = new Cookie[2];
@@ -95,7 +95,7 @@ public void validateSessionKeyTest() {
cookies = null;
assertFalse(HttpUtils.validateSessionKey(session, params, cookies, sessionKeyString, HttpUtils.ApiSessionKeyCheckOption.CookieOrParameter));
params.put(sessionKeyString, new String[]{sessionKeyValue});
assertFalse(HttpUtils.validateSessionKey(session, params, cookies, sessionKeyString, HttpUtils.ApiSessionKeyCheckOption.CookieOrParameter));
assertTrue(HttpUtils.validateSessionKey(session, params, cookies, sessionKeyString, HttpUtils.ApiSessionKeyCheckOption.CookieOrParameter));

// both param and cookies not null test (JSESSIONID is null)
params = new HashMap<String, Object[]>();
@@ -104,7 +104,7 @@ public void validateSessionKeyTest() {
params.put(sessionKeyString, new String[]{"incorrectValue"});
assertFalse(HttpUtils.validateSessionKey(session, params, cookies, sessionKeyString, HttpUtils.ApiSessionKeyCheckOption.CookieOrParameter));
params.put(sessionKeyString, new String[]{sessionKeyValue});
assertFalse(HttpUtils.validateSessionKey(session, params, cookies, sessionKeyString, HttpUtils.ApiSessionKeyCheckOption.CookieOrParameter));
assertTrue(HttpUtils.validateSessionKey(session, params, cookies, sessionKeyString, HttpUtils.ApiSessionKeyCheckOption.CookieOrParameter));

// both param and cookies not null test (JSESSIONID is not null but mismatches)
params = new HashMap<String, Object[]>();