diff --git a/.python-version b/.python-version index d70c8f8d89f2..c8cfe3959183 100644 --- a/.python-version +++ b/.python-version @@ -1 +1 @@ -3.6 +3.10 diff --git a/agent/conf/agent.properties b/agent/conf/agent.properties index 3b6a7b7de292..6cfadc5b6e4a 100644 --- a/agent/conf/agent.properties +++ b/agent/conf/agent.properties @@ -433,3 +433,10 @@ iscsi.session.cleanup.enabled=false # Implicit host tags managed by agent.properties # host.tags= + +# Timeout(in seconds) for SSL handshake when agent connects to server. When no value is set then default value of 30s +# will be used +#ssl.handshake.timeout= + +# Wait(in seconds) during agent reconnections. When no value is set then default value of 5s will be used +#backoff.seconds= diff --git a/agent/src/main/java/com/cloud/agent/Agent.java b/agent/src/main/java/com/cloud/agent/Agent.java index 15f010808aca..88768ae228fc 100644 --- a/agent/src/main/java/com/cloud/agent/Agent.java +++ b/agent/src/main/java/com/cloud/agent/Agent.java @@ -26,24 +26,25 @@ import java.net.UnknownHostException; import java.nio.channels.ClosedChannelException; import java.nio.charset.Charset; -import java.util.ArrayList; import java.util.HashMap; import java.util.List; import java.util.Map; -import java.util.Timer; +import java.util.concurrent.CopyOnWriteArrayList; import java.util.concurrent.ExecutorService; +import java.util.concurrent.Executors; import java.util.concurrent.LinkedBlockingQueue; +import java.util.concurrent.ScheduledExecutorService; +import java.util.concurrent.ScheduledFuture; import java.util.concurrent.SynchronousQueue; import java.util.concurrent.ThreadPoolExecutor; import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicInteger; +import java.util.concurrent.atomic.AtomicLong; +import java.util.concurrent.atomic.AtomicReference; import javax.naming.ConfigurationException; -import com.cloud.resource.AgentStatusUpdater; -import com.cloud.resource.ResourceStatusUpdater; -import com.cloud.agent.api.PingAnswer; -import com.cloud.utils.NumbersUtil; import org.apache.cloudstack.agent.lb.SetupMSListAnswer; import org.apache.cloudstack.agent.lb.SetupMSListCommand; import org.apache.cloudstack.ca.PostCertificateRenewalCommand; @@ -55,10 +56,10 @@ import org.apache.cloudstack.utils.security.KeyStoreUtils; import org.apache.commons.collections.CollectionUtils; import org.apache.commons.io.FileUtils; -import org.apache.commons.lang.ObjectUtils; import org.apache.commons.lang3.StringUtils; -import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.ThreadContext; import com.cloud.agent.api.AgentControlAnswer; import com.cloud.agent.api.AgentControlCommand; @@ -67,6 +68,7 @@ import com.cloud.agent.api.CronCommand; import com.cloud.agent.api.MaintainAnswer; import com.cloud.agent.api.MaintainCommand; +import com.cloud.agent.api.PingAnswer; import com.cloud.agent.api.PingCommand; import com.cloud.agent.api.ReadyCommand; import com.cloud.agent.api.ShutdownCommand; @@ -76,9 +78,11 @@ import com.cloud.agent.transport.Response; import com.cloud.exception.AgentControlChannelException; import com.cloud.host.Host; +import com.cloud.resource.AgentStatusUpdater; +import com.cloud.resource.ResourceStatusUpdater; import com.cloud.resource.ServerResource; +import com.cloud.utils.NumbersUtil; import com.cloud.utils.PropertiesUtil; -import com.cloud.utils.backoff.BackoffAlgorithm; import 
com.cloud.utils.concurrency.NamedThreadFactory; import com.cloud.utils.exception.CloudRuntimeException; import com.cloud.utils.exception.NioConnectionException; @@ -90,7 +94,6 @@ import com.cloud.utils.nio.Task; import com.cloud.utils.script.OutputInterpreter; import com.cloud.utils.script.Script; -import org.apache.logging.log4j.ThreadContext; /** * @config @@ -114,7 +117,7 @@ public enum ExitStatus { Configuration(66), // Exiting due to configuration problems. Error(67); // Exiting because of error. - int value; + final int value; ExitStatus(final int value) { this.value = value; @@ -125,128 +128,134 @@ public int value() { } } - List _controlListeners = new ArrayList(); + CopyOnWriteArrayList controlListeners = new CopyOnWriteArrayList<>(); - IAgentShell _shell; - NioConnection _connection; - ServerResource _resource; - Link _link; - Long _id; + IAgentShell shell; + NioConnection connection; + ServerResource serverResource; + Link link; + Long id; - Timer _timer = new Timer("Agent Timer"); - Timer certTimer; - Timer hostLBTimer; + ScheduledExecutorService selfTaskExecutor; + ScheduledExecutorService certExecutor; + ScheduledExecutorService hostLbCheckExecutor; - List _watchList = new ArrayList(); - long _sequence = 0; - long _lastPingResponseTime = 0; - long _pingInterval = 0; - AtomicInteger _inProgress = new AtomicInteger(); + CopyOnWriteArrayList> watchList = new CopyOnWriteArrayList<>(); + AtomicLong sequence = new AtomicLong(0); + AtomicLong lastPingResponseTime = new AtomicLong(0L); + long pingInterval = 0; + AtomicInteger commandsInProgress = new AtomicInteger(0); - StartupTask _startup = null; - long _startupWaitDefault = 180000; - long _startupWait = _startupWaitDefault; - boolean _reconnectAllowed = true; - //For time sentitive task, e.g. PingTask - ThreadPoolExecutor _ugentTaskPool; - ExecutorService _executor; + private final AtomicReference startupTask = new AtomicReference<>(); + private static final long DEFAULT_STARTUP_WAIT = 180; + long startupWait = DEFAULT_STARTUP_WAIT; + boolean reconnectAllowed = true; - Thread _shutdownThread = new ShutdownThread(this); + //For time sensitive task, e.g. PingTask + ThreadPoolExecutor outRequestHandler; + ExecutorService requestHandler; - private String _keystoreSetupPath; - private String _keystoreCertImportPath; + Thread shutdownThread = new ShutdownThread(this); - // for simulator use only - public Agent(final IAgentShell shell) { - _shell = shell; - _link = null; + private String keystoreSetupSetupPath; + private String keystoreCertImportScriptPath; - _connection = new NioClient("Agent", _shell.getNextHost(), _shell.getPort(), _shell.getWorkers(), this); + private String hostname; - Runtime.getRuntime().addShutdownHook(_shutdownThread); + protected String getLinkLog(final Link link) { + if (link == null) { + return ""; + } + StringBuilder str = new StringBuilder(); + if (logger.isTraceEnabled()) { + str.append(System.identityHashCode(link)).append("-"); + } + str.append(link.getSocketAddress()); + return str.toString(); + } - _ugentTaskPool = - new ThreadPoolExecutor(shell.getPingRetries(), 2 * shell.getPingRetries(), 10, TimeUnit.MINUTES, new SynchronousQueue(), new NamedThreadFactory( - "UgentTask")); + protected String getAgentName() { + return (serverResource != null && serverResource.isAppendAgentNameToLogs() && + StringUtils.isNotBlank(serverResource.getName())) ? 
+ serverResource.getName() : + "Agent"; + } - _executor = - new ThreadPoolExecutor(_shell.getWorkers(), 5 * _shell.getWorkers(), 1, TimeUnit.DAYS, new LinkedBlockingQueue(), new NamedThreadFactory( - "agentRequest-Handler")); + protected void setupShutdownHookAndInitExecutors() { + if (logger.isTraceEnabled()) { + logger.trace("Adding shutdown hook"); + } + Runtime.getRuntime().addShutdownHook(shutdownThread); + selfTaskExecutor = Executors.newScheduledThreadPool(1, new NamedThreadFactory("Agent-SelfTask")); + outRequestHandler = new ThreadPoolExecutor(shell.getPingRetries(), 2 * shell.getPingRetries(), 10, TimeUnit.MINUTES, + new SynchronousQueue<>(), new NamedThreadFactory("AgentOutRequest-Handler")); + requestHandler = new ThreadPoolExecutor(shell.getWorkers(), 5 * shell.getWorkers(), 1, TimeUnit.DAYS, + new LinkedBlockingQueue<>(), new NamedThreadFactory("AgentRequest-Handler")); } - public Agent(final IAgentShell shell, final int localAgentId, final ServerResource resource) throws ConfigurationException { - _shell = shell; - _resource = resource; - _link = null; + // for simulator use only + public Agent(final IAgentShell shell) { + this.shell = shell; + link = null; + connection = new NioClient(getAgentName(), this.shell.getNextHost(), this.shell.getPort(), + this.shell.getWorkers(), this.shell.getSslHandshakeTimeout(), this); + setupShutdownHookAndInitExecutors(); + } + public Agent(final IAgentShell shell, final int localAgentId, final ServerResource resource) throws ConfigurationException { + this.shell = shell; + serverResource = resource; + link = null; resource.setAgentControl(this); - - final String value = _shell.getPersistentProperty(getResourceName(), "id"); - _id = value != null ? Long.parseLong(value) : null; - logger.info("id is {}", ObjectUtils.defaultIfNull(_id, "")); - + final String value = this.shell.getPersistentProperty(getResourceName(), "id"); + id = value != null ? Long.parseLong(value) : null; + logger.info("id is {}", (id != null ? 
id : "")); final Map params = new HashMap<>(); - // merge with properties from command line to let resource access command line parameters - for (final Map.Entry cmdLineProp : _shell.getCmdLineProperties().entrySet()) { + for (final Map.Entry cmdLineProp : this.shell.getCmdLineProperties().entrySet()) { params.put(cmdLineProp.getKey(), cmdLineProp.getValue()); } - - if (!_resource.configure(getResourceName(), params)) { - throw new ConfigurationException("Unable to configure " + _resource.getName()); - } - - final String host = _shell.getNextHost(); - _connection = new NioClient("Agent", host, _shell.getPort(), _shell.getWorkers(), this); - - // ((NioClient)_connection).setBindAddress(_shell.getPrivateIp()); - - logger.debug("Adding shutdown hook"); - Runtime.getRuntime().addShutdownHook(_shutdownThread); - - _ugentTaskPool = - new ThreadPoolExecutor(shell.getPingRetries(), 2 * shell.getPingRetries(), 10, TimeUnit.MINUTES, new SynchronousQueue(), new NamedThreadFactory( - "UgentTask")); - - _executor = - new ThreadPoolExecutor(_shell.getWorkers(), 5 * _shell.getWorkers(), 1, TimeUnit.DAYS, new LinkedBlockingQueue(), new NamedThreadFactory( - "agentRequest-Handler")); - - logger.info("Agent [id = {} : type = {} : zone = {} : pod = {} : workers = {} : host = {} : port = {}", ObjectUtils.defaultIfNull(_id, "new"), getResourceName(), - _shell.getZone(), _shell.getPod(), _shell.getWorkers(), host, _shell.getPort()); + if (!serverResource.configure(getResourceName(), params)) { + throw new ConfigurationException("Unable to configure " + serverResource.getName()); + } + ThreadContext.put("agentname", getAgentName()); + final String host = this.shell.getNextHost(); + connection = new NioClient(getAgentName(), host, this.shell.getPort(), this.shell.getWorkers(), + this.shell.getSslHandshakeTimeout(), this); + setupShutdownHookAndInitExecutors(); + logger.info("Agent [id = {}, type = {}, zone = {}, pod = {}, workers = {}, host = {}, " + + "port = {}, local id = {}]", + (id != null ? String.valueOf(id) : "new"), getResourceName(), this.shell.getZone(), + this.shell.getPod(), this.shell.getWorkers(), host, this.shell.getPort(), localAgentId); } public String getVersion() { - return _shell.getVersion(); + return shell.getVersion(); } public String getResourceGuid() { - final String guid = _shell.getGuid(); + final String guid = shell.getGuid(); return guid + "-" + getResourceName(); } public String getZone() { - return _shell.getZone(); + return shell.getZone(); } public String getPod() { - return _shell.getPod(); + return shell.getPod(); } protected void setLink(final Link link) { - _link = link; + this.link = link; } public ServerResource getResource() { - return _resource; - } - - public BackoffAlgorithm getBackoffAlgorithm() { - return _shell.getBackoffAlgorithm(); + return serverResource; } public String getResourceName() { - return _resource.getClass().getSimpleName(); + return serverResource.getClass().getSimpleName(); } /** @@ -255,71 +264,65 @@ public String getResourceName() { * agent instances and its inner objects. 
*/ private void scavengeOldAgentObjects() { - _executor.submit(new Runnable() { - @Override - public void run() { - try { - Thread.sleep(2000L); - } catch (final InterruptedException ignored) { - } finally { - System.gc(); - } + requestHandler.submit(() -> { + try { + Thread.sleep(2000L); + } catch (final InterruptedException ignored) { + } finally { + System.gc(); } }); } public void start() { - if (!_resource.start()) { - logger.error("Unable to start the resource: {}", _resource.getName()); - throw new CloudRuntimeException("Unable to start the resource: " + _resource.getName()); + if (!serverResource.start()) { + String msg = String.format("Unable to start the resource: %s", serverResource.getName()); + logger.error(msg); + throw new CloudRuntimeException(msg); } - _keystoreSetupPath = Script.findScript("scripts/util/", KeyStoreUtils.KS_SETUP_SCRIPT); - if (_keystoreSetupPath == null) { + keystoreSetupSetupPath = Script.findScript("scripts/util/", KeyStoreUtils.KS_SETUP_SCRIPT); + if (keystoreSetupSetupPath == null) { throw new CloudRuntimeException(String.format("Unable to find the '%s' script", KeyStoreUtils.KS_SETUP_SCRIPT)); } - _keystoreCertImportPath = Script.findScript("scripts/util/", KeyStoreUtils.KS_IMPORT_SCRIPT); - if (_keystoreCertImportPath == null) { + keystoreCertImportScriptPath = Script.findScript("scripts/util/", KeyStoreUtils.KS_IMPORT_SCRIPT); + if (keystoreCertImportScriptPath == null) { throw new CloudRuntimeException(String.format("Unable to find the '%s' script", KeyStoreUtils.KS_IMPORT_SCRIPT)); } try { - _connection.start(); + connection.start(); } catch (final NioConnectionException e) { logger.warn("Attempt to connect to server generated NIO Connection Exception {}, trying again", e.getLocalizedMessage()); } - while (!_connection.isStartup()) { - final String host = _shell.getNextHost(); - _shell.getBackoffAlgorithm().waitBeforeRetry(); - _connection = new NioClient("Agent", host, _shell.getPort(), _shell.getWorkers(), this); - logger.info("Connecting to host:{}", host); + while (!connection.isStartup()) { + final String host = shell.getNextHost(); + shell.getBackoffAlgorithm().waitBeforeRetry(); + connection = new NioClient(getAgentName(), host, shell.getPort(), shell.getWorkers(), + shell.getSslHandshakeTimeout(), this); + logger.info("Connecting to host: {}", host); try { - _connection.start(); + connection.start(); } catch (final NioConnectionException e) { - _connection.stop(); - try { - _connection.cleanUp(); - } catch (final IOException ex) { - logger.warn("Fail to clean up old connection. {}", ex); - } + stopAndCleanupConnection(false); logger.info("Attempted to connect to the server, but received an unexpected exception, trying again...", e); } } - _shell.updateConnectedHost(); + shell.updateConnectedHost(); scavengeOldAgentObjects(); } public void stop(final String reason, final String detail) { - logger.info("Stopping the agent: Reason = {} {}", reason, ": Detail = " + ObjectUtils.defaultIfNull(detail, "")); - _reconnectAllowed = false; - if (_connection != null) { + logger.info("Stopping the agent: Reason = {}{}", reason, (detail != null ? ": Detail = " + detail : "")); + reconnectAllowed = false; + if (connection != null) { final ShutdownCommand cmd = new ShutdownCommand(reason, detail); try { - if (_link != null) { - final Request req = new Request(_id != null ? _id : -1, -1, cmd, false); - _link.send(req.toBytes()); + if (link != null) { + final Request req = new Request(id != null ? 
id : -1, -1, cmd, false); + link.send(req.toBytes()); } } catch (final ClosedChannelException e) { logger.warn("Unable to send: {}", cmd.toString()); @@ -332,108 +335,127 @@ public void stop(final String reason, final String detail) { } catch (final InterruptedException e) { logger.debug("Who the heck interrupted me here?"); } - _connection.stop(); - _connection = null; - _link = null; + connection.stop(); + connection = null; + link = null; } - if (_resource != null) { - _resource.stop(); - _resource = null; + if (serverResource != null) { + serverResource.stop(); + serverResource = null; } - if (_startup != null) { - _startup = null; + if (startupTask.get() != null) { + startupTask.set(null); } - if (_ugentTaskPool != null) { - _ugentTaskPool.shutdownNow(); - _ugentTaskPool = null; + if (outRequestHandler != null) { + outRequestHandler.shutdownNow(); + outRequestHandler = null; } - if (_executor != null) { - _executor.shutdown(); - _executor = null; + if (requestHandler != null) { + requestHandler.shutdown(); + requestHandler = null; } - if (_timer != null) { - _timer.cancel(); - _timer = null; + if (selfTaskExecutor != null) { + selfTaskExecutor.shutdown(); + selfTaskExecutor = null; } - if (hostLBTimer != null) { - hostLBTimer.cancel(); - hostLBTimer = null; + if (hostLbCheckExecutor != null) { + hostLbCheckExecutor.shutdown(); + hostLbCheckExecutor = null; } - if (certTimer != null) { - certTimer.cancel(); - certTimer = null; + if (certExecutor != null) { + certExecutor.shutdown(); + certExecutor = null; } } public Long getId() { - return _id; + return id; } public void setId(final Long id) { logger.debug("Set agent id {}", id); - _id = id; - _shell.setPersistentProperty(getResourceName(), "id", Long.toString(id)); + this.id = id; + shell.setPersistentProperty(getResourceName(), "id", Long.toString(id)); } - private synchronized void scheduleServicesRestartTask() { - if (certTimer != null) { - certTimer.cancel(); - certTimer.purge(); + private void scheduleCertificateRenewalTask() { + String name = "CertificateRenewalTask"; + if (certExecutor != null && !certExecutor.isShutdown()) { + certExecutor.shutdown(); + try { + if (!certExecutor.awaitTermination(1, TimeUnit.SECONDS)) { + certExecutor.shutdownNow(); + } + } catch (InterruptedException e) { + logger.debug("Forcing {} shutdown as it did not shutdown in the desired time due to: {}", + name, e.getMessage()); + certExecutor.shutdownNow(); + } } - certTimer = new Timer("Certificate Renewal Timer"); - certTimer.schedule(new PostCertificateRenewalTask(this), 5000L); + certExecutor = Executors.newSingleThreadScheduledExecutor((new NamedThreadFactory(name))); + certExecutor.schedule(new PostCertificateRenewalTask(this), 5, TimeUnit.SECONDS); } - private synchronized void scheduleHostLBCheckerTask(final long checkInterval) { - if (hostLBTimer != null) { - hostLBTimer.cancel(); + private void scheduleHostLBCheckerTask(final long checkInterval) { + String name = "HostLBCheckerTask"; + if (hostLbCheckExecutor != null && !hostLbCheckExecutor.isShutdown()) { + hostLbCheckExecutor.shutdown(); + try { + if (!hostLbCheckExecutor.awaitTermination(1, TimeUnit.SECONDS)) { + hostLbCheckExecutor.shutdownNow(); + } + } catch (InterruptedException e) { + logger.debug("Forcing {} shutdown as it did not shutdown in the desired time due to: {}", + name, e.getMessage()); + hostLbCheckExecutor.shutdownNow(); + } } if (checkInterval > 0L) { - logger.info("Scheduling preferred host timer task with host.lb.interval={}ms", checkInterval); - hostLBTimer = new 
Timer("Host LB Timer"); - hostLBTimer.scheduleAtFixedRate(new PreferredHostCheckerTask(), checkInterval, checkInterval); + logger.info("Scheduling preferred host task with host.lb.interval={}ms", checkInterval); + hostLbCheckExecutor = Executors.newSingleThreadScheduledExecutor((new NamedThreadFactory(name))); + hostLbCheckExecutor.scheduleAtFixedRate(new PreferredHostCheckerTask(), checkInterval, checkInterval, + TimeUnit.MILLISECONDS); } } public void scheduleWatch(final Link link, final Request request, final long delay, final long period) { - synchronized (_watchList) { - logger.debug("Adding task with request: {} to watch list", request.toString()); - final WatchTask task = new WatchTask(link, request, this); - _timer.schedule(task, 0, period); - _watchList.add(task); + if (logger.isDebugEnabled()) { + logger.debug("Adding a watch list"); } + final WatchTask task = new WatchTask(link, request, this); + final ScheduledFuture future = selfTaskExecutor.scheduleAtFixedRate(task, delay, period, TimeUnit.MILLISECONDS); + watchList.add(future); } public void triggerUpdate() { - PingCommand command = _resource.getCurrentStatus(getId()); + PingCommand command = serverResource.getCurrentStatus(getId()); command.setOutOfBand(true); logger.debug("Sending out of band ping"); - - final Request request = new Request(_id, -1, command, false); + final Request request = new Request(id, -1, command, false); request.setSequence(getNextSequence()); try { - _link.send(request.toBytes()); + link.send(request.toBytes()); } catch (final ClosedChannelException e) { logger.warn("Unable to send ping update: {}", request.toString()); } } protected void cancelTasks() { - synchronized (_watchList) { - for (final WatchTask task : _watchList) { - task.cancel(); - } - logger.debug("Clearing {} tasks of watch list", _watchList.size()); - _watchList.clear(); + for (final ScheduledFuture task : watchList) { + task.cancel(true); + } + if (logger.isDebugEnabled()) { + logger.debug("Clearing watch list: " + watchList.size()); } + watchList.clear(); } /** @@ -444,27 +466,47 @@ protected void cancelTasks() { * when host is added back */ protected void cleanupAgentZoneProperties() { - _shell.setPersistentProperty(null, "zone", ""); - _shell.setPersistentProperty(null, "cluster", ""); - _shell.setPersistentProperty(null, "pod", ""); + shell.setPersistentProperty(null, "zone", ""); + shell.setPersistentProperty(null, "cluster", ""); + shell.setPersistentProperty(null, "pod", ""); + } + + public void lockStartupTask(final Link link) { + logger.debug("Creating startup task for link: {}", getLinkLog(link)); + StartupTask currentTask = startupTask.get(); + if (currentTask != null) { + logger.warn("A Startup task is already locked or in progress, cannot create for link {}", + getLinkLog(link)); + return; + } + currentTask = new StartupTask(link); + if (startupTask.compareAndSet(null, currentTask)) { + selfTaskExecutor.schedule(currentTask, startupWait, TimeUnit.SECONDS); + return; + } + logger.warn("Failed to lock a StartupTask for link: {}", getLinkLog(link)); } - public synchronized void lockStartupTask(final Link link) { - _startup = new StartupTask(link); - _timer.schedule(_startup, _startupWait); + protected boolean cancelStartupTask() { + StartupTask task = startupTask.getAndSet(null); + if (task != null) { + task.cancel(); + return true; + } + return false; } public void sendStartup(final Link link) { - final StartupCommand[] startup = _resource.initialize(); + final StartupCommand[] startup = serverResource.initialize(); if 
(startup != null) { - final String msHostList = _shell.getPersistentProperty(null, "host"); + final String msHostList = shell.getPersistentProperty(null, "host"); final Command[] commands = new Command[startup.length]; for (int i = 0; i < startup.length; i++) { setupStartupCommand(startup[i]); startup[i].setMSHostList(msHostList); commands[i] = startup[i]; } - final Request request = new Request(_id != null ? _id : -1, -1, commands, false, false); + final Request request = new Request(id != null ? id : -1, -1, commands, false, false); request.setSequence(getNextSequence()); logger.debug("Sending Startup: {}", request.toString()); @@ -472,31 +514,39 @@ public void sendStartup(final Link link) { try { link.send(request.toBytes()); } catch (final ClosedChannelException e) { - logger.warn("Unable to send request: {}", request.toString()); + logger.warn("Unable to send request to {} due to '{}', request: {}", + getLinkLog(link), e.getMessage(), request); } - if (_resource instanceof ResourceStatusUpdater) { - ((ResourceStatusUpdater) _resource).registerStatusUpdater(this); + if (serverResource instanceof ResourceStatusUpdater) { + ((ResourceStatusUpdater) serverResource).registerStatusUpdater(this); } } } - protected void setupStartupCommand(final StartupCommand startup) { - InetAddress addr; + protected String retrieveHostname() { + if (logger.isTraceEnabled()) { + logger.trace("Retrieving hostname with resource={}", serverResource.getClass().getSimpleName()); + } + final String result = Script.runSimpleBashScript(Script.getExecutableAbsolutePath("hostname"), 500); + if (StringUtils.isNotBlank(result)) { + return result; + } try { - addr = InetAddress.getLocalHost(); + InetAddress address = InetAddress.getLocalHost(); + return address.toString(); } catch (final UnknownHostException e) { logger.warn("unknown host? ", e); throw new CloudRuntimeException("Cannot get local IP address"); } + } - final Script command = new Script("hostname", 500, logger); - final OutputInterpreter.OneLineParser parser = new OutputInterpreter.OneLineParser(); - final String result = command.execute(parser); - final String hostname = result == null ? parser.getLine() : addr.toString(); - + protected void setupStartupCommand(final StartupCommand startup) { startup.setId(getId()); - if (startup.getName() == null) { + if (StringUtils.isBlank(startup.getName())) { + if (StringUtils.isBlank(hostname)) { + hostname = retrieveHostname(); + } startup.setName(hostname); } startup.setDataCenter(getZone()); @@ -518,78 +568,75 @@ public Task create(final Task.Type type, final Link link, final byte[] data) { return new ServerHandler(type, link, data); } - protected void reconnect(final Link link) { - if (!_reconnectAllowed) { + protected void closeAndTerminateLink(final Link link) { + if (link == null) { return; } - synchronized (this) { - if (_startup != null) { - _startup.cancel(); - _startup = null; - } - } + link.close(); + link.terminated(); + } - if (link != null) { - link.close(); - link.terminated(); + protected void stopAndCleanupConnection(boolean waitForStop) { + if (connection == null) { + return; } - - setLink(null); - cancelTasks(); - - _resource.disconnected(); - - logger.info("Lost connection to host: {}. Attempting reconnection while we still have {} commands in progress.", _shell.getConnectedHost(), _inProgress.get()); - - _connection.stop(); - + connection.stop(); try { - _connection.cleanUp(); + connection.cleanUp(); } catch (final IOException e) { logger.warn("Fail to clean up old connection. 
{}", e); } - - while (_connection.isStartup()) { - _shell.getBackoffAlgorithm().waitBeforeRetry(); + if (!waitForStop) { + return; } + do { + shell.getBackoffAlgorithm().waitBeforeRetry(); + } while (connection.isStartup()); + } + protected void reconnect(final Link link) { + if (!reconnectAllowed) { + logger.debug("Reconnect requested but it is not allowed {}", getLinkLog(link)); + return; + } + cancelStartupTask(); + closeAndTerminateLink(link); + closeAndTerminateLink(this.link); + setLink(null); + cancelTasks(); + serverResource.disconnected(); + logger.info("Lost connection to host: {}. Attempting reconnection while we still have {} commands in progress.", shell.getConnectedHost(), commandsInProgress.get()); + stopAndCleanupConnection(true); do { - final String host = _shell.getNextHost(); - _connection = new NioClient("Agent", host, _shell.getPort(), _shell.getWorkers(), this); - logger.info("Reconnecting to host:{}", host); + final String host = shell.getNextHost(); + connection = new NioClient(getAgentName(), host, shell.getPort(), shell.getWorkers(), shell.getSslHandshakeTimeout(), this); + logger.info("Reconnecting to host: {}", host); try { - _connection.start(); + connection.start(); } catch (final NioConnectionException e) { logger.info("Attempted to re-connect to the server, but received an unexpected exception, trying again...", e); - _connection.stop(); - try { - _connection.cleanUp(); - } catch (final IOException ex) { - logger.warn("Fail to clean up old connection. {}", ex); - } + stopAndCleanupConnection(false); } - _shell.getBackoffAlgorithm().waitBeforeRetry(); - } while (!_connection.isStartup()); - _shell.updateConnectedHost(); - logger.info("Connected to the host: {}", _shell.getConnectedHost()); + shell.getBackoffAlgorithm().waitBeforeRetry(); + } while (!connection.isStartup()); + shell.updateConnectedHost(); + logger.info("Connected to the host: {}", shell.getConnectedHost()); } public void processStartupAnswer(final Answer answer, final Response response, final Link link) { - boolean cancelled = false; - synchronized (this) { - if (_startup != null) { - _startup.cancel(); - _startup = null; - } else { - cancelled = true; - } - } + boolean answerValid = cancelStartupTask(); final StartupAnswer startup = (StartupAnswer)answer; if (!startup.getResult()) { logger.error("Not allowed to connect to the server: {}", answer.getDetails()); + if (serverResource != null && !serverResource.isExitOnFailures()) { + logger.trace("{} does not allow exit on failure, reconnecting", + serverResource.getClass().getSimpleName()); + reconnect(link); + return; + } System.exit(1); } - if (cancelled) { + if (!answerValid) { logger.warn("Threw away a startup answer because we're reconnecting."); return; } @@ -597,12 +644,12 @@ public void processStartupAnswer(final Answer answer, final Response response, f logger.info("Process agent startup answer, agent id = {}", startup.getHostId()); setId(startup.getHostId()); - _pingInterval = (long)startup.getPingInterval() * 1000; // change to ms. + pingInterval = startup.getPingInterval() * 1000L; // change to ms. 
- setLastPingResponseTime(); - scheduleWatch(link, response, _pingInterval, _pingInterval); + updateLastPingResponseTime(); + scheduleWatch(link, response, pingInterval, pingInterval); - _ugentTaskPool.setKeepAliveTime(2 * _pingInterval, TimeUnit.MILLISECONDS); + outRequestHandler.setKeepAliveTime(2 * pingInterval, TimeUnit.MILLISECONDS); logger.info("Startup Response Received: agent id = {}", getId()); } @@ -618,9 +665,6 @@ protected void processRequest(final Request request, final Link link) { final Command cmd = cmds[i]; Answer answer; try { - if (cmd.getContextParam("logid") != null) { - ThreadContext.put("logcontextid", cmd.getContextParam("logid")); - } if (logger.isDebugEnabled()) { if (!requestLogged) // ensures request is logged only once per method call { @@ -635,7 +679,7 @@ protected void processRequest(final Request request, final Link link) { if (cmd instanceof CronCommand) { final CronCommand watch = (CronCommand)cmd; - scheduleWatch(link, request, (long)watch.getInterval() * 1000, watch.getInterval() * 1000); + scheduleWatch(link, request, watch.getInterval() * 1000L, watch.getInterval() * 1000L); answer = new Answer(cmd, true, null); } else if (cmd instanceof ShutdownCommand) { final ShutdownCommand shutdown = (ShutdownCommand)cmd; @@ -644,10 +688,17 @@ protected void processRequest(final Request request, final Link link) { if (shutdown.isRemoveHost()) { cleanupAgentZoneProperties(); } - _reconnectAllowed = false; + reconnectAllowed = false; answer = new Answer(cmd, true, null); } else if (cmd instanceof ReadyCommand && ((ReadyCommand)cmd).getDetails() != null) { + logger.debug("Not ready to connect to mgt server: {}", ((ReadyCommand)cmd).getDetails()); + if (serverResource != null && !serverResource.isExitOnFailures()) { + logger.trace("{} does not allow exit on failure, reconnecting", + serverResource.getClass().getSimpleName()); + reconnect(link); + return; + } System.exit(1); return; } else if (cmd instanceof MaintainCommand) { @@ -655,12 +706,10 @@ protected void processRequest(final Request request, final Link link) { answer = new MaintainAnswer((MaintainCommand)cmd); } else if (cmd instanceof AgentControlCommand) { answer = null; - synchronized (_controlListeners) { - for (final IAgentControlListener listener : _controlListeners) { - answer = listener.processControlRequest(request, (AgentControlCommand)cmd); - if (answer != null) { - break; - } + for (final IAgentControlListener listener : controlListeners) { + answer = listener.processControlRequest(request, (AgentControlCommand)cmd); + if (answer != null) { + break; } } @@ -672,8 +721,8 @@ protected void processRequest(final Request request, final Link link) { answer = setupAgentKeystore((SetupKeyStoreCommand) cmd); } else if (cmd instanceof SetupCertificateCommand && ((SetupCertificateCommand) cmd).isHandleByAgent()) { answer = setupAgentCertificate((SetupCertificateCommand) cmd); - if (Host.Type.Routing.equals(_resource.getType())) { - scheduleServicesRestartTask(); + if (Host.Type.Routing.equals(serverResource.getType())) { + scheduleCertificateRenewalTask(); } } else if (cmd instanceof SetupMSListCommand) { answer = setupManagementServerList((SetupMSListCommand) cmd); @@ -681,11 +730,11 @@ protected void processRequest(final Request request, final Link link) { if (cmd instanceof ReadyCommand) { processReadyCommand(cmd); } - _inProgress.incrementAndGet(); + commandsInProgress.incrementAndGet(); try { - answer = _resource.executeRequest(cmd); + answer = serverResource.executeRequest(cmd); } finally { - 
_inProgress.decrementAndGet(); + commandsInProgress.decrementAndGet(); } if (answer == null) { logger.debug("Response: unsupported command {}", cmd.toString()); @@ -739,13 +788,13 @@ public Answer setupAgentKeystore(final SetupKeyStoreCommand cmd) { final String keyStoreFile = agentFile.getParent() + "/" + KeyStoreUtils.KS_FILENAME; final String csrFile = agentFile.getParent() + "/" + KeyStoreUtils.CSR_FILENAME; - String storedPassword = _shell.getPersistentProperty(null, KeyStoreUtils.KS_PASSPHRASE_PROPERTY); + String storedPassword = shell.getPersistentProperty(null, KeyStoreUtils.KS_PASSPHRASE_PROPERTY); if (StringUtils.isEmpty(storedPassword)) { storedPassword = keyStorePassword; - _shell.setPersistentProperty(null, KeyStoreUtils.KS_PASSPHRASE_PROPERTY, storedPassword); + shell.setPersistentProperty(null, KeyStoreUtils.KS_PASSPHRASE_PROPERTY, storedPassword); } - Script script = new Script(_keystoreSetupPath, 300000, logger); + Script script = new Script(keystoreSetupSetupPath, 300000, logger); script.add(agentFile.getAbsolutePath()); script.add(keyStoreFile); script.add(storedPassword); @@ -789,8 +838,8 @@ private Answer setupAgentCertificate(final SetupCertificateCommand cmd) { throw new CloudRuntimeException("Unable to save received agent client and ca certificates", e); } - String ksPassphrase = _shell.getPersistentProperty(null, KeyStoreUtils.KS_PASSPHRASE_PROPERTY); - Script script = new Script(_keystoreCertImportPath, 300000, logger); + String ksPassphrase = shell.getPersistentProperty(null, KeyStoreUtils.KS_PASSPHRASE_PROPERTY); + Script script = new Script(keystoreCertImportScriptPath, 300000, logger); script.add(agentFile.getAbsolutePath()); script.add(ksPassphrase); script.add(keyStoreFile); @@ -812,9 +861,9 @@ private void processManagementServerList(final List msList, final String if (CollectionUtils.isNotEmpty(msList) && StringUtils.isNotEmpty(lbAlgorithm)) { try { final String newMSHosts = String.format("%s%s%s", com.cloud.utils.StringUtils.toCSVList(msList), IAgentShell.hostLbAlgorithmSeparator, lbAlgorithm); - _shell.setPersistentProperty(null, "host", newMSHosts); - _shell.setHosts(newMSHosts); - _shell.resetHostCounter(); + shell.setPersistentProperty(null, "host", newMSHosts); + shell.setHosts(newMSHosts); + shell.resetHostCounter(); logger.info("Processed new management server list: {}", newMSHosts); } catch (final Exception e) { throw new CloudRuntimeException("Could not persist received management servers list", e); @@ -823,7 +872,7 @@ private void processManagementServerList(final List msList, final String if ("shuffle".equals(lbAlgorithm)) { scheduleHostLBCheckerTask(0); } else { - scheduleHostLBCheckerTask(_shell.getLbCheckerInterval(lbCheckInterval)); + scheduleHostLBCheckerTask(shell.getLbCheckerInterval(lbCheckInterval)); } } @@ -839,16 +888,14 @@ public void processResponse(final Response response, final Link link) { processStartupAnswer(answer, response, link); } else if (answer instanceof AgentControlAnswer) { // Notice, we are doing callback while holding a lock! 
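            // controlListeners is now a CopyOnWriteArrayList, so the iteration below runs
            // without holding a lock; the note above about calling back under a lock predates
            // this change.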
- synchronized (_controlListeners) { - for (final IAgentControlListener listener : _controlListeners) { - listener.processControlResponse(response, (AgentControlAnswer)answer); - } + for (final IAgentControlListener listener : controlListeners) { + listener.processControlResponse(response, (AgentControlAnswer)answer); } - } else if (answer instanceof PingAnswer && (((PingAnswer) answer).isSendStartup()) && _reconnectAllowed) { + } else if (answer instanceof PingAnswer && (((PingAnswer) answer).isSendStartup()) && reconnectAllowed) { logger.info("Management server requested startup command to reinitialize the agent"); sendStartup(link); } else { - setLastPingResponseTime(); + updateLastPingResponseTime(); } } @@ -883,22 +930,24 @@ private void verifyAgentArch(String arch) { public void processOtherTask(final Task task) { final Object obj = task.get(); if (obj instanceof Response) { - if (System.currentTimeMillis() - _lastPingResponseTime > _pingInterval * _shell.getPingRetries()) { - logger.error("Ping Interval has gone past {}. Won't reconnect to mgt server, as connection is still alive", _pingInterval * _shell.getPingRetries()); + if (System.currentTimeMillis() - lastPingResponseTime.get() > pingInterval * shell.getPingRetries()) { + logger.error("Ping Interval has gone past {}. Won't reconnect to mgt server, as connection is still alive", + pingInterval * shell.getPingRetries()); return; } - final PingCommand ping = _resource.getCurrentStatus(getId()); - final Request request = new Request(_id, -1, ping, false); + final PingCommand ping = serverResource.getCurrentStatus(getId()); + final Request request = new Request(id, -1, ping, false); request.setSequence(getNextSequence()); logger.debug("Sending ping: {}", request.toString()); try { task.getLink().send(request.toBytes()); //if i can send pingcommand out, means the link is ok - setLastPingResponseTime(); + updateLastPingResponseTime(); } catch (final ClosedChannelException e) { - logger.warn("Unable to send request: {}", request.toString()); + logger.warn("Unable to send request to {} due to '{}', request: {}", + getLinkLog(task.getLink()), e.getMessage(), request); } } else if (obj instanceof Request) { @@ -908,11 +957,11 @@ public void processOtherTask(final Task task) { ThreadContext.put("logcontextid", command.getContextParam("logid")); } Answer answer = null; - _inProgress.incrementAndGet(); + commandsInProgress.incrementAndGet(); try { - answer = _resource.executeRequest(command); + answer = serverResource.executeRequest(command); } finally { - _inProgress.decrementAndGet(); + commandsInProgress.decrementAndGet(); } if (answer != null) { final Response response = new Response(req, answer); @@ -929,46 +978,37 @@ public void processOtherTask(final Task task) { } } - public synchronized void setLastPingResponseTime() { - _lastPingResponseTime = System.currentTimeMillis(); + public void updateLastPingResponseTime() { + lastPingResponseTime.set(System.currentTimeMillis()); } - protected synchronized long getNextSequence() { - return _sequence++; + protected long getNextSequence() { + return sequence.getAndIncrement(); } @Override public void registerControlListener(final IAgentControlListener listener) { - synchronized (_controlListeners) { - _controlListeners.add(listener); - } + controlListeners.add(listener); } @Override public void unregisterControlListener(final IAgentControlListener listener) { - synchronized (_controlListeners) { - _controlListeners.remove(listener); - } + controlListeners.remove(listener); } @Override public 
AgentControlAnswer sendRequest(final AgentControlCommand cmd, final int timeoutInMilliseconds) throws AgentControlChannelException { final Request request = new Request(getId(), -1, new Command[] {cmd}, true, false); request.setSequence(getNextSequence()); - final AgentControlListener listener = new AgentControlListener(request); - registerControlListener(listener); try { postRequest(request); - synchronized (listener) { - try { - listener.wait(timeoutInMilliseconds); - } catch (final InterruptedException e) { - logger.warn("sendRequest is interrupted, exit waiting"); - } + try { + listener.wait(timeoutInMilliseconds); + } catch (final InterruptedException e) { + logger.warn("sendRequest is interrupted, exit waiting"); } - return listener.getAnswer(); } finally { unregisterControlListener(listener); @@ -983,9 +1023,9 @@ public void postRequest(final AgentControlCommand cmd) throws AgentControlChanne } private void postRequest(final Request request) throws AgentControlChannelException { - if (_link != null) { + if (link != null) { try { - _link.send(request.toBytes()); + link.send(request.toBytes()); } catch (final ClosedChannelException e) { logger.warn("Unable to post agent control request: {}", request.toString()); throw new AgentControlChannelException("Unable to post agent control request due to " + e.getMessage()); @@ -1037,7 +1077,7 @@ public void run() { } } - public class WatchTask extends ManagedContextTimerTask { + public class WatchTask implements Runnable { protected Request _request; protected Agent _agent; protected Link _link; @@ -1050,11 +1090,11 @@ public WatchTask(final Link link, final Request request, final Agent agent) { } @Override - protected void runInContext() { + public void run() { logger.trace("Scheduling {}", (_request instanceof Response ? "Ping" : "Watch Task")); try { if (_request instanceof Response) { - _ugentTaskPool.submit(new ServerHandler(Task.Type.OTHER, _link, _request)); + outRequestHandler.submit(new ServerHandler(Task.Type.OTHER, _link, _request)); } else { _link.schedule(new ServerHandler(Task.Type.OTHER, _link, _request)); } @@ -1064,34 +1104,33 @@ protected void runInContext() { } } - public class StartupTask extends ManagedContextTimerTask { + public class StartupTask implements Runnable { protected Link _link; - protected volatile boolean cancelled = false; + private final AtomicBoolean cancelled = new AtomicBoolean(false); public StartupTask(final Link link) { logger.debug("Startup task created"); _link = link; } - @Override - public synchronized boolean cancel() { + public boolean cancel() { // TimerTask.cancel may fail depends on the calling context - if (!cancelled) { - cancelled = true; - _startupWait = _startupWaitDefault; + if (cancelled.compareAndSet(false, true)) { + startupWait = DEFAULT_STARTUP_WAIT; logger.debug("Startup task cancelled"); - return super.cancel(); } return true; } @Override - protected synchronized void runInContext() { - if (!cancelled) { - logger.info("The startup command is now cancelled"); - cancelled = true; - _startup = null; - _startupWait = _startupWaitDefault * 2; + public void run() { + if (cancelled.compareAndSet(false, true)) { + logger.info("The running startup command is now invalid. 
Attempting reconnect"); + startupTask.set(null); + startupWait = DEFAULT_STARTUP_WAIT * 2; + if (logger.isDebugEnabled()) { + logger.debug("Executing reconnect from task - {}", getLinkLog(_link)); + } reconnect(_link); } } @@ -1123,7 +1162,7 @@ public ServerHandler(final Task.Type type, final Link link, final Request req) { @Override public void doTask(final Task task) throws TaskExecutionException { if (task.getType() == Task.Type.CONNECT) { - _shell.getBackoffAlgorithm().reset(); + shell.getBackoffAlgorithm().reset(); setLink(task.getLink()); sendStartup(task.getLink()); } else if (task.getType() == Task.Type.DATA) { @@ -1136,7 +1175,7 @@ public void doTask(final Task task) throws TaskExecutionException { } else { //put the requests from mgt server into another thread pool, as the request may take a longer time to finish. Don't block the NIO main thread pool //processRequest(request, task.getLink()); - _executor.submit(new AgentRequestHandler(getType(), getLink(), request)); + requestHandler.submit(new AgentRequestHandler(getType(), getLink(), request)); } } catch (final ClassNotFoundException e) { logger.error("Unable to find this request "); @@ -1144,14 +1183,8 @@ public void doTask(final Task task) throws TaskExecutionException { logger.error("Error parsing task", e); } } else if (task.getType() == Task.Type.DISCONNECT) { - try { - // an issue has been found if reconnect immediately after disconnecting. please refer to https://github.com/apache/cloudstack/issues/8517 - // wait 5 seconds before reconnecting - Thread.sleep(5000); - } catch (InterruptedException e) { - } + logger.debug("Executing disconnect task - {}", getLinkLog(task.getLink())); reconnect(task.getLink()); - return; } else if (task.getType() == Task.Type.OTHER) { processOtherTask(task); } @@ -1174,26 +1207,25 @@ public PostCertificateRenewalTask(final Agent agent) { protected void runInContext() { while (true) { try { - if (_inProgress.get() == 0) { + if (commandsInProgress.get() == 0) { logger.debug("Running post certificate renewal task to restart services."); // Let the resource perform any post certificate renewal cleanups - _resource.executeRequest(new PostCertificateRenewalCommand()); + serverResource.executeRequest(new PostCertificateRenewalCommand()); - IAgentShell shell = agent._shell; - ServerResource resource = agent._resource.getClass().newInstance(); + IAgentShell shell = agent.shell; + ServerResource resource = agent.serverResource.getClass().getDeclaredConstructor().newInstance(); // Stop current agent agent.cancelTasks(); - agent._reconnectAllowed = false; - Runtime.getRuntime().removeShutdownHook(agent._shutdownThread); + agent.reconnectAllowed = false; + Runtime.getRuntime().removeShutdownHook(agent.shutdownThread); agent.stop(ShutdownCommand.Requested, "Restarting due to new X509 certificates"); // Nullify references for GC - agent._shell = null; - agent._watchList = null; - agent._shutdownThread = null; - agent._controlListeners = null; + agent.watchList = null; + agent.shutdownThread = null; + agent.controlListeners = null; agent = null; // Start a new agent instance @@ -1201,7 +1233,6 @@ protected void runInContext() { return; } logger.debug("Other tasks are in progress, will retry post certificate renewal command after few seconds"); - Thread.sleep(5000); } catch (final Exception e) { logger.warn("Failed to execute post certificate renewal command:", e); @@ -1216,35 +1247,34 @@ public class PreferredHostCheckerTask extends ManagedContextTimerTask { @Override protected void runInContext() { try { - 
final String[] msList = _shell.getHosts(); + final String[] msList = shell.getHosts(); if (msList == null || msList.length < 1) { return; } final String preferredHost = msList[0]; - final String connectedHost = _shell.getConnectedHost(); - logger.trace("Running preferred host checker task, connected host={}, preferred host={}", connectedHost, preferredHost); - - if (preferredHost != null && !preferredHost.equals(connectedHost) && _link != null) { - boolean isHostUp = true; - try (final Socket socket = new Socket()) { - socket.connect(new InetSocketAddress(preferredHost, _shell.getPort()), 5000); - } catch (final IOException e) { - isHostUp = false; - logger.trace("Host: {} is not reachable", preferredHost); - - } - if (isHostUp && _link != null && _inProgress.get() == 0) { + final String connectedHost = shell.getConnectedHost(); + logger.debug("Running preferred host checker task, connected host={}, preferred host={}", + connectedHost, preferredHost); + if (preferredHost == null || preferredHost.equals(connectedHost) || link == null) { + return; + } + boolean isHostUp = false; + try (final Socket socket = new Socket()) { + socket.connect(new InetSocketAddress(preferredHost, shell.getPort()), 5000); + isHostUp = true; + } catch (final IOException e) { + logger.debug("Host: {} is not reachable", preferredHost); + } + if (isHostUp && link != null && commandsInProgress.get() == 0) { + if (logger.isDebugEnabled()) { logger.debug("Preferred host {} is found to be reachable, trying to reconnect", preferredHost); - - _shell.resetHostCounter(); - reconnect(_link); } + shell.resetHostCounter(); + reconnect(link); } } catch (Throwable t) { logger.error("Error caught while attempting to connect to preferred host", t); } } - } - } diff --git a/agent/src/main/java/com/cloud/agent/AgentShell.java b/agent/src/main/java/com/cloud/agent/AgentShell.java index 0699e00250bb..c5257b95b7c2 100644 --- a/agent/src/main/java/com/cloud/agent/AgentShell.java +++ b/agent/src/main/java/com/cloud/agent/AgentShell.java @@ -16,29 +16,6 @@ // under the License. 
package com.cloud.agent; -import com.cloud.agent.Agent.ExitStatus; -import com.cloud.agent.dao.StorageComponent; -import com.cloud.agent.dao.impl.PropertiesStorage; -import com.cloud.agent.properties.AgentProperties; -import com.cloud.agent.properties.AgentPropertiesFileHandler; -import com.cloud.resource.ServerResource; -import com.cloud.utils.LogUtils; -import com.cloud.utils.ProcessUtil; -import com.cloud.utils.PropertiesUtil; -import com.cloud.utils.backoff.BackoffAlgorithm; -import com.cloud.utils.backoff.impl.ConstantTimeBackoff; -import com.cloud.utils.exception.CloudRuntimeException; -import org.apache.commons.daemon.Daemon; -import org.apache.commons.daemon.DaemonContext; -import org.apache.commons.daemon.DaemonInitException; -import org.apache.commons.lang.math.NumberUtils; -import org.apache.commons.lang3.BooleanUtils; -import org.apache.commons.lang3.StringUtils; -import org.apache.logging.log4j.Logger; -import org.apache.logging.log4j.LogManager; -import org.apache.logging.log4j.core.config.Configurator; - -import javax.naming.ConfigurationException; import java.io.File; import java.io.FileNotFoundException; import java.io.IOException; @@ -53,6 +30,31 @@ import java.util.Properties; import java.util.UUID; +import javax.naming.ConfigurationException; + +import org.apache.commons.daemon.Daemon; +import org.apache.commons.daemon.DaemonContext; +import org.apache.commons.daemon.DaemonInitException; +import org.apache.commons.lang.math.NumberUtils; +import org.apache.commons.lang3.BooleanUtils; +import org.apache.commons.lang3.StringUtils; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.core.config.Configurator; + +import com.cloud.agent.Agent.ExitStatus; +import com.cloud.agent.dao.StorageComponent; +import com.cloud.agent.dao.impl.PropertiesStorage; +import com.cloud.agent.properties.AgentProperties; +import com.cloud.agent.properties.AgentPropertiesFileHandler; +import com.cloud.resource.ServerResource; +import com.cloud.utils.LogUtils; +import com.cloud.utils.ProcessUtil; +import com.cloud.utils.PropertiesUtil; +import com.cloud.utils.backoff.BackoffAlgorithm; +import com.cloud.utils.backoff.impl.ConstantTimeBackoff; +import com.cloud.utils.exception.CloudRuntimeException; + public class AgentShell implements IAgentShell, Daemon { protected static Logger LOGGER = LogManager.getLogger(AgentShell.class); @@ -406,7 +408,9 @@ public void init(String[] args) throws ConfigurationException { LOGGER.info("Defaulting to the constant time backoff algorithm"); _backoff = new ConstantTimeBackoff(); - _backoff.configure("ConstantTimeBackoff", new HashMap()); + Map map = new HashMap<>(); + map.put("seconds", _properties.getProperty("backoff.seconds")); + _backoff.configure("ConstantTimeBackoff", map); } private void launchAgent() throws ConfigurationException { @@ -455,6 +459,11 @@ public void launchNewAgent(ServerResource resource) throws ConfigurationExceptio agent.start(); } + @Override + public Integer getSslHandshakeTimeout() { + return AgentPropertiesFileHandler.getPropertyValue(AgentProperties.SSL_HANDSHAKE_TIMEOUT); + } + public synchronized int getNextAgentId() { return _nextAgentId++; } diff --git a/agent/src/main/java/com/cloud/agent/IAgentShell.java b/agent/src/main/java/com/cloud/agent/IAgentShell.java index 2dd08fffd459..7f04048795d7 100644 --- a/agent/src/main/java/com/cloud/agent/IAgentShell.java +++ b/agent/src/main/java/com/cloud/agent/IAgentShell.java @@ -70,4 +70,6 @@ public interface IAgentShell { 
String getConnectedHost(); void launchNewAgent(ServerResource resource) throws ConfigurationException; + + Integer getSslHandshakeTimeout(); } diff --git a/agent/src/main/java/com/cloud/agent/properties/AgentProperties.java b/agent/src/main/java/com/cloud/agent/properties/AgentProperties.java index 8f97edc39357..2efe24e655d5 100644 --- a/agent/src/main/java/com/cloud/agent/properties/AgentProperties.java +++ b/agent/src/main/java/com/cloud/agent/properties/AgentProperties.java @@ -810,6 +810,13 @@ public Property getWorkers() { */ public static final Property HOST_TAGS = new Property<>("host.tags", null, String.class); + /** + * Timeout for SSL handshake in seconds + * Data type: Integer.
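     * Read through AgentShell#getSslHandshakeTimeout() and passed to the NioClient when the agent connects; if unset, the 30-second default documented in agent.properties applies.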
+ * Default value: null + */ + public static final Property SSL_HANDSHAKE_TIMEOUT = new Property<>("ssl.handshake.timeout", null, Integer.class); + public static class Property { private String name; private T defaultValue; diff --git a/agent/src/test/java/com/cloud/agent/AgentShellTest.java b/agent/src/test/java/com/cloud/agent/AgentShellTest.java index 4126692546f2..6d9758cc3dc8 100644 --- a/agent/src/test/java/com/cloud/agent/AgentShellTest.java +++ b/agent/src/test/java/com/cloud/agent/AgentShellTest.java @@ -362,4 +362,11 @@ public void updateAndGetConnectedHost() { Assert.assertEquals(expected, shell.getConnectedHost()); } + + @Test + public void testGetSslHandshakeTimeout() { + Integer expected = 1; + agentPropertiesFileHandlerMocked.when(() -> AgentPropertiesFileHandler.getPropertyValue(Mockito.eq(AgentProperties.SSL_HANDSHAKE_TIMEOUT))).thenReturn(expected); + Assert.assertEquals(expected, agentShellSpy.getSslHandshakeTimeout()); + } } diff --git a/agent/src/test/java/com/cloud/agent/AgentTest.java b/agent/src/test/java/com/cloud/agent/AgentTest.java new file mode 100644 index 000000000000..89023c8d6950 --- /dev/null +++ b/agent/src/test/java/com/cloud/agent/AgentTest.java @@ -0,0 +1,258 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
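// Unit tests for the refactored Agent: link logging, agent naming, executor and shutdown-hook
// setup, control-listener registration, sequence handling, and connection cleanup with the
// configured backoff algorithm.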
+package com.cloud.agent; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertNotNull; +import static org.junit.Assert.assertSame; +import static org.junit.Assert.assertTrue; +import static org.mockito.Mockito.any; +import static org.mockito.Mockito.doReturn; +import static org.mockito.Mockito.doThrow; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.eq; +import static org.mockito.Mockito.times; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.when; + +import java.io.IOException; +import java.net.InetSocketAddress; + +import javax.naming.ConfigurationException; + +import org.apache.logging.log4j.Logger; +import org.junit.Before; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.mockito.junit.MockitoJUnitRunner; +import org.springframework.test.util.ReflectionTestUtils; + +import com.cloud.resource.ServerResource; +import com.cloud.utils.backoff.impl.ConstantTimeBackoff; +import com.cloud.utils.nio.Link; +import com.cloud.utils.nio.NioConnection; + +@RunWith(MockitoJUnitRunner.class) +public class AgentTest { + Agent agent; + private AgentShell shell; + private ServerResource serverResource; + private Logger logger; + + @Before + public void setUp() throws ConfigurationException { + shell = mock(AgentShell.class); + serverResource = mock(ServerResource.class); + doReturn(true).when(serverResource).configure(any(), any()); + doReturn(1).when(shell).getWorkers(); + doReturn(1).when(shell).getPingRetries(); + agent = new Agent(shell, 1, serverResource); + logger = mock(Logger.class); + ReflectionTestUtils.setField(agent, "logger", logger); + } + + @Test + public void testGetLinkLogNullLinkReturnsEmptyString() { + Link link = null; + String result = agent.getLinkLog(link); + assertEquals("", result); + } + + @Test + public void testGetLinkLogLinkWithTraceEnabledReturnsLinkLogWithHashCode() { + Link link = mock(Link.class); + InetSocketAddress socketAddress = new InetSocketAddress("192.168.1.100", 1111); + when(link.getSocketAddress()).thenReturn(socketAddress); + when(logger.isTraceEnabled()).thenReturn(true); + + String result = agent.getLinkLog(link); + System.out.println(result); + assertTrue(result.startsWith(System.identityHashCode(link) + "-")); + assertTrue(result.contains("192.168.1.100")); + } + + @Test + public void testGetAgentNameWhenServerResourceIsNull() { + ReflectionTestUtils.setField(agent, "serverResource", null); + assertEquals("Agent", agent.getAgentName()); + } + + @Test + public void testGetAgentNameWhenAppendAgentNameIsTrue() { + when(serverResource.isAppendAgentNameToLogs()).thenReturn(true); + when(serverResource.getName()).thenReturn("TestAgent"); + + String agentName = agent.getAgentName(); + assertEquals("TestAgent", agentName); + } + + @Test + public void testGetAgentNameWhenAppendAgentNameIsFalse() { + when(serverResource.isAppendAgentNameToLogs()).thenReturn(false); + + String agentName = agent.getAgentName(); + assertEquals("Agent", agentName); + } + + @Test + public void testAgentInitialization() { + Runtime.getRuntime().removeShutdownHook(agent.shutdownThread); + when(shell.getPingRetries()).thenReturn(3); + when(shell.getWorkers()).thenReturn(5); + agent.setupShutdownHookAndInitExecutors(); + assertNotNull(agent.selfTaskExecutor); + assertNotNull(agent.outRequestHandler); + assertNotNull(agent.requestHandler); + } + + @Test + public void testAgentShutdownHookAdded() { + 
Runtime.getRuntime().removeShutdownHook(agent.shutdownThread); + when(logger.isTraceEnabled()).thenReturn(true); + agent.setupShutdownHookAndInitExecutors(); + verify(logger).trace("Adding shutdown hook"); + } + + @Test + public void testGetResourceGuidValidGuidAndResourceName() { + when(shell.getGuid()).thenReturn("12345"); + String result = agent.getResourceGuid(); + assertTrue(result.startsWith("12345-" + ServerResource.class.getSimpleName())); + } + + @Test + public void testGetZoneReturnsValidZone() { + when(shell.getZone()).thenReturn("ZoneA"); + String result = agent.getZone(); + assertEquals("ZoneA", result); + } + + @Test + public void testGetPodReturnsValidPod() { + when(shell.getPod()).thenReturn("PodA"); + String result = agent.getPod(); + assertEquals("PodA", result); + } + + @Test + public void testSetLinkAssignsLink() { + Link mockLink = mock(Link.class); + agent.setLink(mockLink); + assertEquals(mockLink, agent.link); + } + + @Test + public void testGetResourceReturnsServerResource() { + ServerResource mockResource = mock(ServerResource.class); + ReflectionTestUtils.setField(agent, "serverResource", mockResource); + ServerResource result = agent.getResource(); + assertSame(mockResource, result); + } + + @Test + public void testGetResourceName() { + String result = agent.getResourceName(); + assertTrue(result.startsWith(ServerResource.class.getSimpleName())); + } + + @Test + public void testUpdateLastPingResponseTimeUpdatesCurrentTime() { + long beforeUpdate = System.currentTimeMillis(); + agent.updateLastPingResponseTime(); + long updatedTime = agent.lastPingResponseTime.get(); + assertTrue(updatedTime >= beforeUpdate); + assertTrue(updatedTime <= System.currentTimeMillis()); + } + + @Test + public void testGetNextSequenceIncrementsSequence() { + long initialSequence = agent.getNextSequence(); + long nextSequence = agent.getNextSequence(); + assertEquals(initialSequence + 1, nextSequence); + long thirdSequence = agent.getNextSequence(); + assertEquals(nextSequence + 1, thirdSequence); + } + + @Test + public void testRegisterControlListenerAddsListener() { + IAgentControlListener listener = mock(IAgentControlListener.class); + agent.registerControlListener(listener); + assertTrue(agent.controlListeners.contains(listener)); + } + + @Test + public void testUnregisterControlListenerRemovesListener() { + IAgentControlListener listener = mock(IAgentControlListener.class); + agent.registerControlListener(listener); + assertTrue(agent.controlListeners.contains(listener)); + agent.unregisterControlListener(listener); + assertFalse(agent.controlListeners.contains(listener)); + } + + @Test + public void testCloseAndTerminateLinkLinkIsNullDoesNothing() { + agent.closeAndTerminateLink(null); + } + + @Test + public void testCloseAndTerminateLinkValidLinkCallsCloseAndTerminate() { + Link mockLink = mock(Link.class); + agent.closeAndTerminateLink(mockLink); + verify(mockLink).close(); + verify(mockLink).terminated(); + } + + @Test + public void testStopAndCleanupConnectionConnectionIsNullDoesNothing() { + agent.connection = null; + agent.stopAndCleanupConnection(false); + } + + @Test + public void testStopAndCleanupConnectionValidConnectionNoWaitStopsAndCleansUp() throws IOException { + NioConnection mockConnection = mock(NioConnection.class); + agent.connection = mockConnection; + agent.stopAndCleanupConnection(false); + verify(mockConnection).stop(); + verify(mockConnection).cleanUp(); + } + + @Test + public void testStopAndCleanupConnectionCleanupThrowsIOExceptionLogsWarning() throws 
IOException { + NioConnection mockConnection = mock(NioConnection.class); + agent.connection = mockConnection; + doThrow(new IOException("Cleanup failed")).when(mockConnection).cleanUp(); + agent.stopAndCleanupConnection(false); + verify(mockConnection).stop(); + verify(logger).warn(eq("Fail to clean up old connection. {}"), any(IOException.class)); + } + + @Test + public void testStopAndCleanupConnectionValidConnectionWaitForStopWaitsForStartupToStop() throws IOException { + NioConnection mockConnection = mock(NioConnection.class); + ConstantTimeBackoff mockBackoff = mock(ConstantTimeBackoff.class); + mockBackoff.setTimeToWait(0); + agent.connection = mockConnection; + when(shell.getBackoffAlgorithm()).thenReturn(mockBackoff); + when(mockConnection.isStartup()).thenReturn(true, true, false); + agent.stopAndCleanupConnection(true); + verify(mockConnection).stop(); + verify(mockConnection).cleanUp(); + verify(mockBackoff, times(3)).waitBeforeRetry(); + } +} diff --git a/api/src/main/java/org/apache/cloudstack/acl/RoleService.java b/api/src/main/java/org/apache/cloudstack/acl/RoleService.java index 68204d432534..f041c8342aec 100644 --- a/api/src/main/java/org/apache/cloudstack/acl/RoleService.java +++ b/api/src/main/java/org/apache/cloudstack/acl/RoleService.java @@ -30,6 +30,11 @@ public interface RoleService { ConfigKey EnableDynamicApiChecker = new ConfigKey<>("Advanced", Boolean.class, "dynamic.apichecker.enabled", "false", "If set to true, this enables the dynamic role-based api access checker and disables the default static role-based api access checker.", true); + ConfigKey DynamicApiCheckerCachePeriod = new ConfigKey<>("Advanced", Integer.class, + "dynamic.apichecker.cache.period", "0", + "Defines the expiration time in seconds for the Dynamic API Checker cache, determining how long cached data is retained before being refreshed. 
If set to zero then caching will be disabled", + false); + boolean isEnabled(); /** diff --git a/api/src/main/java/org/apache/cloudstack/api/command/admin/domain/ListDomainsCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/admin/domain/ListDomainsCmd.java index b91e56dcaef0..895e93289923 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/admin/domain/ListDomainsCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/admin/domain/ListDomainsCmd.java @@ -100,7 +100,7 @@ public EnumSet getDetails() throws InvalidParameterValueException dv = EnumSet.of(DomainDetails.all); } else { try { - ArrayList dc = new ArrayList(); + ArrayList dc = new ArrayList<>(); for (String detail : viewDetails) { dc.add(DomainDetails.valueOf(detail)); } @@ -142,7 +142,10 @@ protected void updateDomainResponse(List response) { if (CollectionUtils.isEmpty(response)) { return; } - _resourceLimitService.updateTaggedResourceLimitsAndCountsForDomains(response, getTag()); + EnumSet details = getDetails(); + if (details.contains(DomainDetails.all) || details.contains(DomainDetails.resource)) { + _resourceLimitService.updateTaggedResourceLimitsAndCountsForDomains(response, getTag()); + } if (!getShowIcon()) { return; } diff --git a/api/src/main/java/org/apache/cloudstack/api/command/user/account/ListAccountsCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/user/account/ListAccountsCmd.java index 0a962b19e4f3..8cfb4d8406b4 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/user/account/ListAccountsCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/user/account/ListAccountsCmd.java @@ -149,7 +149,10 @@ protected void updateAccountResponse(List response) { if (CollectionUtils.isEmpty(response)) { return; } - _resourceLimitService.updateTaggedResourceLimitsAndCountsForAccounts(response, getTag()); + EnumSet details = getDetails(); + if (details.contains(DomainDetails.all) || details.contains(DomainDetails.resource)) { + _resourceLimitService.updateTaggedResourceLimitsAndCountsForAccounts(response, getTag()); + } if (!getShowIcon()) { return; } diff --git a/api/src/main/java/org/apache/cloudstack/outofbandmanagement/OutOfBandManagementService.java b/api/src/main/java/org/apache/cloudstack/outofbandmanagement/OutOfBandManagementService.java index d670e4d3a88f..4f6f1ad66c93 100644 --- a/api/src/main/java/org/apache/cloudstack/outofbandmanagement/OutOfBandManagementService.java +++ b/api/src/main/java/org/apache/cloudstack/outofbandmanagement/OutOfBandManagementService.java @@ -39,7 +39,7 @@ public interface OutOfBandManagementService { long getId(); boolean isOutOfBandManagementEnabled(Host host); void submitBackgroundPowerSyncTask(Host host); - boolean transitionPowerStateToDisabled(List hosts); + boolean transitionPowerStateToDisabled(List hostIds); OutOfBandManagementResponse enableOutOfBandManagement(DataCenter zone); OutOfBandManagementResponse enableOutOfBandManagement(Cluster cluster); diff --git a/api/src/test/java/org/apache/cloudstack/api/command/admin/domain/ListDomainsCmdTest.java b/api/src/test/java/org/apache/cloudstack/api/command/admin/domain/ListDomainsCmdTest.java index 3c9d4cb67ae1..45f175e9a815 100644 --- a/api/src/test/java/org/apache/cloudstack/api/command/admin/domain/ListDomainsCmdTest.java +++ b/api/src/test/java/org/apache/cloudstack/api/command/admin/domain/ListDomainsCmdTest.java @@ -18,6 +18,7 @@ import java.util.List; +import org.apache.cloudstack.api.ApiConstants; import org.apache.cloudstack.api.response.DomainResponse; 
import org.junit.Assert; import org.junit.Test; @@ -71,7 +72,17 @@ public void testUpdateDomainResponseWithDomains() { cmd._resourceLimitService = resourceLimitService; ReflectionTestUtils.setField(cmd, "tag", "abc"); cmd.updateDomainResponse(List.of(Mockito.mock(DomainResponse.class))); - Mockito.verify(resourceLimitService, Mockito.times(1)).updateTaggedResourceLimitsAndCountsForDomains(Mockito.any(), Mockito.any()); + Mockito.verify(resourceLimitService).updateTaggedResourceLimitsAndCountsForDomains(Mockito.any(), Mockito.any()); + } + + @Test + public void testUpdateDomainResponseWithDomainsMinDetails() { + ListDomainsCmd cmd = new ListDomainsCmd(); + ReflectionTestUtils.setField(cmd, "viewDetails", List.of(ApiConstants.DomainDetails.min.toString())); + cmd._resourceLimitService = resourceLimitService; + ReflectionTestUtils.setField(cmd, "tag", "abc"); + cmd.updateDomainResponse(List.of(Mockito.mock(DomainResponse.class))); + Mockito.verify(resourceLimitService, Mockito.never()).updateTaggedResourceLimitsAndCountsForDomains(Mockito.any(), Mockito.any()); } } diff --git a/api/src/test/java/org/apache/cloudstack/api/command/user/account/ListAccountsCmdTest.java b/api/src/test/java/org/apache/cloudstack/api/command/user/account/ListAccountsCmdTest.java index 896a7a6c826b..a1ba9270345c 100644 --- a/api/src/test/java/org/apache/cloudstack/api/command/user/account/ListAccountsCmdTest.java +++ b/api/src/test/java/org/apache/cloudstack/api/command/user/account/ListAccountsCmdTest.java @@ -18,6 +18,7 @@ import java.util.List; +import org.apache.cloudstack.api.ApiConstants; import org.apache.cloudstack.api.response.AccountResponse; import org.junit.Assert; import org.junit.Test; @@ -58,7 +59,7 @@ public void testGetTag() { } @Test - public void testUpdateDomainResponseNoDomains() { + public void testUpdateAccountResponseNoAccounts() { ListAccountsCmd cmd = new ListAccountsCmd(); cmd._resourceLimitService = resourceLimitService; cmd.updateAccountResponse(null); @@ -66,11 +67,21 @@ public void testUpdateDomainResponseNoDomains() { } @Test - public void testUpdateDomainResponseWithDomains() { + public void testUpdateAccountResponseWithAccounts() { ListAccountsCmd cmd = new ListAccountsCmd(); cmd._resourceLimitService = resourceLimitService; ReflectionTestUtils.setField(cmd, "tag", "abc"); cmd.updateAccountResponse(List.of(Mockito.mock(AccountResponse.class))); Mockito.verify(resourceLimitService, Mockito.times(1)).updateTaggedResourceLimitsAndCountsForAccounts(Mockito.any(), Mockito.any()); } + + @Test + public void testUpdateAccountResponseWithAccountsMinDetails() { + ListAccountsCmd cmd = new ListAccountsCmd(); + ReflectionTestUtils.setField(cmd, "viewDetails", List.of(ApiConstants.DomainDetails.min.toString())); + cmd._resourceLimitService = resourceLimitService; + ReflectionTestUtils.setField(cmd, "tag", "abc"); + cmd.updateAccountResponse(List.of(Mockito.mock(AccountResponse.class))); + Mockito.verify(resourceLimitService, Mockito.never()).updateTaggedResourceLimitsAndCountsForAccounts(Mockito.any(), Mockito.any()); + } } diff --git a/core/src/main/java/com/cloud/resource/ServerResource.java b/core/src/main/java/com/cloud/resource/ServerResource.java index 1602a78d9a47..092019e7f21b 100644 --- a/core/src/main/java/com/cloud/resource/ServerResource.java +++ b/core/src/main/java/com/cloud/resource/ServerResource.java @@ -78,4 +78,12 @@ public interface ServerResource extends Manager { void setAgentControl(IAgentControl agentControl); + default boolean isExitOnFailures() { + return true; + } + + 
default boolean isAppendAgentNameToLogs() { + return false; + } + } diff --git a/engine/api/src/main/java/com/cloud/vm/VirtualMachineManager.java b/engine/api/src/main/java/com/cloud/vm/VirtualMachineManager.java index e8ffd86ac4f1..94c73d8f4d60 100644 --- a/engine/api/src/main/java/com/cloud/vm/VirtualMachineManager.java +++ b/engine/api/src/main/java/com/cloud/vm/VirtualMachineManager.java @@ -22,7 +22,6 @@ import java.util.List; import java.util.Map; -import com.cloud.exception.ResourceAllocationException; import org.apache.cloudstack.context.CallContext; import org.apache.cloudstack.framework.config.ConfigKey; @@ -38,6 +37,7 @@ import com.cloud.exception.InsufficientCapacityException; import com.cloud.exception.InsufficientServerCapacityException; import com.cloud.exception.OperationTimedoutException; +import com.cloud.exception.ResourceAllocationException; import com.cloud.exception.ResourceUnavailableException; import com.cloud.host.Host; import com.cloud.hypervisor.Hypervisor.HypervisorType; @@ -101,6 +101,10 @@ public interface VirtualMachineManager extends Manager { "refer documentation", true, ConfigKey.Scope.Zone); + ConfigKey VmSyncPowerStateTransitioning = new ConfigKey<>("Advanced", Boolean.class, "vm.sync.power.state.transitioning", "true", + "Whether to sync power states of the transitioning and stalled VMs while processing VM power reports.", false); + + interface Topics { String VM_POWER_STATE = "vm.powerstate"; } @@ -286,24 +290,22 @@ static String getHypervisorHostname(String name) { /** * Obtains statistics for a list of VMs; CPU and network utilization - * @param hostId ID of the host - * @param hostName name of the host + * @param host host * @param vmIds list of VM IDs * @return map of VM ID and stats entry for the VM */ - HashMap getVirtualMachineStatistics(long hostId, String hostName, List vmIds); + HashMap getVirtualMachineStatistics(Host host, List vmIds); /** * Obtains statistics for a list of VMs; CPU and network utilization - * @param hostId ID of the host - * @param hostName name of the host - * @param vmMap map of VM IDs and the corresponding VirtualMachine object + * @param host host + * @param vmMap map of VM instanceName and its ID * @return map of VM ID and stats entry for the VM */ - HashMap getVirtualMachineStatistics(long hostId, String hostName, Map vmMap); + HashMap getVirtualMachineStatistics(Host host, Map vmMap); - HashMap> getVmDiskStatistics(long hostId, String hostName, Map vmMap); + HashMap> getVmDiskStatistics(Host host, Map vmInstanceNameIdMap); - HashMap> getVmNetworkStatistics(long hostId, String hostName, Map vmMap); + HashMap> getVmNetworkStatistics(Host host, Map vmInstanceNameIdMap); Map getDiskOfferingSuitabilityForVm(long vmId, List diskOfferingIds); diff --git a/engine/components-api/src/main/java/com/cloud/capacity/CapacityManager.java b/engine/components-api/src/main/java/com/cloud/capacity/CapacityManager.java index 1c3edad886bb..59f0679249cb 100644 --- a/engine/components-api/src/main/java/com/cloud/capacity/CapacityManager.java +++ b/engine/components-api/src/main/java/com/cloud/capacity/CapacityManager.java @@ -16,14 +16,11 @@ // under the License. 
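Reviewer note (not part of the patch): a sketch of the reworked statistics contract above, where callers pass the Host object plus a map of VM instance name to VM id instead of hostId/hostName, and receive stats keyed by VM id; the sample instance names and ids and the surrounding class are illustrative assumptions only.
import java.util.HashMap;
import java.util.Map;

import com.cloud.host.Host;
import com.cloud.vm.VirtualMachineManager;

public class VmStatsCallExample {
    // Illustrative sketch, not part of this patch.
    public static void collectStats(VirtualMachineManager vmManager, Host host) {
        // Keys are VM instance names, values are VM ids; real callers build this from the VM DAO.
        Map<String, Long> vmInstanceNameIdMap = new HashMap<>();
        vmInstanceNameIdMap.put("i-2-10-VM", 10L);
        vmInstanceNameIdMap.put("i-2-11-VM", 11L);
        // The returned map is keyed by VM id rather than by instance name (see the javadoc change above).
        vmManager.getVirtualMachineStatistics(host, vmInstanceNameIdMap);
    }
}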
package com.cloud.capacity; -import java.util.Map; - import org.apache.cloudstack.framework.config.ConfigKey; import org.apache.cloudstack.storage.datastore.db.StoragePoolVO; import com.cloud.host.Host; import com.cloud.offering.ServiceOffering; -import com.cloud.service.ServiceOfferingVO; import com.cloud.storage.VMTemplateVO; import com.cloud.utils.Pair; import com.cloud.vm.VirtualMachine; @@ -118,6 +115,10 @@ public interface CapacityManager { "Percentage (as a value between 0 and 1) of secondary storage capacity threshold.", true); + ConfigKey CapacityCalculateWorkers = new ConfigKey<>(ConfigKey.CATEGORY_ADVANCED, Integer.class, + "capacity.calculate.workers", "1", + "Number of worker threads to be used for capacities calculation", true); + public boolean releaseVmCapacity(VirtualMachine vm, boolean moveFromReserved, boolean moveToReservered, Long hostId); void allocateVmCapacity(VirtualMachine vm, boolean fromLastHost); @@ -133,8 +134,6 @@ boolean checkIfHostHasCapacity(long hostId, Integer cpu, long ram, boolean check void updateCapacityForHost(Host host); - void updateCapacityForHost(Host host, Map offeringsMap); - /** * @param pool storage pool * @param templateForVmCreation template that will be used for vm creation @@ -151,12 +150,12 @@ boolean checkIfHostHasCapacity(long hostId, Integer cpu, long ram, boolean check /** * Check if specified host has capability to support cpu cores and speed freq - * @param hostId the host to be checked + * @param host the host to be checked * @param cpuNum cpu number to check * @param cpuSpeed cpu Speed to check * @return true if the count of host's running VMs >= hypervisor limit */ - boolean checkIfHostHasCpuCapability(long hostId, Integer cpuNum, Integer cpuSpeed); + boolean checkIfHostHasCpuCapability(Host host, Integer cpuNum, Integer cpuSpeed); /** * Check if cluster will cross threshold if the cpu/memory requested are accommodated diff --git a/engine/components-api/src/main/java/com/cloud/resource/ResourceManager.java b/engine/components-api/src/main/java/com/cloud/resource/ResourceManager.java index b2ae8b898378..3b7e4e964e56 100755 --- a/engine/components-api/src/main/java/com/cloud/resource/ResourceManager.java +++ b/engine/components-api/src/main/java/com/cloud/resource/ResourceManager.java @@ -138,13 +138,13 @@ public interface ResourceManager extends ResourceService, Configurable { public List listAllHostsInOneZoneNotInClusterByHypervisors(List types, long dcId, long clusterId); - public List listAvailHypervisorInZone(Long hostId, Long zoneId); + public List listAvailHypervisorInZone(Long zoneId); public HostVO findHostByGuid(String guid); public HostVO findHostByName(String name); - HostStats getHostStatistics(long hostId); + HostStats getHostStatistics(Host host); Long getGuestOSCategoryId(long hostId); diff --git a/engine/components-api/src/main/java/com/cloud/storage/StorageManager.java b/engine/components-api/src/main/java/com/cloud/storage/StorageManager.java index c3909bc56b0d..cf48d75bd1e5 100644 --- a/engine/components-api/src/main/java/com/cloud/storage/StorageManager.java +++ b/engine/components-api/src/main/java/com/cloud/storage/StorageManager.java @@ -22,6 +22,7 @@ import org.apache.cloudstack.engine.subsystem.api.storage.DataStore; import org.apache.cloudstack.engine.subsystem.api.storage.HypervisorHostListener; +import org.apache.cloudstack.engine.subsystem.api.storage.Scope; import org.apache.cloudstack.framework.config.ConfigKey; import org.apache.cloudstack.storage.datastore.db.StoragePoolVO; @@ -42,6 +43,7 @@ 
import com.cloud.offering.ServiceOffering; import com.cloud.storage.Storage.ImageFormat; import com.cloud.utils.Pair; +import com.cloud.utils.exception.CloudRuntimeException; import com.cloud.vm.DiskProfile; import com.cloud.vm.VMInstanceVO; @@ -209,6 +211,10 @@ public interface StorageManager extends StorageService { ConfigKey HEURISTICS_SCRIPT_TIMEOUT = new ConfigKey<>("Advanced", Long.class, "heuristics.script.timeout", "3000", "The maximum runtime, in milliseconds, to execute the heuristic rule; if it is reached, a timeout will happen.", true); + ConfigKey StoragePoolHostConnectWorkers = new ConfigKey<>("Storage", Integer.class, + "storage.pool.host.connect.workers", "1", + "Number of worker threads to be used to connect hosts to a primary storage", true); + /** * should we execute in sequence not involving any storages? * @return tru if commands should execute in sequence @@ -360,6 +366,9 @@ static Boolean getFullCloneConfiguration(Long storeId) { String getStoragePoolMountFailureReason(String error); + void connectHostsToPool(DataStore primaryStore, List hostIds, Scope scope, + boolean handleStorageConflictException, boolean errorOnNoUpHost) throws CloudRuntimeException; + boolean connectHostToSharedPool(long hostId, long poolId) throws StorageUnavailableException, StorageConflictException; void disconnectHostFromSharedPool(long hostId, long poolId) throws StorageUnavailableException, StorageConflictException; diff --git a/engine/orchestration/src/main/java/com/cloud/agent/manager/AgentManagerImpl.java b/engine/orchestration/src/main/java/com/cloud/agent/manager/AgentManagerImpl.java index 27b3ac2d7511..fdc462064470 100644 --- a/engine/orchestration/src/main/java/com/cloud/agent/manager/AgentManagerImpl.java +++ b/engine/orchestration/src/main/java/com/cloud/agent/manager/AgentManagerImpl.java @@ -38,10 +38,6 @@ import javax.inject.Inject; import javax.naming.ConfigurationException; -import com.cloud.configuration.Config; -import com.cloud.org.Cluster; -import com.cloud.utils.NumbersUtil; -import com.cloud.utils.db.GlobalLock; import org.apache.cloudstack.agent.lb.IndirectAgentLB; import org.apache.cloudstack.ca.CAManager; import org.apache.cloudstack.engine.orchestration.service.NetworkOrchestrationService; @@ -54,6 +50,8 @@ import org.apache.cloudstack.outofbandmanagement.dao.OutOfBandManagementDao; import org.apache.cloudstack.utils.identity.ManagementServerNode; import org.apache.commons.lang3.BooleanUtils; +import org.apache.commons.lang3.StringUtils; +import org.apache.logging.log4j.ThreadContext; import com.cloud.agent.AgentManager; import com.cloud.agent.Listener; @@ -80,6 +78,7 @@ import com.cloud.agent.transport.Request; import com.cloud.agent.transport.Response; import com.cloud.alert.AlertManager; +import com.cloud.configuration.Config; import com.cloud.configuration.ManagementServiceConfiguration; import com.cloud.dc.ClusterVO; import com.cloud.dc.DataCenterVO; @@ -99,15 +98,18 @@ import com.cloud.host.dao.HostDao; import com.cloud.hypervisor.Hypervisor.HypervisorType; import com.cloud.hypervisor.HypervisorGuruManager; +import com.cloud.org.Cluster; import com.cloud.resource.Discoverer; import com.cloud.resource.ResourceManager; import com.cloud.resource.ResourceState; import com.cloud.resource.ServerResource; +import com.cloud.utils.NumbersUtil; import com.cloud.utils.Pair; import com.cloud.utils.component.ManagerBase; import com.cloud.utils.concurrency.NamedThreadFactory; import com.cloud.utils.db.DB; import com.cloud.utils.db.EntityManager; +import 
com.cloud.utils.db.GlobalLock; import com.cloud.utils.db.QueryBuilder; import com.cloud.utils.db.SearchCriteria.Op; import com.cloud.utils.db.TransactionLegacy; @@ -122,8 +124,6 @@ import com.cloud.utils.nio.NioServer; import com.cloud.utils.nio.Task; import com.cloud.utils.time.InaccurateClock; -import org.apache.commons.lang3.StringUtils; -import org.apache.logging.log4j.ThreadContext; /** * Implementation of the Agent Manager. This class controls the connection to the agents. @@ -140,6 +140,7 @@ public class AgentManagerImpl extends ManagerBase implements AgentManager, Handl protected List> _creationMonitors = new ArrayList>(17); protected List _loadingAgents = new ArrayList(); private int _monitorId = 0; + // FIXME: this is causing contention private final Lock _agentStatusLock = new ReentrantLock(); @Inject @@ -195,6 +196,15 @@ public class AgentManagerImpl extends ManagerBase implements AgentManager, Handl protected final ConfigKey Workers = new ConfigKey("Advanced", Integer.class, "workers", "5", "Number of worker threads handling remote agent connections.", false); protected final ConfigKey Port = new ConfigKey("Advanced", Integer.class, "port", "8250", "Port to listen on for remote agent connections.", false); + protected final ConfigKey RemoteAgentSslHandshakeTimeout = new ConfigKey<>("Advanced", + Integer.class, "agent.ssl.handshake.timeout", "30", + "Seconds after which SSL handshake times out during remote agent connections.", false); + protected final ConfigKey RemoteAgentSslHandshakeMinWorkers = new ConfigKey<>("Advanced", + Integer.class, "agent.ssl.handshake.min.workers", "5", + "Number of minimum worker threads handling SSL handshake with remote agents.", false); + protected final ConfigKey RemoteAgentSslHandshakeMaxWorkers = new ConfigKey<>("Advanced", + Integer.class, "agent.ssl.handshake.max.workers", "50", + "Number of maximum worker threads handling SSL handshake with remote agents.", false); protected final ConfigKey AlertWait = new ConfigKey("Advanced", Integer.class, "alert.wait", "1800", "Seconds to wait before alerting on a disconnected agent", true); protected final ConfigKey DirectAgentLoadSize = new ConfigKey("Advanced", Integer.class, "direct.agent.load.size", "16", @@ -211,8 +221,6 @@ public boolean configure(final String name, final Map params) th logger.info("Ping Timeout is {}.", mgmtServiceConf.getPingTimeout()); - final int threads = DirectAgentLoadSize.value(); - _nodeId = ManagementServerNode.getManagementServerId(); logger.info("Configuring AgentManagerImpl. 
management server node id(msid): {}.", _nodeId); @@ -223,21 +231,25 @@ public boolean configure(final String name, final Map params) th registerForHostEvents(new SetHostParamsListener(), true, true, false); - _executor = new ThreadPoolExecutor(threads, threads, 60l, TimeUnit.SECONDS, new LinkedBlockingQueue(), new NamedThreadFactory("AgentTaskPool")); + final int agentTaskThreads = DirectAgentLoadSize.value(); + _executor = new ThreadPoolExecutor(agentTaskThreads, agentTaskThreads, 60L, TimeUnit.SECONDS, new LinkedBlockingQueue(), new NamedThreadFactory("AgentTaskPool")); _connectExecutor = new ThreadPoolExecutor(100, 500, 60l, TimeUnit.SECONDS, new LinkedBlockingQueue(), new NamedThreadFactory("AgentConnectTaskPool")); // allow core threads to time out even when there are no items in the queue _connectExecutor.allowCoreThreadTimeOut(true); - _connection = new NioServer("AgentManager", Port.value(), Workers.value() + 10, this, caService); + _connection = new NioServer("AgentManager", Port.value(), Workers.value() + 10, + RemoteAgentSslHandshakeMinWorkers.value(), RemoteAgentSslHandshakeMaxWorkers.value(), this, + caService, RemoteAgentSslHandshakeTimeout.value()); logger.info("Listening on {} with {} workers.", Port.value(), Workers.value()); + final int directAgentPoolSize = DirectAgentPoolSize.value(); // executes all agent commands other than cron and ping - _directAgentExecutor = new ScheduledThreadPoolExecutor(DirectAgentPoolSize.value(), new NamedThreadFactory("DirectAgent")); + _directAgentExecutor = new ScheduledThreadPoolExecutor(directAgentPoolSize, new NamedThreadFactory("DirectAgent")); // executes cron and ping agent commands - _cronJobExecutor = new ScheduledThreadPoolExecutor(DirectAgentPoolSize.value(), new NamedThreadFactory("DirectAgentCronJob")); - logger.debug("Created DirectAgentAttache pool with size: {}.", DirectAgentPoolSize.value()); - _directAgentThreadCap = Math.round(DirectAgentPoolSize.value() * DirectAgentThreadCap.value()) + 1; // add 1 to always make the value > 0 + _cronJobExecutor = new ScheduledThreadPoolExecutor(directAgentPoolSize, new NamedThreadFactory("DirectAgentCronJob")); + logger.debug("Created DirectAgentAttache pool with size: {}.", directAgentPoolSize); + _directAgentThreadCap = Math.round(directAgentPoolSize * DirectAgentThreadCap.value()) + 1; // add 1 to always make the value > 0 _monitorExecutor = new ScheduledThreadPoolExecutor(1, new NamedThreadFactory("AgentMonitor")); @@ -1802,7 +1814,9 @@ public String getConfigComponentName() { @Override public ConfigKey[] getConfigKeys() { return new ConfigKey[] { CheckTxnBeforeSending, Workers, Port, Wait, AlertWait, DirectAgentLoadSize, - DirectAgentPoolSize, DirectAgentThreadCap, EnableKVMAutoEnableDisable, ReadyCommandWait }; + DirectAgentPoolSize, DirectAgentThreadCap, EnableKVMAutoEnableDisable, ReadyCommandWait, + RemoteAgentSslHandshakeTimeout, RemoteAgentSslHandshakeMinWorkers, RemoteAgentSslHandshakeMaxWorkers + }; } protected class SetHostParamsListener implements Listener { diff --git a/engine/orchestration/src/main/java/com/cloud/agent/manager/ClusteredAgentManagerImpl.java b/engine/orchestration/src/main/java/com/cloud/agent/manager/ClusteredAgentManagerImpl.java index a7fea0f25331..c8f473ade55f 100644 --- a/engine/orchestration/src/main/java/com/cloud/agent/manager/ClusteredAgentManagerImpl.java +++ b/engine/orchestration/src/main/java/com/cloud/agent/manager/ClusteredAgentManagerImpl.java @@ -243,14 +243,14 @@ public Task create(final Task.Type type, final Link link, final byte[] 
data) { return new ClusteredAgentHandler(type, link, data); } - protected AgentAttache createAttache(final long id) { - logger.debug("create forwarding ClusteredAgentAttache for {}", id); - final HostVO host = _hostDao.findById(id); - final AgentAttache attache = new ClusteredAgentAttache(this, id, host.getName()); + + protected AgentAttache createAttache(final Host host) { + logger.debug("create forwarding ClusteredAgentAttache for {}", host.getId()); + final AgentAttache attache = new ClusteredAgentAttache(this, host.getId(), host.getName()); AgentAttache old = null; synchronized (_agents) { - old = _agents.get(id); - _agents.put(id, attache); + old = _agents.get(host.getId()); + _agents.put(host.getId(), attache); } if (old != null) { logger.debug("Remove stale agent attache from current management server"); @@ -546,12 +546,12 @@ protected AgentAttache getAttache(final Long hostId) throws AgentUnavailableExce if (agent == null || !agent.forForward()) { if (isHostOwnerSwitched(host)) { logger.debug("Host {} has switched to another management server, need to update agent map with a forwarding agent attache", hostId); - agent = createAttache(hostId); + agent = createAttache(host); } } if (agent == null) { final AgentUnavailableException ex = new AgentUnavailableException("Host with specified id is not in the right state: " + host.getStatus(), hostId); - ex.addProxyObject(_entityMgr.findById(Host.class, hostId).getUuid()); + ex.addProxyObject(host.getUuid()); throw ex; } @@ -1119,7 +1119,7 @@ protected boolean startRebalance(final long hostId) { final ClusteredDirectAgentAttache attache = (ClusteredDirectAgentAttache)_agents.get(hostId); if (attache != null && attache.getQueueSize() == 0 && attache.getNonRecurringListenersSize() == 0) { handleDisconnectWithoutInvestigation(attache, Event.StartAgentRebalance, true, true); - final ClusteredAgentAttache forwardAttache = (ClusteredAgentAttache)createAttache(hostId); + final ClusteredAgentAttache forwardAttache = (ClusteredAgentAttache)createAttache(host); if (forwardAttache == null) { logger.warn("Unable to create a forward attache for the host {} as a part of rebalance process", hostId); return false; diff --git a/engine/orchestration/src/main/java/com/cloud/vm/VirtualMachineManagerImpl.java b/engine/orchestration/src/main/java/com/cloud/vm/VirtualMachineManagerImpl.java index 7c107ed6f547..9d42eada7b23 100755 --- a/engine/orchestration/src/main/java/com/cloud/vm/VirtualMachineManagerImpl.java +++ b/engine/orchestration/src/main/java/com/cloud/vm/VirtualMachineManagerImpl.java @@ -85,6 +85,7 @@ import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao; import org.apache.cloudstack.storage.datastore.db.StoragePoolVO; import org.apache.cloudstack.storage.to.VolumeObjectTO; +import org.apache.cloudstack.utils.cache.SingleCache; import org.apache.cloudstack.utils.identity.ManagementServerNode; import org.apache.cloudstack.utils.reflectiontostringbuilderutils.ReflectionToStringBuilderUtils; import org.apache.cloudstack.vm.UnmanagedVMsManager; @@ -406,6 +407,10 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac private DomainDao domainDao; @Inject ResourceCleanupService resourceCleanupService; + @Inject + VmWorkJobDao vmWorkJobDao; + + private SingleCache> vmIdsInProgressCache; VmWorkJobHandlerProxy _jobHandlerProxy = new VmWorkJobHandlerProxy(this); @@ -450,6 +455,8 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac Long.class, "systemvm.root.disk.size", "-1", "Size of 
root volume (in GB) of system VMs and virtual routers", true); + private boolean syncTransitioningVmPowerState; + ScheduledExecutorService _executor = null; private long _nodeId; @@ -816,6 +823,7 @@ private void sendModifyTargetsCommand(ModifyTargetsCommand cmd, long hostId) { @Override public boolean start() { + vmIdsInProgressCache = new SingleCache<>(10, vmWorkJobDao::listVmIdsWithPendingJob); _executor.scheduleAtFixedRate(new CleanupTask(), 5, VmJobStateReportInterval.value(), TimeUnit.SECONDS); _executor.scheduleAtFixedRate(new TransitionTask(), VmOpCleanupInterval.value(), VmOpCleanupInterval.value(), TimeUnit.SECONDS); cancelWorkItems(_nodeId); @@ -843,6 +851,8 @@ public boolean configure(final String name, final Map xmlParams) _messageBus.subscribe(VirtualMachineManager.Topics.VM_POWER_STATE, MessageDispatcher.getDispatcher(this)); + syncTransitioningVmPowerState = Boolean.TRUE.equals(VmSyncPowerStateTransitioning.value()); + return true; } @@ -3504,7 +3514,7 @@ public DataCenterDeployment getMigrationDeployment(final VirtualMachine vm, fina if (MIGRATE_VM_ACROSS_CLUSTERS.valueIn(host.getDataCenterId()) && (HypervisorType.VMware.equals(host.getHypervisorType()) || !checkIfVmHasClusterWideVolumes(vm.getId()))) { logger.info("Searching for hosts in the zone for vm migration"); - List clustersToExclude = _clusterDao.listAllClusters(host.getDataCenterId()); + List clustersToExclude = _clusterDao.listAllClusterIds(host.getDataCenterId()); List clusterList = _clusterDao.listByDcHyType(host.getDataCenterId(), host.getHypervisorType().toString()); for (ClusterVO cluster : clusterList) { clustersToExclude.remove(cluster.getId()); @@ -3798,7 +3808,6 @@ public boolean processCommands(final long agentId, final long seq, final Command if (ping.getHostVmStateReport() != null) { _syncMgr.processHostVmStatePingReport(agentId, ping.getHostVmStateReport(), ping.getOutOfBand()); } - scanStalledVMInTransitionStateOnUpHost(agentId); processed = true; } @@ -4756,7 +4765,8 @@ public ConfigKey[] getConfigKeys() { VmOpLockStateRetry, VmOpWaitInterval, ExecuteInSequence, VmJobCheckInterval, VmJobTimeout, VmJobStateReportInterval, VmConfigDriveLabel, VmConfigDriveOnPrimaryPool, VmConfigDriveForceHostCacheUse, VmConfigDriveUseHostCacheOnUnsupportedPool, HaVmRestartHostUp, ResourceCountRunningVMsonly, AllowExposeHypervisorHostname, AllowExposeHypervisorHostnameAccountLevel, SystemVmRootDiskSize, - AllowExposeDomainInMetadata, MetadataCustomCloudName, VmMetadataManufacturer, VmMetadataProductName + AllowExposeDomainInMetadata, MetadataCustomCloudName, VmMetadataManufacturer, VmMetadataProductName, + VmSyncPowerStateTransitioning }; } @@ -4955,19 +4965,36 @@ private void handlePowerOffReportWithNoPendingJobsOnVM(final VMInstanceVO vm) { } private void scanStalledVMInTransitionStateOnUpHost(final long hostId) { - final long stallThresholdInMs = VmJobStateReportInterval.value() + (VmJobStateReportInterval.value() >> 1); - final Date cutTime = new Date(DateUtil.currentGMTTime().getTime() - stallThresholdInMs); - final List mostlikelyStoppedVMs = listStalledVMInTransitionStateOnUpHost(hostId, cutTime); - for (final Long vmId : mostlikelyStoppedVMs) { - final VMInstanceVO vm = _vmDao.findById(vmId); - assert vm != null; + if (!syncTransitioningVmPowerState) { + return; + } + // Check VM that is stuck in Starting, Stopping, Migrating states, we won't check + // VMs in expunging state (this need to be handled specially) + // + // checking condition + // 1) no pending VmWork job + // 2) on hostId host and host is UP + 
// + // When host is UP, sooner or later we will get a report from the host about the VM, + // however, if VM is missing from the host report (it may happen in out of band changes + // or from behaviour of XS/KVM by design), the VM may not get a chance to run the state-sync logic + // + // Therefore, we will scan those VMs on UP host based on last update timestamp, if the host is UP + // and a VM stalls for status update, we will consider them to be powered off + // (which is relatively safe to do so) + final long stallThresholdInMs = VmJobStateReportInterval.value() * 2; + final long cutTime = new Date(DateUtil.currentGMTTime().getTime() - stallThresholdInMs).getTime(); + if (!_hostDao.isHostUp(hostId)) { + return; + } + final List hostTransitionVms = _vmDao.listByHostAndState(hostId, State.Starting, State.Stopping, State.Migrating); + final List mostLikelyStoppedVMs = listStalledVMInTransitionStateOnUpHost(hostTransitionVms, cutTime); + for (final VMInstanceVO vm : mostLikelyStoppedVMs) { handlePowerOffReportWithNoPendingJobsOnVM(vm); } - final List vmsWithRecentReport = listVMInTransitionStateWithRecentReportOnUpHost(hostId, cutTime); - for (final Long vmId : vmsWithRecentReport) { - final VMInstanceVO vm = _vmDao.findById(vmId); - assert vm != null; + final List vmsWithRecentReport = listVMInTransitionStateWithRecentReportOnUpHost(hostTransitionVms, cutTime); + for (final VMInstanceVO vm : vmsWithRecentReport) { if (vm.getPowerState() == PowerState.PowerOn) { handlePowerOnReportWithNoPendingJobsOnVM(vm); } else { @@ -4988,89 +5015,58 @@ private void scanStalledVMInTransitionStateOnDisconnectedHosts() { } } - private List listStalledVMInTransitionStateOnUpHost(final long hostId, final Date cutTime) { - final String sql = "SELECT i.* FROM vm_instance as i, host as h WHERE h.status = 'UP' " + - "AND h.id = ? AND i.power_state_update_time < ? AND i.host_id = h.id " + - "AND (i.state ='Starting' OR i.state='Stopping' OR i.state='Migrating') " + - "AND i.id NOT IN (SELECT w.vm_instance_id FROM vm_work_job AS w JOIN async_job AS j ON w.id = j.id WHERE j.job_status = ?)" + - "AND i.removed IS NULL"; - - final List l = new ArrayList<>(); - try (TransactionLegacy txn = TransactionLegacy.open(TransactionLegacy.CLOUD_DB)) { - String cutTimeStr = DateUtil.getDateDisplayString(TimeZone.getTimeZone("GMT"), cutTime); - - try { - PreparedStatement pstmt = txn.prepareAutoCloseStatement(sql); - - pstmt.setLong(1, hostId); - pstmt.setString(2, cutTimeStr); - pstmt.setInt(3, JobInfo.Status.IN_PROGRESS.ordinal()); - final ResultSet rs = pstmt.executeQuery(); - while (rs.next()) { - l.add(rs.getLong(1)); - } - } catch (SQLException e) { - logger.error("Unable to execute SQL [{}] with params {\"h.id\": {}, \"i.power_state_update_time\": \"{}\"} due to [{}].", sql, hostId, cutTimeStr, e.getMessage(), e); - } + private List listStalledVMInTransitionStateOnUpHost( + final List transitioningVms, final long cutTime) { + if (CollectionUtils.isEmpty(transitioningVms)) { + return transitioningVms; } - return l; + List vmIdsInProgress = vmIdsInProgressCache.get(); + return transitioningVms.stream() + .filter(v -> v.getPowerStateUpdateTime().getTime() < cutTime && !vmIdsInProgress.contains(v.getId())) + .collect(Collectors.toList()); } - private List listVMInTransitionStateWithRecentReportOnUpHost(final long hostId, final Date cutTime) { - final String sql = "SELECT i.* FROM vm_instance as i, host as h WHERE h.status = 'UP' " + - "AND h.id = ? AND i.power_state_update_time > ? 
AND i.host_id = h.id " + - "AND (i.state ='Starting' OR i.state='Stopping' OR i.state='Migrating') " + - "AND i.id NOT IN (SELECT w.vm_instance_id FROM vm_work_job AS w JOIN async_job AS j ON w.id = j.id WHERE j.job_status = ?)" + - "AND i.removed IS NULL"; - - final List l = new ArrayList<>(); - try (TransactionLegacy txn = TransactionLegacy.open(TransactionLegacy.CLOUD_DB)) { - String cutTimeStr = DateUtil.getDateDisplayString(TimeZone.getTimeZone("GMT"), cutTime); - int jobStatusInProgress = JobInfo.Status.IN_PROGRESS.ordinal(); - - try { - PreparedStatement pstmt = txn.prepareAutoCloseStatement(sql); - - pstmt.setLong(1, hostId); - pstmt.setString(2, cutTimeStr); - pstmt.setInt(3, jobStatusInProgress); - final ResultSet rs = pstmt.executeQuery(); - while (rs.next()) { - l.add(rs.getLong(1)); - } - } catch (final SQLException e) { - logger.error("Unable to execute SQL [{}] with params {\"h.id\": {}, \"i.power_state_update_time\": \"{}\", \"j.job_status\": {}} due to [{}].", sql, hostId, cutTimeStr, jobStatusInProgress, e.getMessage(), e); - } - return l; + private List listVMInTransitionStateWithRecentReportOnUpHost( + final List transitioningVms, final long cutTime) { + if (CollectionUtils.isEmpty(transitioningVms)) { + return transitioningVms; } + List vmIdsInProgress = vmIdsInProgressCache.get(); + return transitioningVms.stream() + .filter(v -> v.getPowerStateUpdateTime().getTime() > cutTime && !vmIdsInProgress.contains(v.getId())) + .collect(Collectors.toList()); } private List listStalledVMInTransitionStateOnDisconnectedHosts(final Date cutTime) { - final String sql = "SELECT i.* FROM vm_instance as i, host as h WHERE h.status != 'UP' " + - "AND i.power_state_update_time < ? AND i.host_id = h.id " + - "AND (i.state ='Starting' OR i.state='Stopping' OR i.state='Migrating') " + - "AND i.id NOT IN (SELECT w.vm_instance_id FROM vm_work_job AS w JOIN async_job AS j ON w.id = j.id WHERE j.job_status = ?)" + - "AND i.removed IS NULL"; + final String sql = "SELECT i.* " + + "FROM vm_instance AS i " + + "INNER JOIN host AS h ON i.host_id = h.id " + + "WHERE h.status != 'UP' " + + " AND i.power_state_update_time < ? " + + " AND i.state IN ('Starting', 'Stopping', 'Migrating') " + + " AND i.id NOT IN (SELECT vm_instance_id FROM vm_work_job AS w " + + " INNER JOIN async_job AS j ON w.id = j.id " + + " WHERE j.job_status = ?) 
" + + " AND i.removed IS NULL"; final List l = new ArrayList<>(); - try (TransactionLegacy txn = TransactionLegacy.open(TransactionLegacy.CLOUD_DB)) { - String cutTimeStr = DateUtil.getDateDisplayString(TimeZone.getTimeZone("GMT"), cutTime); - int jobStatusInProgress = JobInfo.Status.IN_PROGRESS.ordinal(); + TransactionLegacy txn = TransactionLegacy.currentTxn(); + String cutTimeStr = DateUtil.getDateDisplayString(TimeZone.getTimeZone("GMT"), cutTime); + int jobStatusInProgress = JobInfo.Status.IN_PROGRESS.ordinal(); - try { - PreparedStatement pstmt = txn.prepareAutoCloseStatement(sql); + try { + PreparedStatement pstmt = txn.prepareAutoCloseStatement(sql); - pstmt.setString(1, cutTimeStr); - pstmt.setInt(2, jobStatusInProgress); - final ResultSet rs = pstmt.executeQuery(); - while (rs.next()) { - l.add(rs.getLong(1)); - } - } catch (final SQLException e) { - logger.error("Unable to execute SQL [{}] with params {\"i.power_state_update_time\": \"{}\", \"j.job_status\": {}} due to [{}].", sql, cutTimeStr, jobStatusInProgress, e.getMessage(), e); + pstmt.setString(1, cutTimeStr); + pstmt.setInt(2, jobStatusInProgress); + final ResultSet rs = pstmt.executeQuery(); + while (rs.next()) { + l.add(rs.getLong(1)); } - return l; + } catch (final SQLException e) { + logger.error("Unable to execute SQL [{}] with params {\"i.power_state_update_time\": \"{}\", \"j.job_status\": {}} due to [{}].", sql, cutTimeStr, jobStatusInProgress, e.getMessage(), e); } + return l; } public class VmStateSyncOutcome extends OutcomeImpl { @@ -5950,29 +5946,23 @@ protected VMInstanceVO findVmById(Long vmId) { } @Override - public HashMap getVirtualMachineStatistics(long hostId, String hostName, List vmIds) { + public HashMap getVirtualMachineStatistics(Host host, List vmIds) { HashMap vmStatsById = new HashMap<>(); if (CollectionUtils.isEmpty(vmIds)) { return vmStatsById; } - Map vmMap = new HashMap<>(); - for (Long vmId : vmIds) { - vmMap.put(vmId, _vmDao.findById(vmId)); - } - return getVirtualMachineStatistics(hostId, hostName, vmMap); + Map vmMap = _vmDao.getNameIdMapForVmIds(vmIds); + return getVirtualMachineStatistics(host, vmMap); } @Override - public HashMap getVirtualMachineStatistics(long hostId, String hostName, Map vmMap) { + public HashMap getVirtualMachineStatistics(Host host, Map vmInstanceNameIdMap) { HashMap vmStatsById = new HashMap<>(); - if (MapUtils.isEmpty(vmMap)) { + if (MapUtils.isEmpty(vmInstanceNameIdMap)) { return vmStatsById; } - Map vmNames = new HashMap<>(); - for (Map.Entry vmEntry : vmMap.entrySet()) { - vmNames.put(vmEntry.getValue().getInstanceName(), vmEntry.getKey()); - } - Answer answer = _agentMgr.easySend(hostId, new GetVmStatsCommand(new ArrayList<>(vmNames.keySet()), _hostDao.findById(hostId).getGuid(), hostName)); + Answer answer = _agentMgr.easySend(host.getId(), new GetVmStatsCommand( + new ArrayList<>(vmInstanceNameIdMap.keySet()), host.getGuid(), host.getName())); if (answer == null || !answer.getResult()) { logger.warn("Unable to obtain VM statistics."); return vmStatsById; @@ -5983,23 +5973,20 @@ protected VMInstanceVO findVmById(Long vmId) { return vmStatsById; } for (Map.Entry entry : vmStatsByName.entrySet()) { - vmStatsById.put(vmNames.get(entry.getKey()), entry.getValue()); + vmStatsById.put(vmInstanceNameIdMap.get(entry.getKey()), entry.getValue()); } } return vmStatsById; } @Override - public HashMap> getVmDiskStatistics(long hostId, String hostName, Map vmMap) { + public HashMap> getVmDiskStatistics(Host host, Map vmInstanceNameIdMap) { HashMap> vmDiskStatsById = 
new HashMap<>(); - if (MapUtils.isEmpty(vmMap)) { + if (MapUtils.isEmpty(vmInstanceNameIdMap)) { return vmDiskStatsById; } - Map vmNames = new HashMap<>(); - for (Map.Entry vmEntry : vmMap.entrySet()) { - vmNames.put(vmEntry.getValue().getInstanceName(), vmEntry.getKey()); - } - Answer answer = _agentMgr.easySend(hostId, new GetVmDiskStatsCommand(new ArrayList<>(vmNames.keySet()), _hostDao.findById(hostId).getGuid(), hostName)); + Answer answer = _agentMgr.easySend(host.getId(), new GetVmDiskStatsCommand( + new ArrayList<>(vmInstanceNameIdMap.keySet()), host.getGuid(), host.getName())); if (answer == null || !answer.getResult()) { logger.warn("Unable to obtain VM disk statistics."); return vmDiskStatsById; @@ -6010,23 +5997,20 @@ public HashMap> getVmDiskStatistics(long hostI return vmDiskStatsById; } for (Map.Entry> entry: vmDiskStatsByName.entrySet()) { - vmDiskStatsById.put(vmNames.get(entry.getKey()), entry.getValue()); + vmDiskStatsById.put(vmInstanceNameIdMap.get(entry.getKey()), entry.getValue()); } } return vmDiskStatsById; } @Override - public HashMap> getVmNetworkStatistics(long hostId, String hostName, Map vmMap) { + public HashMap> getVmNetworkStatistics(Host host, Map vmInstanceNameIdMap) { HashMap> vmNetworkStatsById = new HashMap<>(); - if (MapUtils.isEmpty(vmMap)) { + if (MapUtils.isEmpty(vmInstanceNameIdMap)) { return vmNetworkStatsById; } - Map vmNames = new HashMap<>(); - for (Map.Entry vmEntry : vmMap.entrySet()) { - vmNames.put(vmEntry.getValue().getInstanceName(), vmEntry.getKey()); - } - Answer answer = _agentMgr.easySend(hostId, new GetVmNetworkStatsCommand(new ArrayList<>(vmNames.keySet()), _hostDao.findById(hostId).getGuid(), hostName)); + Answer answer = _agentMgr.easySend(host.getId(), new GetVmNetworkStatsCommand( + new ArrayList<>(vmInstanceNameIdMap.keySet()), host.getGuid(), host.getName())); if (answer == null || !answer.getResult()) { logger.warn("Unable to obtain VM network statistics."); return vmNetworkStatsById; @@ -6037,7 +6021,7 @@ public HashMap> getVmNetworkStatistics(long return vmNetworkStatsById; } for (Map.Entry> entry: vmNetworkStatsByName.entrySet()) { - vmNetworkStatsById.put(vmNames.get(entry.getKey()), entry.getValue()); + vmNetworkStatsById.put(vmInstanceNameIdMap.get(entry.getKey()), entry.getValue()); } } return vmNetworkStatsById; diff --git a/engine/orchestration/src/main/java/com/cloud/vm/VirtualMachinePowerStateSyncImpl.java b/engine/orchestration/src/main/java/com/cloud/vm/VirtualMachinePowerStateSyncImpl.java index 4c89a75d2151..4b7fd8cc8327 100644 --- a/engine/orchestration/src/main/java/com/cloud/vm/VirtualMachinePowerStateSyncImpl.java +++ b/engine/orchestration/src/main/java/com/cloud/vm/VirtualMachinePowerStateSyncImpl.java @@ -16,23 +16,24 @@ // under the License. 
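Reviewer note (not part of the patch): a self-contained sketch of the stall check used in the reworked scanStalledVMInTransitionStateOnUpHost above; a transitioning VM is treated as most likely stopped when its last power-state update is older than twice the job report interval and it has no pending work job. The types and names below are stand-ins, not CloudStack classes.
import java.util.Date;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.stream.Collectors;

public class StalledVmFilterExample {
    // Illustrative sketch, not part of this patch.
    public static List<Long> mostLikelyStoppedVmIds(Map<Long, Date> powerStateUpdateTimeByVmId,
            Set<Long> vmIdsWithPendingJob, long reportIntervalMs, long nowMs) {
        // Mirrors: stallThresholdInMs = VmJobStateReportInterval * 2
        final long cutTime = nowMs - 2 * reportIntervalMs;
        return powerStateUpdateTimeByVmId.entrySet().stream()
                .filter(e -> e.getValue().getTime() < cutTime && !vmIdsWithPendingJob.contains(e.getKey()))
                .map(Map.Entry::getKey)
                .collect(Collectors.toList());
    }
}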
package com.cloud.vm; -import java.text.SimpleDateFormat; import java.util.Date; import java.util.HashMap; import java.util.List; import java.util.Map; +import java.util.Set; +import java.util.stream.Collectors; import javax.inject.Inject; import org.apache.cloudstack.framework.messagebus.MessageBus; import org.apache.cloudstack.framework.messagebus.PublishScope; -import org.apache.logging.log4j.Logger; +import org.apache.commons.collections.MapUtils; import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; import com.cloud.agent.api.HostVmStateReportEntry; import com.cloud.configuration.ManagementServiceConfiguration; import com.cloud.utils.DateUtil; -import com.cloud.utils.exception.CloudRuntimeException; import com.cloud.vm.dao.VMInstanceDao; public class VirtualMachinePowerStateSyncImpl implements VirtualMachinePowerStateSync { @@ -67,119 +68,121 @@ public void processHostVmStatePingReport(long hostId, Map translatedInfo, boolean force) { - - logger.debug("Process VM state report. host: {}, number of records in report: {}.", hostId, translatedInfo.size()); - - for (Map.Entry entry : translatedInfo.entrySet()) { - - logger.debug("VM state report. host: {}, vm id: {}, power state: {}.", hostId, entry.getKey(), entry.getValue()); - - if (_instanceDao.updatePowerState(entry.getKey(), hostId, entry.getValue(), DateUtil.currentGMTTime())) { - logger.debug("VM state report is updated. host: {}, vm id: {}, power state: {}.", hostId, entry.getKey(), entry.getValue()); - - _messageBus.publish(null, VirtualMachineManager.Topics.VM_POWER_STATE, PublishScope.GLOBAL, entry.getKey()); - } else { - logger.trace("VM power state does not change, skip DB writing. vm id: {}.", entry.getKey()); + private void updateAndPublishVmPowerStates(long hostId, Map instancePowerStates, + Date updateTime) { + if (instancePowerStates.isEmpty()) { + return; + } + Set vmIds = instancePowerStates.keySet(); + Map notUpdated = _instanceDao.updatePowerState(instancePowerStates, hostId, + updateTime); + if (notUpdated.size() <= vmIds.size()) { + for (Long vmId : vmIds) { + if (!notUpdated.isEmpty() && !notUpdated.containsKey(vmId)) { + logger.debug("VM state report is updated. host: {}, vm id: {}, power state: {}", + hostId, vmId, instancePowerStates.get(vmId)); + _messageBus.publish(null, VirtualMachineManager.Topics.VM_POWER_STATE, + PublishScope.GLOBAL, vmId); + continue; + } + logger.trace("VM power state does not change, skip DB writing. 
vm id: {}", vmId); } } + } + private void processMissingVmReport(long hostId, Set vmIds, boolean force) { // any state outdates should be checked against the time before this list was retrieved Date startTime = DateUtil.currentGMTTime(); // for all running/stopping VMs, we provide monitoring of missing report - List vmsThatAreMissingReport = _instanceDao.findByHostInStates(hostId, VirtualMachine.State.Running, - VirtualMachine.State.Stopping, VirtualMachine.State.Starting); - java.util.Iterator it = vmsThatAreMissingReport.iterator(); - while (it.hasNext()) { - VMInstanceVO instance = it.next(); - if (translatedInfo.get(instance.getId()) != null) - it.remove(); - } - + List vmsThatAreMissingReport = _instanceDao.findByHostInStatesExcluding(hostId, vmIds, + VirtualMachine.State.Running, VirtualMachine.State.Stopping, VirtualMachine.State.Starting); // here we need to be wary of out of band migration as opposed to other, more unexpected state changes - if (vmsThatAreMissingReport.size() > 0) { - Date currentTime = DateUtil.currentGMTTime(); - logger.debug("Run missing VM report. current time: {}", currentTime.getTime()); - - // 2 times of sync-update interval for graceful period - long milliSecondsGracefullPeriod = mgmtServiceConf.getPingInterval() * 2000L; - - for (VMInstanceVO instance : vmsThatAreMissingReport) { - - // Make sure powerState is up to date for missing VMs - try { - if (!force && !_instanceDao.isPowerStateUpToDate(instance.getId())) { - logger.warn("Detected missing VM but power state is outdated, wait for another process report run for VM id: {}.", instance.getId()); - _instanceDao.resetVmPowerStateTracking(instance.getId()); - continue; - } - } catch (CloudRuntimeException e) { - logger.warn("Checked for missing powerstate of a none existing vm", e); - continue; - } + if (vmsThatAreMissingReport.isEmpty()) { + return; + } + Date currentTime = DateUtil.currentGMTTime(); + if (logger.isDebugEnabled()) { + logger.debug("Run missing VM report. 
current time: " + currentTime.getTime()); + } + if (!force) { + List outdatedVms = vmsThatAreMissingReport.stream() + .filter(v -> !_instanceDao.isPowerStateUpToDate(v)) + .map(VMInstanceVO::getId) + .collect(Collectors.toList()); + _instanceDao.resetVmPowerStateTracking(outdatedVms); + vmsThatAreMissingReport = vmsThatAreMissingReport.stream() + .filter(v -> !outdatedVms.contains(v.getId())) + .collect(Collectors.toList()); + } - Date vmStateUpdateTime = instance.getPowerStateUpdateTime(); + // 2 times of sync-update interval for graceful period + long milliSecondsGracefulPeriod = mgmtServiceConf.getPingInterval() * 2000L; + Map instancePowerStates = new HashMap<>(); + for (VMInstanceVO instance : vmsThatAreMissingReport) { + Date vmStateUpdateTime = instance.getPowerStateUpdateTime(); + if (vmStateUpdateTime == null) { + logger.warn("VM power state update time is null, falling back to update time for vm id: " + instance.getId()); + vmStateUpdateTime = instance.getUpdateTime(); if (vmStateUpdateTime == null) { - logger.warn("VM power state update time is null, falling back to update time for vm id: {}.", instance.getId()); - vmStateUpdateTime = instance.getUpdateTime(); - if (vmStateUpdateTime == null) { - logger.warn("VM update time is null, falling back to creation time for vm id: {}", instance.getId()); - vmStateUpdateTime = instance.getCreated(); - } + logger.warn("VM update time is null, falling back to creation time for vm id: " + instance.getId()); + vmStateUpdateTime = instance.getCreated(); } - - String lastTime = new SimpleDateFormat("yyyy/MM/dd'T'HH:mm:ss.SSS'Z'").format(vmStateUpdateTime); - logger.debug("Detected missing VM. host: {}, vm id: {}({}), power state: {}, last state update: {}" + } + if (logger.isDebugEnabled()) { + logger.debug( + String.format("Detected missing VM. host: %d, vm id: %d(%s), power state: %s, last state update: %s" , hostId , instance.getId() , instance.getUuid() , VirtualMachine.PowerState.PowerReportMissing - , lastTime); - - long milliSecondsSinceLastStateUpdate = currentTime.getTime() - vmStateUpdateTime.getTime(); - - if (force || milliSecondsSinceLastStateUpdate > milliSecondsGracefullPeriod) { - logger.debug("vm id: {} - time since last state update({}ms) has passed graceful period.", instance.getId(), milliSecondsSinceLastStateUpdate); - - // this is were a race condition might have happened if we don't re-fetch the instance; - // between the startime of this job and the currentTime of this missing-branch - // an update might have occurred that we should not override in case of out of band migration - if (_instanceDao.updatePowerState(instance.getId(), hostId, VirtualMachine.PowerState.PowerReportMissing, startTime)) { - logger.debug("VM state report is updated. host: {}, vm id: {}, power state: PowerReportMissing.", hostId, instance.getId()); - - _messageBus.publish(null, VirtualMachineManager.Topics.VM_POWER_STATE, PublishScope.GLOBAL, instance.getId()); - } else { - logger.debug("VM power state does not change, skip DB writing. 
vm id: {}", instance.getId()); - } - } else { - logger.debug("vm id: {} - time since last state update({}ms) has not passed graceful period yet.", instance.getId(), milliSecondsSinceLastStateUpdate); - } + , DateUtil.getOutputString(vmStateUpdateTime))); + } + long milliSecondsSinceLastStateUpdate = currentTime.getTime() - vmStateUpdateTime.getTime(); + if (force || (milliSecondsSinceLastStateUpdate > milliSecondsGracefulPeriod)) { + logger.debug("vm id: {} - time since last state update({} ms) has passed graceful period", + instance.getId(), milliSecondsSinceLastStateUpdate); + // this is where a race condition might have happened if we don't re-fetch the instance; + // between the startime of this job and the currentTime of this missing-branch + // an update might have occurred that we should not override in case of out of band migration + instancePowerStates.put(instance.getId(), VirtualMachine.PowerState.PowerReportMissing); + } else { + logger.debug("vm id: {} - time since last state update({} ms) has not passed graceful period yet", + instance.getId(), milliSecondsSinceLastStateUpdate); } } + updateAndPublishVmPowerStates(hostId, instancePowerStates, startTime); + } + + private void processReport(long hostId, Map translatedInfo, boolean force) { + if (logger.isDebugEnabled()) { + logger.debug("Process VM state report. Host: {}, number of records in report: {}. VMs: [{}]", + hostId, + translatedInfo.size(), + translatedInfo.entrySet().stream().map(entry -> entry.getKey() + ":" + entry.getValue()) + .collect(Collectors.joining(", ")) + "]"); + } + updateAndPublishVmPowerStates(hostId, translatedInfo, DateUtil.currentGMTTime()); + + processMissingVmReport(hostId, translatedInfo.keySet(), force); logger.debug("Done with process of VM state report. host: {}", hostId); } @Override public Map convertVmStateReport(Map states) { - final HashMap map = new HashMap(); - if (states == null) { + final HashMap map = new HashMap<>(); + if (MapUtils.isEmpty(states)) { return map; } - + Map nameIdMap = _instanceDao.getNameIdMapForVmInstanceNames(states.keySet()); for (Map.Entry entry : states.entrySet()) { - VMInstanceVO vm = findVM(entry.getKey()); - if (vm != null) { - map.put(vm.getId(), entry.getValue().getState()); + Long id = nameIdMap.get(entry.getKey()); + if (id != null) { + map.put(id, entry.getValue().getState()); } else { logger.debug("Unable to find matched VM in CloudStack DB. 
name: {}", entry.getKey()); } } - return map; } - - private VMInstanceVO findVM(String vmName) { - return _instanceDao.findVMByInstanceName(vmName); - } } diff --git a/engine/schema/src/main/java/com/cloud/capacity/dao/CapacityDao.java b/engine/schema/src/main/java/com/cloud/capacity/dao/CapacityDao.java index 9616f31d0c5c..1bb79ce417aa 100644 --- a/engine/schema/src/main/java/com/cloud/capacity/dao/CapacityDao.java +++ b/engine/schema/src/main/java/com/cloud/capacity/dao/CapacityDao.java @@ -28,6 +28,8 @@ public interface CapacityDao extends GenericDao { CapacityVO findByHostIdType(Long hostId, short capacityType); + List listByHostIdTypes(Long hostId, List capacityTypes); + List listClustersInZoneOrPodByHostCapacities(long id, long vmId, int requiredCpu, long requiredRam, short capacityTypeForOrdering, boolean isZone); List listHostsWithEnoughCapacity(int requiredCpu, long requiredRam, Long clusterId, String hostType); diff --git a/engine/schema/src/main/java/com/cloud/capacity/dao/CapacityDaoImpl.java b/engine/schema/src/main/java/com/cloud/capacity/dao/CapacityDaoImpl.java index 3acae985af4b..5e7eee4566c1 100644 --- a/engine/schema/src/main/java/com/cloud/capacity/dao/CapacityDaoImpl.java +++ b/engine/schema/src/main/java/com/cloud/capacity/dao/CapacityDaoImpl.java @@ -671,6 +671,18 @@ public CapacityVO findByHostIdType(Long hostId, short capacityType) { return findOneBy(sc); } + @Override + public List listByHostIdTypes(Long hostId, List capacityTypes) { + SearchBuilder sb = createSearchBuilder(); + sb.and("hostId", sb.entity().getHostOrPoolId(), SearchCriteria.Op.EQ); + sb.and("type", sb.entity().getCapacityType(), SearchCriteria.Op.IN); + sb.done(); + SearchCriteria sc = sb.create(); + sc.setParameters("hostId", hostId); + sc.setParameters("type", capacityTypes.toArray()); + return listBy(sc); + } + @Override public List listClustersInZoneOrPodByHostCapacities(long id, long vmId, int requiredCpu, long requiredRam, short capacityTypeForOrdering, boolean isZone) { TransactionLegacy txn = TransactionLegacy.currentTxn(); diff --git a/engine/schema/src/main/java/com/cloud/dc/ClusterDetailsDao.java b/engine/schema/src/main/java/com/cloud/dc/ClusterDetailsDao.java index 06c9c5255048..5daab544b216 100644 --- a/engine/schema/src/main/java/com/cloud/dc/ClusterDetailsDao.java +++ b/engine/schema/src/main/java/com/cloud/dc/ClusterDetailsDao.java @@ -16,6 +16,7 @@ // under the License. package com.cloud.dc; +import java.util.Collection; import java.util.Map; import com.cloud.utils.db.GenericDao; @@ -29,6 +30,8 @@ public interface ClusterDetailsDao extends GenericDao { ClusterDetailsVO findDetail(long clusterId, String name); + Map findDetails(long clusterId, Collection names); + void deleteDetails(long clusterId); String getVmwareDcName(Long clusterId); diff --git a/engine/schema/src/main/java/com/cloud/dc/ClusterDetailsDaoImpl.java b/engine/schema/src/main/java/com/cloud/dc/ClusterDetailsDaoImpl.java index 0e40f8475c16..a4f6acb90576 100644 --- a/engine/schema/src/main/java/com/cloud/dc/ClusterDetailsDaoImpl.java +++ b/engine/schema/src/main/java/com/cloud/dc/ClusterDetailsDaoImpl.java @@ -16,13 +16,16 @@ // under the License. 
package com.cloud.dc; +import java.util.Collection; import java.util.HashMap; import java.util.List; import java.util.Map; +import java.util.stream.Collectors; import org.apache.cloudstack.framework.config.ConfigKey; import org.apache.cloudstack.framework.config.ConfigKey.Scope; import org.apache.cloudstack.framework.config.ScopedConfigStorage; +import org.apache.commons.collections.CollectionUtils; import com.cloud.utils.crypt.DBEncryptionUtil; import com.cloud.utils.db.GenericDaoBase; @@ -82,6 +85,23 @@ public Map findDetails(long clusterId) { return details; } + @Override + public Map findDetails(long clusterId, Collection names) { + if (CollectionUtils.isEmpty(names)) { + return new HashMap<>(); + } + SearchBuilder sb = createSearchBuilder(); + sb.and("clusterId", sb.entity().getClusterId(), SearchCriteria.Op.EQ); + sb.and("name", sb.entity().getName(), SearchCriteria.Op.IN); + sb.done(); + SearchCriteria sc = sb.create(); + sc.setParameters("clusterId", clusterId); + sc.setParameters("name", names.toArray()); + List results = search(sc, null); + return results.stream() + .collect(Collectors.toMap(ClusterDetailsVO::getName, ClusterDetailsVO::getValue)); + } + @Override public void deleteDetails(long clusterId) { SearchCriteria sc = ClusterSearch.create(); diff --git a/engine/schema/src/main/java/com/cloud/dc/dao/ClusterDao.java b/engine/schema/src/main/java/com/cloud/dc/dao/ClusterDao.java index 6ecfdaeb0584..bf12abd5114c 100644 --- a/engine/schema/src/main/java/com/cloud/dc/dao/ClusterDao.java +++ b/engine/schema/src/main/java/com/cloud/dc/dao/ClusterDao.java @@ -16,15 +16,15 @@ // under the License. package com.cloud.dc.dao; +import java.util.List; +import java.util.Map; +import java.util.Set; + import com.cloud.cpu.CPU; import com.cloud.dc.ClusterVO; import com.cloud.hypervisor.Hypervisor.HypervisorType; import com.cloud.utils.db.GenericDao; -import java.util.List; -import java.util.Map; -import java.util.Set; - public interface ClusterDao extends GenericDao { List listByPodId(long podId); @@ -36,7 +36,7 @@ public interface ClusterDao extends GenericDao { List getAvailableHypervisorInZone(Long zoneId); - Set getDistictAvailableHypervisorsAcrossClusters(); + Set getDistinctAvailableHypervisorsAcrossClusters(); List listByDcHyType(long dcId, String hyType); @@ -46,9 +46,13 @@ public interface ClusterDao extends GenericDao { List listClustersWithDisabledPods(long zoneId); + Integer countAllByDcId(long zoneId); + + Integer countAllManagedAndEnabledByDcId(long zoneId); + List listClustersByDcId(long zoneId); - List listAllClusters(Long zoneId); + List listAllClusterIds(Long zoneId); boolean getSupportsResigning(long clusterId); diff --git a/engine/schema/src/main/java/com/cloud/dc/dao/ClusterDaoImpl.java b/engine/schema/src/main/java/com/cloud/dc/dao/ClusterDaoImpl.java index 9a56f0f2d949..af6b83976430 100644 --- a/engine/schema/src/main/java/com/cloud/dc/dao/ClusterDaoImpl.java +++ b/engine/schema/src/main/java/com/cloud/dc/dao/ClusterDaoImpl.java @@ -16,6 +16,21 @@ // under the License. 
package com.cloud.dc.dao; +import java.sql.PreparedStatement; +import java.sql.ResultSet; +import java.sql.SQLException; +import java.util.ArrayList; +import java.util.HashMap; +import java.util.HashSet; +import java.util.List; +import java.util.Map; +import java.util.Set; +import java.util.stream.Collectors; + +import javax.inject.Inject; + +import org.springframework.stereotype.Component; + import com.cloud.cpu.CPU; import com.cloud.dc.ClusterDetailsDao; import com.cloud.dc.ClusterDetailsVO; @@ -23,6 +38,7 @@ import com.cloud.dc.HostPodVO; import com.cloud.hypervisor.Hypervisor.HypervisorType; import com.cloud.org.Grouping; +import com.cloud.org.Managed; import com.cloud.utils.db.GenericDaoBase; import com.cloud.utils.db.GenericSearchBuilder; import com.cloud.utils.db.JoinBuilder; @@ -32,19 +48,6 @@ import com.cloud.utils.db.SearchCriteria.Op; import com.cloud.utils.db.TransactionLegacy; import com.cloud.utils.exception.CloudRuntimeException; -import org.springframework.stereotype.Component; - -import javax.inject.Inject; -import java.sql.PreparedStatement; -import java.sql.ResultSet; -import java.sql.SQLException; -import java.util.ArrayList; -import java.util.HashMap; -import java.util.HashSet; -import java.util.List; -import java.util.Map; -import java.util.Set; -import java.util.stream.Collectors; @Component public class ClusterDaoImpl extends GenericDaoBase implements ClusterDao { @@ -58,7 +61,6 @@ public class ClusterDaoImpl extends GenericDaoBase implements C protected final SearchBuilder ClusterSearch; protected final SearchBuilder ClusterDistinctArchSearch; protected final SearchBuilder ClusterArchSearch; - protected GenericSearchBuilder ClusterIdSearch; private static final String GET_POD_CLUSTER_MAP_PREFIX = "SELECT pod_id, id FROM cloud.cluster WHERE cluster.id IN( "; @@ -98,6 +100,8 @@ public ClusterDaoImpl() { ZoneClusterSearch = createSearchBuilder(); ZoneClusterSearch.and("dataCenterId", ZoneClusterSearch.entity().getDataCenterId(), SearchCriteria.Op.EQ); + ZoneClusterSearch.and("allocationState", ZoneClusterSearch.entity().getAllocationState(), Op.EQ); + ZoneClusterSearch.and("managedState", ZoneClusterSearch.entity().getManagedState(), Op.EQ); ZoneClusterSearch.done(); ClusterIdSearch = createSearchBuilder(Long.class); @@ -167,23 +171,15 @@ public List getAvailableHypervisorInZone(Long zoneId) { sc.setParameters("zoneId", zoneId); } List clusters = listBy(sc); - List hypers = new ArrayList(4); - for (ClusterVO cluster : clusters) { - hypers.add(cluster.getHypervisorType()); - } - - return hypers; + return clusters.stream() + .map(ClusterVO::getHypervisorType) + .distinct() + .collect(Collectors.toList()); } @Override - public Set getDistictAvailableHypervisorsAcrossClusters() { - SearchCriteria sc = ClusterSearch.create(); - List clusters = listBy(sc); - Set hypers = new HashSet<>(); - for (ClusterVO cluster : clusters) { - hypers.add(cluster.getHypervisorType()); - } - return hypers; + public Set getDistinctAvailableHypervisorsAcrossClusters() { + return new HashSet<>(getAvailableHypervisorInZone(null)); } @Override @@ -266,6 +262,23 @@ public List listClustersWithDisabledPods(long zoneId) { return customSearch(sc, null); } + @Override + public Integer countAllByDcId(long zoneId) { + SearchCriteria sc = ZoneClusterSearch.create(); + sc.setParameters("dataCenterId", zoneId); + return getCount(sc); + } + + @Override + public Integer countAllManagedAndEnabledByDcId(long zoneId) { + SearchCriteria sc = ZoneClusterSearch.create(); + sc.setParameters("dataCenterId", zoneId); 
+ sc.setParameters("allocationState", Grouping.AllocationState.Enabled); + sc.setParameters("managedState", Managed.ManagedState.Managed); + + return getCount(sc); + } + @Override public List listClustersByDcId(long zoneId) { SearchCriteria sc = ZoneClusterSearch.create(); @@ -289,7 +302,7 @@ public boolean remove(Long id) { } @Override - public List listAllClusters(Long zoneId) { + public List listAllClusterIds(Long zoneId) { SearchCriteria sc = ClusterIdSearch.create(); if (zoneId != null) { sc.setParameters("dataCenterId", zoneId); diff --git a/engine/schema/src/main/java/com/cloud/dc/dao/DataCenterIpAddressDaoImpl.java b/engine/schema/src/main/java/com/cloud/dc/dao/DataCenterIpAddressDaoImpl.java index 48b9c83c64c6..ba01e31f80a8 100644 --- a/engine/schema/src/main/java/com/cloud/dc/dao/DataCenterIpAddressDaoImpl.java +++ b/engine/schema/src/main/java/com/cloud/dc/dao/DataCenterIpAddressDaoImpl.java @@ -294,8 +294,7 @@ public int countIpAddressUsage(final String ipAddress, final long podId, final l sc.addAnd("podId", SearchCriteria.Op.EQ, podId); sc.addAnd("dataCenterId", SearchCriteria.Op.EQ, dcId); - List result = listBy(sc); - return result.size(); + return getCount(sc); } public DataCenterIpAddressDaoImpl() { diff --git a/engine/schema/src/main/java/com/cloud/dc/dao/DataCenterVnetDaoImpl.java b/engine/schema/src/main/java/com/cloud/dc/dao/DataCenterVnetDaoImpl.java index 1c29e6a944c6..ff6682497791 100644 --- a/engine/schema/src/main/java/com/cloud/dc/dao/DataCenterVnetDaoImpl.java +++ b/engine/schema/src/main/java/com/cloud/dc/dao/DataCenterVnetDaoImpl.java @@ -81,7 +81,7 @@ public List listAllocatedVnets(long physicalNetworkId) { public int countAllocatedVnets(long physicalNetworkId) { SearchCriteria sc = DcSearchAllocated.create(); sc.setParameters("physicalNetworkId", physicalNetworkId); - return listBy(sc).size(); + return getCount(sc); } @Override diff --git a/engine/schema/src/main/java/com/cloud/host/dao/HostDao.java b/engine/schema/src/main/java/com/cloud/host/dao/HostDao.java index a2df6db44e51..003bf4a34a62 100644 --- a/engine/schema/src/main/java/com/cloud/host/dao/HostDao.java +++ b/engine/schema/src/main/java/com/cloud/host/dao/HostDao.java @@ -27,6 +27,7 @@ import com.cloud.hypervisor.Hypervisor.HypervisorType; import com.cloud.info.RunningHostCountInfo; import com.cloud.resource.ResourceState; +import com.cloud.utils.Pair; import com.cloud.utils.db.GenericDao; import com.cloud.utils.fsm.StateDao; @@ -39,8 +40,14 @@ public interface HostDao extends GenericDao, StateDao status); + Integer countAllByTypeInZone(long zoneId, final Host.Type type); + Integer countUpAndEnabledHostsInZone(long zoneId); + + Pair countAllHostsAndCPUSocketsByType(Type type); + /** * Mark all hosts associated with a certain management server * as disconnected. 
@@ -75,32 +82,41 @@ public interface HostDao extends GenericDao, StateDao findHypervisorHostInCluster(long clusterId); + HostVO findAnyStateHypervisorHostInCluster(long clusterId); + HostVO findOldestExistentHypervisorHostInCluster(long clusterId); List listAllUpAndEnabledNonHAHosts(Type type, Long clusterId, Long podId, long dcId, String haTag); List findByDataCenterId(Long zoneId); + List listIdsByDataCenterId(Long zoneId); + List findByPodId(Long podId); + List listIdsByPodId(Long podId); + List findByClusterId(Long clusterId); - List findByClusterIdAndEncryptionSupport(Long clusterId); + List listIdsByClusterId(Long clusterId); - /** - * Returns hosts that are 'Up' and 'Enabled' from the given Data Center/Zone - */ - List listByDataCenterId(long id); + List listIdsForUpRouting(Long zoneId, Long podId, Long clusterId); + + List listIdsByType(Type type); + + List listIdsForUpEnabledByZoneAndHypervisor(Long zoneId, HypervisorType hypervisorType); + + List findByClusterIdAndEncryptionSupport(Long clusterId); /** - * Returns hosts that are from the given Data Center/Zone and at a given state (e.g. Creating, Enabled, Disabled, etc). + * Returns host Ids that are 'Up' and 'Enabled' from the given Data Center/Zone */ - List listByDataCenterIdAndState(long id, ResourceState state); + List listEnabledIdsByDataCenterId(long id); /** - * Returns hosts that are 'Up' and 'Disabled' from the given Data Center/Zone + * Returns host Ids that are 'Up' and 'Disabled' from the given Data Center/Zone */ - List listDisabledByDataCenterId(long id); + List listDisabledIdsByDataCenterId(long id); List listByDataCenterIdAndHypervisorType(long zoneId, Hypervisor.HypervisorType hypervisorType); @@ -110,8 +126,6 @@ public interface HostDao extends GenericDao, StateDao listAllHostsThatHaveNoRuleTag(Host.Type type, Long clusterId, Long podId, Long dcId); - List listAllHostsByType(Host.Type type); - HostVO findByPublicIp(String publicIp); List listClustersByHostTag(String hostTagOnOffering); @@ -171,4 +185,14 @@ public interface HostDao extends GenericDao, StateDao findClustersThatMatchHostTagRule(String computeOfferingTags); List listSsvmHostsWithPendingMigrateJobsOrderedByJobCount(); + + boolean isHostUp(long hostId); + + List findHostIdsByZoneClusterResourceStateTypeAndHypervisorType(final Long zoneId, final Long clusterId, + final List resourceStates, final List types, + final List hypervisorTypes); + + List listDistinctHypervisorTypes(final Long zoneId); + + List listByIds(final List ids); } diff --git a/engine/schema/src/main/java/com/cloud/host/dao/HostDaoImpl.java b/engine/schema/src/main/java/com/cloud/host/dao/HostDaoImpl.java index 63950294654c..f43da5e81215 100644 --- a/engine/schema/src/main/java/com/cloud/host/dao/HostDaoImpl.java +++ b/engine/schema/src/main/java/com/cloud/host/dao/HostDaoImpl.java @@ -20,6 +20,7 @@ import java.sql.ResultSet; import java.sql.SQLException; import java.util.ArrayList; +import java.util.Arrays; import java.util.Date; import java.util.HashMap; import java.util.HashSet; @@ -45,8 +46,8 @@ import com.cloud.dc.dao.ClusterDao; import com.cloud.gpu.dao.HostGpuGroupsDao; import com.cloud.gpu.dao.VGPUTypesDao; -import com.cloud.host.Host; import com.cloud.host.DetailVO; +import com.cloud.host.Host; import com.cloud.host.Host.Type; import com.cloud.host.HostTagVO; import com.cloud.host.HostVO; @@ -59,6 +60,7 @@ import com.cloud.org.Managed; import com.cloud.resource.ResourceState; import com.cloud.utils.DateUtil; +import com.cloud.utils.Pair; import com.cloud.utils.db.Attribute; 
import com.cloud.utils.db.DB; import com.cloud.utils.db.Filter; @@ -74,8 +76,6 @@ import com.cloud.utils.db.UpdateBuilder; import com.cloud.utils.exception.CloudRuntimeException; -import java.util.Arrays; - @DB @TableGenerator(name = "host_req_sq", table = "op_host", pkColumnName = "id", valueColumnName = "sequence", allocationSize = 1) public class HostDaoImpl extends GenericDaoBase implements HostDao { //FIXME: , ExternalIdDao { @@ -98,6 +98,7 @@ public class HostDaoImpl extends GenericDaoBase implements HostDao protected SearchBuilder TypePodDcStatusSearch; + protected SearchBuilder IdsSearch; protected SearchBuilder IdStatusSearch; protected SearchBuilder TypeDcSearch; protected SearchBuilder TypeDcStatusSearch; @@ -124,6 +125,7 @@ public class HostDaoImpl extends GenericDaoBase implements HostDao protected SearchBuilder UnmanagedApplianceSearch; protected SearchBuilder MaintenanceCountSearch; protected SearchBuilder HostTypeCountSearch; + protected SearchBuilder HostTypeClusterCountSearch; protected SearchBuilder ResponsibleMsCountSearch; protected SearchBuilder HostTypeZoneCountSearch; protected SearchBuilder ClusterStatusSearch; @@ -136,8 +138,7 @@ public class HostDaoImpl extends GenericDaoBase implements HostDao protected SearchBuilder ManagedRoutingServersSearch; protected SearchBuilder SecondaryStorageVMSearch; - protected GenericSearchBuilder HostIdSearch; - protected GenericSearchBuilder HostsInStatusSearch; + protected GenericSearchBuilder HostsInStatusesSearch; protected GenericSearchBuilder CountRoutingByDc; protected SearchBuilder HostTransferSearch; protected SearchBuilder ClusterManagedSearch; @@ -187,12 +188,21 @@ public void init() { HostTypeCountSearch = createSearchBuilder(); HostTypeCountSearch.and("type", HostTypeCountSearch.entity().getType(), SearchCriteria.Op.EQ); + HostTypeCountSearch.and("zoneId", HostTypeCountSearch.entity().getDataCenterId(), SearchCriteria.Op.EQ); + HostTypeCountSearch.and("resourceState", HostTypeCountSearch.entity().getResourceState(), SearchCriteria.Op.EQ); HostTypeCountSearch.done(); ResponsibleMsCountSearch = createSearchBuilder(); ResponsibleMsCountSearch.and("managementServerId", ResponsibleMsCountSearch.entity().getManagementServerId(), SearchCriteria.Op.EQ); ResponsibleMsCountSearch.done(); + HostTypeClusterCountSearch = createSearchBuilder(); + HostTypeClusterCountSearch.and("cluster", HostTypeClusterCountSearch.entity().getClusterId(), SearchCriteria.Op.EQ); + HostTypeClusterCountSearch.and("type", HostTypeClusterCountSearch.entity().getType(), SearchCriteria.Op.EQ); + HostTypeClusterCountSearch.and("status", HostTypeClusterCountSearch.entity().getStatus(), SearchCriteria.Op.IN); + HostTypeClusterCountSearch.and("removed", HostTypeClusterCountSearch.entity().getRemoved(), SearchCriteria.Op.NULL); + HostTypeClusterCountSearch.done(); + HostTypeZoneCountSearch = createSearchBuilder(); HostTypeZoneCountSearch.and("type", HostTypeZoneCountSearch.entity().getType(), SearchCriteria.Op.EQ); HostTypeZoneCountSearch.and("dc", HostTypeZoneCountSearch.entity().getDataCenterId(), SearchCriteria.Op.EQ); @@ -240,6 +250,10 @@ public void init() { TypeClusterStatusSearch.and("resourceState", TypeClusterStatusSearch.entity().getResourceState(), SearchCriteria.Op.EQ); TypeClusterStatusSearch.done(); + IdsSearch = createSearchBuilder(); + IdsSearch.and("id", IdsSearch.entity().getId(), SearchCriteria.Op.IN); + IdsSearch.done(); + IdStatusSearch = createSearchBuilder(); IdStatusSearch.and("id", IdStatusSearch.entity().getId(), SearchCriteria.Op.EQ); 
IdStatusSearch.and("states", IdStatusSearch.entity().getStatus(), SearchCriteria.Op.IN); @@ -386,14 +400,14 @@ public void init() { AvailHypevisorInZone.groupBy(AvailHypevisorInZone.entity().getHypervisorType()); AvailHypevisorInZone.done(); - HostsInStatusSearch = createSearchBuilder(Long.class); - HostsInStatusSearch.selectFields(HostsInStatusSearch.entity().getId()); - HostsInStatusSearch.and("dc", HostsInStatusSearch.entity().getDataCenterId(), Op.EQ); - HostsInStatusSearch.and("pod", HostsInStatusSearch.entity().getPodId(), Op.EQ); - HostsInStatusSearch.and("cluster", HostsInStatusSearch.entity().getClusterId(), Op.EQ); - HostsInStatusSearch.and("type", HostsInStatusSearch.entity().getType(), Op.EQ); - HostsInStatusSearch.and("statuses", HostsInStatusSearch.entity().getStatus(), Op.IN); - HostsInStatusSearch.done(); + HostsInStatusesSearch = createSearchBuilder(Long.class); + HostsInStatusesSearch.selectFields(HostsInStatusesSearch.entity().getId()); + HostsInStatusesSearch.and("dc", HostsInStatusesSearch.entity().getDataCenterId(), Op.EQ); + HostsInStatusesSearch.and("pod", HostsInStatusesSearch.entity().getPodId(), Op.EQ); + HostsInStatusesSearch.and("cluster", HostsInStatusesSearch.entity().getClusterId(), Op.EQ); + HostsInStatusesSearch.and("type", HostsInStatusesSearch.entity().getType(), Op.EQ); + HostsInStatusesSearch.and("statuses", HostsInStatusesSearch.entity().getStatus(), Op.IN); + HostsInStatusesSearch.done(); CountRoutingByDc = createSearchBuilder(Long.class); CountRoutingByDc.select(null, Func.COUNT, null); @@ -456,11 +470,6 @@ public void init() { HostsInClusterSearch.and("server", HostsInClusterSearch.entity().getManagementServerId(), SearchCriteria.Op.NNULL); HostsInClusterSearch.done(); - HostIdSearch = createSearchBuilder(Long.class); - HostIdSearch.selectFields(HostIdSearch.entity().getId()); - HostIdSearch.and("dataCenterId", HostIdSearch.entity().getDataCenterId(), Op.EQ); - HostIdSearch.done(); - searchBuilderFindByRuleTag = _hostTagsDao.createSearchBuilder(); searchBuilderFindByRuleTag.and("is_tag_a_rule", searchBuilderFindByRuleTag.entity().getIsTagARule(), Op.EQ); searchBuilderFindByRuleTag.or("tagDoesNotExist", searchBuilderFindByRuleTag.entity().getIsTagARule(), Op.NULL); @@ -492,8 +501,7 @@ public long countBy(long clusterId, ResourceState... 
states) { sc.setParameters("resourceState", (Object[])states); sc.setParameters("cluster", clusterId); - List hosts = listBy(sc); - return hosts.size(); + return getCount(sc); } @Override @@ -503,37 +511,63 @@ public Integer countAllByType(final Host.Type type) { return getCount(sc); } + @Override + public Integer countAllInClusterByTypeAndStates(Long clusterId, final Host.Type type, List status) { + SearchCriteria sc = HostTypeClusterCountSearch.create(); + if (clusterId != null) { + sc.setParameters("cluster", clusterId); + } + if (type != null) { + sc.setParameters("type", type); + } + if (status != null) { + sc.setParameters("status", status.toArray()); + } + return getCount(sc); + } + @Override public Integer countAllByTypeInZone(long zoneId, Type type) { SearchCriteria sc = HostTypeCountSearch.create(); sc.setParameters("type", type); - sc.setParameters("dc", zoneId); + sc.setParameters("zoneId", zoneId); return getCount(sc); } @Override - public List listByDataCenterId(long id) { - return listByDataCenterIdAndState(id, ResourceState.Enabled); + public Integer countUpAndEnabledHostsInZone(long zoneId) { + SearchCriteria sc = HostTypeCountSearch.create(); + sc.setParameters("type", Type.Routing); + sc.setParameters("resourceState", ResourceState.Enabled); + sc.setParameters("zoneId", zoneId); + return getCount(sc); } @Override - public List listByDataCenterIdAndState(long id, ResourceState state) { - SearchCriteria sc = scHostsFromZoneUpRouting(id); - sc.setParameters("resourceState", state); - return listBy(sc); + public Pair countAllHostsAndCPUSocketsByType(Type type) { + GenericSearchBuilder sb = createSearchBuilder(SumCount.class); + sb.select("sum", Func.SUM, sb.entity().getCpuSockets()); + sb.select("count", Func.COUNT, null); + sb.and("type", sb.entity().getType(), SearchCriteria.Op.EQ); + sb.done(); + SearchCriteria sc = sb.create(); + sc.setParameters("type", type); + SumCount result = customSearch(sc, null).get(0); + return new Pair<>((int)result.count, (int)result.sum); + } + + private List listIdsForRoutingByZoneIdAndResourceState(long zoneId, ResourceState state) { + return listIdsBy(Type.Routing, Status.Up, state, null, zoneId, null, null); } @Override - public List listDisabledByDataCenterId(long id) { - return listByDataCenterIdAndState(id, ResourceState.Disabled); + public List listEnabledIdsByDataCenterId(long id) { + return listIdsForRoutingByZoneIdAndResourceState(id, ResourceState.Enabled); } - private SearchCriteria scHostsFromZoneUpRouting(long id) { - SearchCriteria sc = DcSearch.create(); - sc.setParameters("dc", id); - sc.setParameters("status", Status.Up); - sc.setParameters("type", Host.Type.Routing); - return sc; + @Override + public List listDisabledIdsByDataCenterId(long id) { + return listIdsForRoutingByZoneIdAndResourceState(id, ResourceState.Disabled); } @Override @@ -1178,6 +1212,11 @@ public List findByDataCenterId(Long zoneId) { return listBy(sc); } + @Override + public List listIdsByDataCenterId(Long zoneId) { + return listIdsBy(Type.Routing, null, null, null, zoneId, null, null); + } + @Override public List findByPodId(Long podId) { SearchCriteria sc = PodSearch.create(); @@ -1185,6 +1224,11 @@ public List findByPodId(Long podId) { return listBy(sc); } + @Override + public List listIdsByPodId(Long podId) { + return listIdsBy(null, null, null, null, null, podId, null); + } + @Override public List findByClusterId(Long clusterId) { SearchCriteria sc = ClusterSearch.create(); @@ -1192,6 +1236,63 @@ public List findByClusterId(Long clusterId) { return 
listBy(sc); } + private List listIdsBy(Host.Type type, Status status, ResourceState resourceState, + HypervisorType hypervisorType, Long zoneId, Long podId, Long clusterId) { + GenericSearchBuilder sb = createSearchBuilder(Long.class); + sb.selectFields(sb.entity().getId()); + sb.and("type", sb.entity().getType(), SearchCriteria.Op.EQ); + sb.and("status", sb.entity().getStatus(), SearchCriteria.Op.EQ); + sb.and("resourceState", sb.entity().getResourceState(), SearchCriteria.Op.EQ); + sb.and("hypervisorType", sb.entity().getHypervisorType(), SearchCriteria.Op.EQ); + sb.and("zoneId", sb.entity().getDataCenterId(), SearchCriteria.Op.EQ); + sb.and("podId", sb.entity().getPodId(), SearchCriteria.Op.EQ); + sb.and("clusterId", sb.entity().getClusterId(), SearchCriteria.Op.EQ); + sb.done(); + SearchCriteria sc = sb.create(); + if (type != null) { + sc.setParameters("type", type); + } + if (status != null) { + sc.setParameters("status", status); + } + if (resourceState != null) { + sc.setParameters("resourceState", resourceState); + } + if (hypervisorType != null) { + sc.setParameters("hypervisorType", hypervisorType); + } + if (zoneId != null) { + sc.setParameters("zoneId", zoneId); + } + if (podId != null) { + sc.setParameters("podId", podId); + } + if (clusterId != null) { + sc.setParameters("clusterId", clusterId); + } + return customSearch(sc, null); + } + + @Override + public List listIdsByClusterId(Long clusterId) { + return listIdsBy(null, null, null, null, null, null, clusterId); + } + + @Override + public List listIdsForUpRouting(Long zoneId, Long podId, Long clusterId) { + return listIdsBy(Type.Routing, Status.Up, null, null, zoneId, podId, clusterId); + } + + @Override + public List listIdsByType(Type type) { + return listIdsBy(type, null, null, null, null, null, null); + } + + @Override + public List listIdsForUpEnabledByZoneAndHypervisor(Long zoneId, HypervisorType hypervisorType) { + return listIdsBy(null, Status.Up, ResourceState.Enabled, hypervisorType, zoneId, null, null); + } + @Override public List findByClusterIdAndEncryptionSupport(Long clusterId) { SearchBuilder hostCapabilitySearch = _detailsDao.createSearchBuilder(); @@ -1244,6 +1345,15 @@ public List findHypervisorHostInCluster(long clusterId) { return listBy(sc); } + @Override + public HostVO findAnyStateHypervisorHostInCluster(long clusterId) { + SearchCriteria sc = TypeClusterStatusSearch.create(); + sc.setParameters("type", Host.Type.Routing); + sc.setParameters("cluster", clusterId); + List list = listBy(sc, new Filter(1)); + return list.isEmpty() ? 
null : list.get(0); + } + @Override public HostVO findOldestExistentHypervisorHostInCluster(long clusterId) { SearchCriteria sc = TypeClusterStatusSearch.create(); @@ -1263,9 +1373,7 @@ public HostVO findOldestExistentHypervisorHostInCluster(long clusterId) { @Override public List listAllHosts(long zoneId) { - SearchCriteria sc = HostIdSearch.create(); - sc.addAnd("dataCenterId", SearchCriteria.Op.EQ, zoneId); - return customSearch(sc, null); + return listIdsBy(null, null, null, null, zoneId, null, null); } @Override @@ -1449,15 +1557,6 @@ public List listOrderedHostsHypervisorVersionsInDatacenter(long datacent return result; } - @Override - public List listAllHostsByType(Host.Type type) { - SearchCriteria sc = TypeSearch.create(); - sc.setParameters("type", type); - sc.setParameters("resourceState", ResourceState.Enabled); - - return listBy(sc); - } - @Override public List listByType(Host.Type type) { SearchCriteria sc = TypeSearch.create(); @@ -1602,4 +1701,70 @@ private String createSqlFindHostToExecuteCommand(boolean useDisabledHosts) { } return String.format(sqlFindHostInZoneToExecuteCommand, hostResourceStatus); } + + @Override + public boolean isHostUp(long hostId) { + GenericSearchBuilder sb = createSearchBuilder(Status.class); + sb.and("id", sb.entity().getId(), Op.EQ); + sb.selectFields(sb.entity().getStatus()); + SearchCriteria sc = sb.create(); + sc.setParameters("id", hostId); + List statuses = customSearch(sc, null); + return CollectionUtils.isNotEmpty(statuses) && Status.Up.equals(statuses.get(0)); + } + + @Override + public List findHostIdsByZoneClusterResourceStateTypeAndHypervisorType(final Long zoneId, final Long clusterId, + final List resourceStates, final List types, + final List hypervisorTypes) { + GenericSearchBuilder sb = createSearchBuilder(Long.class); + sb.selectFields(sb.entity().getId()); + sb.and("zoneId", sb.entity().getDataCenterId(), SearchCriteria.Op.EQ); + sb.and("clusterId", sb.entity().getClusterId(), SearchCriteria.Op.EQ); + sb.and("resourceState", sb.entity().getResourceState(), SearchCriteria.Op.IN); + sb.and("type", sb.entity().getType(), SearchCriteria.Op.IN); + if (CollectionUtils.isNotEmpty(hypervisorTypes)) { + sb.and().op(sb.entity().getHypervisorType(), SearchCriteria.Op.NULL); + sb.or("hypervisorTypes", sb.entity().getHypervisorType(), SearchCriteria.Op.IN); + sb.cp(); + } + sb.done(); + SearchCriteria sc = sb.create(); + if (zoneId != null) { + sc.setParameters("zoneId", zoneId); + } + if (clusterId != null) { + sc.setParameters("clusterId", clusterId); + } + if (CollectionUtils.isNotEmpty(hypervisorTypes)) { + sc.setParameters("hypervisorTypes", hypervisorTypes.toArray()); + } + sc.setParameters("resourceState", resourceStates.toArray()); + sc.setParameters("type", types.toArray()); + return customSearch(sc, null); + } + + @Override + public List listDistinctHypervisorTypes(final Long zoneId) { + GenericSearchBuilder sb = createSearchBuilder(HypervisorType.class); + sb.and("zoneId", sb.entity().getDataCenterId(), SearchCriteria.Op.EQ); + sb.and("type", sb.entity().getType(), SearchCriteria.Op.EQ); + sb.select(null, Func.DISTINCT, sb.entity().getHypervisorType()); + sb.done(); + SearchCriteria sc = sb.create(); + if (zoneId != null) { + sc.setParameters("zoneId", zoneId); + } + sc.setParameters("type", Type.Routing); + return customSearch(sc, null); + } + + @Override + public List listByIds(List ids) { + if (CollectionUtils.isEmpty(ids)) { + return new ArrayList<>(); + } + SearchCriteria sc = IdsSearch.create(); + sc.setParameters("id", ids.toArray()); + return search(sc, 
null); + } } diff --git a/engine/schema/src/main/java/com/cloud/network/dao/IPAddressDaoImpl.java b/engine/schema/src/main/java/com/cloud/network/dao/IPAddressDaoImpl.java index aa143838c343..5499d04e3a1f 100644 --- a/engine/schema/src/main/java/com/cloud/network/dao/IPAddressDaoImpl.java +++ b/engine/schema/src/main/java/com/cloud/network/dao/IPAddressDaoImpl.java @@ -421,7 +421,7 @@ public long countFreePublicIPs() { public long countFreeIpsInVlan(long vlanDbId) { SearchCriteria sc = VlanDbIdSearchUnallocated.create(); sc.setParameters("vlanDbId", vlanDbId); - return listBy(sc).size(); + return getCount(sc); } @Override diff --git a/engine/schema/src/main/java/com/cloud/network/dao/NetworkDaoImpl.java b/engine/schema/src/main/java/com/cloud/network/dao/NetworkDaoImpl.java index fa448b026e45..0aae532eac57 100644 --- a/engine/schema/src/main/java/com/cloud/network/dao/NetworkDaoImpl.java +++ b/engine/schema/src/main/java/com/cloud/network/dao/NetworkDaoImpl.java @@ -415,8 +415,7 @@ public int getOtherPersistentNetworksCount(long id, String broadcastURI, boolean sc.setParameters("broadcastUri", broadcastURI); sc.setParameters("guestType", guestTypes); sc.setJoinParameters("persistent", "persistent", isPersistent); - List persistentNetworks = search(sc, null); - return persistentNetworks.size(); + return getCount(sc); } @Override diff --git a/engine/schema/src/main/java/com/cloud/secstorage/CommandExecLogDaoImpl.java b/engine/schema/src/main/java/com/cloud/secstorage/CommandExecLogDaoImpl.java index a37acdf60298..8229c3a62fc2 100644 --- a/engine/schema/src/main/java/com/cloud/secstorage/CommandExecLogDaoImpl.java +++ b/engine/schema/src/main/java/com/cloud/secstorage/CommandExecLogDaoImpl.java @@ -55,8 +55,7 @@ public Integer getCopyCmdCountForSSVM(Long id) { SearchCriteria sc = CommandSearch.create(); sc.setParameters("host_id", id); sc.setParameters("command_name", "CopyCommand"); - List copyCmds = customSearch(sc, null); - return copyCmds.size(); + return getCount(sc); } @Override diff --git a/engine/schema/src/main/java/com/cloud/service/dao/ServiceOfferingDao.java b/engine/schema/src/main/java/com/cloud/service/dao/ServiceOfferingDao.java index 48e63d8e2b55..ceb5b0a4fc1c 100644 --- a/engine/schema/src/main/java/com/cloud/service/dao/ServiceOfferingDao.java +++ b/engine/schema/src/main/java/com/cloud/service/dao/ServiceOfferingDao.java @@ -54,7 +54,7 @@ List createSystemServiceOfferings(String name, String uniqueN List listPublicByCpuAndMemory(Integer cpus, Integer memory); - List listByHostTag(String tag); - ServiceOfferingVO findServiceOfferingByComputeOnlyDiskOffering(long diskOfferingId, boolean includingRemoved); + + List listIdsByHostTag(String tag); } diff --git a/engine/schema/src/main/java/com/cloud/service/dao/ServiceOfferingDaoImpl.java b/engine/schema/src/main/java/com/cloud/service/dao/ServiceOfferingDaoImpl.java index 706dcdc1b7b5..803522fa6aa6 100644 --- a/engine/schema/src/main/java/com/cloud/service/dao/ServiceOfferingDaoImpl.java +++ b/engine/schema/src/main/java/com/cloud/service/dao/ServiceOfferingDaoImpl.java @@ -34,6 +34,7 @@ import com.cloud.storage.Storage.ProvisioningType; import com.cloud.utils.db.DB; import com.cloud.utils.db.GenericDaoBase; +import com.cloud.utils.db.GenericSearchBuilder; import com.cloud.utils.db.SearchBuilder; import com.cloud.utils.db.SearchCriteria; import com.cloud.utils.exception.CloudRuntimeException; @@ -293,8 +294,9 @@ public ServiceOfferingVO findServiceOfferingByComputeOnlyDiskOffering(long diskO } @Override - public List 
listByHostTag(String tag) { - SearchBuilder sb = createSearchBuilder(); + public List listIdsByHostTag(String tag) { + GenericSearchBuilder sb = createSearchBuilder(Long.class); + sb.selectFields(sb.entity().getId()); sb.and("tagNotNull", sb.entity().getHostTag(), SearchCriteria.Op.NNULL); sb.and().op("tagEq", sb.entity().getHostTag(), SearchCriteria.Op.EQ); sb.or("tagStartLike", sb.entity().getHostTag(), SearchCriteria.Op.LIKE); @@ -302,11 +304,12 @@ public List listByHostTag(String tag) { sb.or("tagEndLike", sb.entity().getHostTag(), SearchCriteria.Op.LIKE); sb.cp(); sb.done(); - SearchCriteria sc = sb.create(); + SearchCriteria sc = sb.create(); + sc.setParameters("tagEq", tag); sc.setParameters("tagStartLike", tag + ",%"); sc.setParameters("tagMidLike", "%," + tag + ",%"); sc.setParameters("tagEndLike", "%," + tag); - return listBy(sc); + return customSearch(sc, null); } } diff --git a/engine/schema/src/main/java/com/cloud/storage/dao/StoragePoolHostDao.java b/engine/schema/src/main/java/com/cloud/storage/dao/StoragePoolHostDao.java index 62ef5b7570d1..639c2571541d 100644 --- a/engine/schema/src/main/java/com/cloud/storage/dao/StoragePoolHostDao.java +++ b/engine/schema/src/main/java/com/cloud/storage/dao/StoragePoolHostDao.java @@ -34,7 +34,7 @@ public interface StoragePoolHostDao extends GenericDao List findHostsConnectedToPools(List poolIds); - List> getDatacenterStoragePoolHostInfo(long dcId, boolean sharedOnly); + boolean hasDatacenterStoragePoolHostInfo(long dcId, boolean sharedOnly); public void deletePrimaryRecordsForHost(long hostId); diff --git a/engine/schema/src/main/java/com/cloud/storage/dao/StoragePoolHostDaoImpl.java b/engine/schema/src/main/java/com/cloud/storage/dao/StoragePoolHostDaoImpl.java index 987a42f410e7..5a466af348c3 100644 --- a/engine/schema/src/main/java/com/cloud/storage/dao/StoragePoolHostDaoImpl.java +++ b/engine/schema/src/main/java/com/cloud/storage/dao/StoragePoolHostDaoImpl.java @@ -55,11 +55,11 @@ public class StoragePoolHostDaoImpl extends GenericDaoBase findHostsConnectedToPools(List poolIds) { } @Override - public List> getDatacenterStoragePoolHostInfo(long dcId, boolean sharedOnly) { - ArrayList> l = new ArrayList>(); + public boolean hasDatacenterStoragePoolHostInfo(long dcId, boolean sharedOnly) { + Long poolCount = 0L; String sql = sharedOnly ? 
SHARED_STORAGE_POOL_HOST_INFO : STORAGE_POOL_HOST_INFO; TransactionLegacy txn = TransactionLegacy.currentTxn(); - PreparedStatement pstmt = null; - try { - pstmt = txn.prepareAutoCloseStatement(sql); + try (PreparedStatement pstmt = txn.prepareAutoCloseStatement(sql)) { pstmt.setLong(1, dcId); ResultSet rs = pstmt.executeQuery(); while (rs.next()) { - l.add(new Pair(rs.getLong(1), rs.getInt(2))); + poolCount = rs.getLong(1); + if (poolCount > 0) { + return true; + } } } catch (SQLException e) { logger.debug("SQLException: ", e); } - return l; + return false; } /** diff --git a/engine/schema/src/main/java/com/cloud/storage/dao/VMTemplateDao.java b/engine/schema/src/main/java/com/cloud/storage/dao/VMTemplateDao.java index 1c5a2cb4256a..3ac514530ced 100644 --- a/engine/schema/src/main/java/com/cloud/storage/dao/VMTemplateDao.java +++ b/engine/schema/src/main/java/com/cloud/storage/dao/VMTemplateDao.java @@ -67,6 +67,8 @@ public interface VMTemplateDao extends GenericDao, StateDao< public List userIsoSearch(boolean listRemoved); + List listAllReadySystemVMTemplates(Long zoneId); + VMTemplateVO findSystemVMTemplate(long zoneId); VMTemplateVO findSystemVMReadyTemplate(long zoneId, HypervisorType hypervisorType); @@ -91,6 +93,5 @@ public interface VMTemplateDao extends GenericDao, StateDao< List listByIds(List ids); - List listByTemplateTag(String tag); - + List listIdsByTemplateTag(String tag); } diff --git a/engine/schema/src/main/java/com/cloud/storage/dao/VMTemplateDaoImpl.java b/engine/schema/src/main/java/com/cloud/storage/dao/VMTemplateDaoImpl.java index 4665f6602512..7513848536b2 100644 --- a/engine/schema/src/main/java/com/cloud/storage/dao/VMTemplateDaoImpl.java +++ b/engine/schema/src/main/java/com/cloud/storage/dao/VMTemplateDaoImpl.java @@ -344,19 +344,12 @@ public boolean configure(String name, Map params) throws Configu readySystemTemplateSearch = createSearchBuilder(); readySystemTemplateSearch.and("state", readySystemTemplateSearch.entity().getState(), SearchCriteria.Op.EQ); readySystemTemplateSearch.and("templateType", readySystemTemplateSearch.entity().getTemplateType(), SearchCriteria.Op.EQ); + readySystemTemplateSearch.and("hypervisorType", readySystemTemplateSearch.entity().getHypervisorType(), SearchCriteria.Op.IN); SearchBuilder templateDownloadSearch = _templateDataStoreDao.createSearchBuilder(); templateDownloadSearch.and("downloadState", templateDownloadSearch.entity().getDownloadState(), SearchCriteria.Op.IN); readySystemTemplateSearch.join("vmTemplateJoinTemplateStoreRef", templateDownloadSearch, templateDownloadSearch.entity().getTemplateId(), readySystemTemplateSearch.entity().getId(), JoinBuilder.JoinType.INNER); - SearchBuilder hostHyperSearch2 = _hostDao.createSearchBuilder(); - hostHyperSearch2.and("type", hostHyperSearch2.entity().getType(), SearchCriteria.Op.EQ); - hostHyperSearch2.and("zoneId", hostHyperSearch2.entity().getDataCenterId(), SearchCriteria.Op.EQ); - hostHyperSearch2.and("removed", hostHyperSearch2.entity().getRemoved(), SearchCriteria.Op.NULL); - hostHyperSearch2.groupBy(hostHyperSearch2.entity().getHypervisorType()); - - readySystemTemplateSearch.join("tmplHyper", hostHyperSearch2, hostHyperSearch2.entity().getHypervisorType(), readySystemTemplateSearch.entity() .getHypervisorType(), JoinBuilder.JoinType.INNER); - hostHyperSearch2.done(); + readySystemTemplateSearch.groupBy(readySystemTemplateSearch.entity().getId()); readySystemTemplateSearch.done(); tmpltTypeHyperSearch2 = createSearchBuilder(); @@ -556,29 +549,35 @@ public VMTemplateVO 
findSystemVMTemplate(long zoneId) { } @Override - public VMTemplateVO findSystemVMReadyTemplate(long zoneId, HypervisorType hypervisorType) { + public List listAllReadySystemVMTemplates(Long zoneId) { + List availableHypervisors = _hostDao.listDistinctHypervisorTypes(zoneId); + if (CollectionUtils.isEmpty(availableHypervisors)) { + return Collections.emptyList(); + } SearchCriteria sc = readySystemTemplateSearch.create(); sc.setParameters("templateType", Storage.TemplateType.SYSTEM); sc.setParameters("state", VirtualMachineTemplate.State.Active); - sc.setJoinParameters("tmplHyper", "type", Host.Type.Routing); - sc.setJoinParameters("tmplHyper", "zoneId", zoneId); - sc.setJoinParameters("vmTemplateJoinTemplateStoreRef", "downloadState", new VMTemplateStorageResourceAssoc.Status[] {VMTemplateStorageResourceAssoc.Status.DOWNLOADED, VMTemplateStorageResourceAssoc.Status.BYPASSED}); - + sc.setParameters("hypervisorType", availableHypervisors.toArray()); + sc.setJoinParameters("vmTemplateJoinTemplateStoreRef", "downloadState", + List.of(VMTemplateStorageResourceAssoc.Status.DOWNLOADED, + VMTemplateStorageResourceAssoc.Status.BYPASSED).toArray()); // order by descending order of id - List tmplts = listBy(sc, new Filter(VMTemplateVO.class, "id", false, null, null)); - - if (tmplts.size() > 0) { - if (hypervisorType == HypervisorType.Any) { - return tmplts.get(0); - } - for (VMTemplateVO tmplt : tmplts) { - if (tmplt.getHypervisorType() == hypervisorType) { - return tmplt; - } - } + return listBy(sc, new Filter(VMTemplateVO.class, "id", false, null, null)); + } + @Override + public VMTemplateVO findSystemVMReadyTemplate(long zoneId, HypervisorType hypervisorType) { + List templates = listAllReadySystemVMTemplates(zoneId); + if (CollectionUtils.isEmpty(templates)) { + return null; } - return null; + if (hypervisorType == HypervisorType.Any) { + return templates.get(0); + } + return templates.stream() + .filter(t -> t.getHypervisorType() == hypervisorType) + .findFirst() + .orElse(null); } @Override @@ -687,13 +686,14 @@ public boolean remove(Long id) { } @Override - public List listByTemplateTag(String tag) { - SearchBuilder sb = createSearchBuilder(); + public List listIdsByTemplateTag(String tag) { + GenericSearchBuilder sb = createSearchBuilder(Long.class); + sb.selectFields(sb.entity().getId()); sb.and("tag", sb.entity().getTemplateTag(), SearchCriteria.Op.EQ); sb.done(); - SearchCriteria sc = sb.create(); + SearchCriteria sc = sb.create(); sc.setParameters("tag", tag); - return listIncludingRemovedBy(sc); + return customSearchIncludingRemoved(sc, null); } @Override diff --git a/engine/schema/src/main/java/com/cloud/storage/dao/VolumeDaoImpl.java b/engine/schema/src/main/java/com/cloud/storage/dao/VolumeDaoImpl.java index 0c4d707635aa..750dbf2bee0f 100644 --- a/engine/schema/src/main/java/com/cloud/storage/dao/VolumeDaoImpl.java +++ b/engine/schema/src/main/java/com/cloud/storage/dao/VolumeDaoImpl.java @@ -571,14 +571,6 @@ public long secondaryStorageUsedForAccount(long accountId) { } } - public static class SumCount { - public long sum; - public long count; - - public SumCount() { - } - } - @Override public List listVolumesToBeDestroyed() { SearchCriteria sc = AllFieldsSearch.create(); diff --git a/engine/schema/src/main/java/com/cloud/upgrade/SystemVmTemplateRegistration.java b/engine/schema/src/main/java/com/cloud/upgrade/SystemVmTemplateRegistration.java index 7314937ff5b2..f9f973f45cf9 100644 --- a/engine/schema/src/main/java/com/cloud/upgrade/SystemVmTemplateRegistration.java +++ 
b/engine/schema/src/main/java/com/cloud/upgrade/SystemVmTemplateRegistration.java @@ -861,7 +861,7 @@ public void updateSystemVmTemplates(final Connection conn) { public void doInTransactionWithoutResult(final TransactionStatus status) { Set hypervisorsListInUse = new HashSet(); try { - hypervisorsListInUse = clusterDao.getDistictAvailableHypervisorsAcrossClusters(); + hypervisorsListInUse = clusterDao.getDistinctAvailableHypervisorsAcrossClusters(); } catch (final Exception e) { LOGGER.error("updateSystemVmTemplates: Exception caught while getting hypervisor types from clusters: " + e.getMessage()); diff --git a/engine/schema/src/main/java/com/cloud/upgrade/dao/DatabaseAccessObject.java b/engine/schema/src/main/java/com/cloud/upgrade/dao/DatabaseAccessObject.java index 1c2c4b3c7ce7..0b973d195deb 100644 --- a/engine/schema/src/main/java/com/cloud/upgrade/dao/DatabaseAccessObject.java +++ b/engine/schema/src/main/java/com/cloud/upgrade/dao/DatabaseAccessObject.java @@ -114,6 +114,17 @@ public void createIndex(Connection conn, String tableName, String indexName, Str } } + public void renameIndex(Connection conn, String tableName, String oldName, String newName) { + String stmt = String.format("ALTER TABLE %s RENAME INDEX %s TO %s", tableName, oldName, newName); + logger.debug("Statement: {}", stmt); + try (PreparedStatement pstmt = conn.prepareStatement(stmt)) { + pstmt.execute(); + logger.debug("Renamed index {} to {}", oldName, newName); + } catch (SQLException e) { + logger.warn("Unable to rename index {} to {}", oldName, newName, e); + } + } + protected void closePreparedStatement(PreparedStatement pstmt, String errorMessage) { try { if (pstmt != null) { diff --git a/engine/schema/src/main/java/com/cloud/upgrade/dao/DbUpgradeUtils.java b/engine/schema/src/main/java/com/cloud/upgrade/dao/DbUpgradeUtils.java index 51e6ac7b9a1d..2f90422adf8b 100644 --- a/engine/schema/src/main/java/com/cloud/upgrade/dao/DbUpgradeUtils.java +++ b/engine/schema/src/main/java/com/cloud/upgrade/dao/DbUpgradeUtils.java @@ -31,6 +31,12 @@ public static void addIndexIfNeeded(Connection conn, String tableName, String... 
} } + public static void renameIndexIfNeeded(Connection conn, String tableName, String oldName, String newName) { + if (dao.indexExists(conn, tableName, oldName)) { + dao.renameIndex(conn, tableName, oldName, newName); + } + } + public static void addForeignKey(Connection conn, String tableName, String tableColumn, String foreignTableName, String foreignColumnName) { dao.addForeignKey(conn, tableName, tableColumn, foreignTableName, foreignColumnName); } diff --git a/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade41910to42000.java b/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade41910to42000.java index 4c26c6bde50b..5ce6b3abe918 100644 --- a/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade41910to42000.java +++ b/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade41910to42000.java @@ -58,6 +58,7 @@ public InputStream[] getPrepareScripts() { @Override public void performDataMigration(Connection conn) { checkAndUpdateAffinityGroupNameCharSetToUtf8mb4(conn); + addIndexes(conn); } @Override @@ -113,4 +114,42 @@ private void checkAndUpdateAffinityGroupNameCharSetToUtf8mb4(Connection conn) { logger.warn("Exception while updating char set for affinity group name to utf8mb4: " + e.getMessage()); } } + + private void addIndexes(Connection conn) { + DbUpgradeUtils.addIndexIfNeeded(conn, "host", "mgmt_server_id"); + DbUpgradeUtils.addIndexIfNeeded(conn, "host", "resource"); + DbUpgradeUtils.addIndexIfNeeded(conn, "host", "resource_state"); + DbUpgradeUtils.addIndexIfNeeded(conn, "host", "type"); + + DbUpgradeUtils.renameIndexIfNeeded(conn, "user_ip_address", "public_ip_address", "uk_public_ip_address"); + DbUpgradeUtils.addIndexIfNeeded(conn, "user_ip_address", "public_ip_address"); + DbUpgradeUtils.addIndexIfNeeded(conn, "user_ip_address", "data_center_id"); + DbUpgradeUtils.addIndexIfNeeded(conn, "user_ip_address", "vlan_db_id"); + DbUpgradeUtils.addIndexIfNeeded(conn, "user_ip_address", "removed"); + + DbUpgradeUtils.addIndexIfNeeded(conn, "vlan", "vlan_type"); + DbUpgradeUtils.addIndexIfNeeded(conn, "vlan", "data_center_id"); + DbUpgradeUtils.addIndexIfNeeded(conn, "vlan", "removed"); + + DbUpgradeUtils.addIndexIfNeeded(conn, "network_offering_details", "name"); + + DbUpgradeUtils.addIndexIfNeeded(conn, "network_offering_details", "resource_id", "resource_type"); + + DbUpgradeUtils.addIndexIfNeeded(conn, "service_offering", "cpu"); + DbUpgradeUtils.addIndexIfNeeded(conn, "service_offering", "speed"); + DbUpgradeUtils.addIndexIfNeeded(conn, "service_offering", "ram_size"); + + DbUpgradeUtils.addIndexIfNeeded(conn, "op_host_planner_reservation", "resource_usage"); + + DbUpgradeUtils.addIndexIfNeeded(conn, "storage_pool", "pool_type"); + DbUpgradeUtils.addIndexIfNeeded(conn, "storage_pool", "data_center_id", "status", "scope", "hypervisor"); + + DbUpgradeUtils.addIndexIfNeeded(conn, "router_network_ref", "guest_type"); + + DbUpgradeUtils.addIndexIfNeeded(conn, "domain_router", "role"); + + DbUpgradeUtils.addIndexIfNeeded(conn, "async_job", "instance_type", "job_status"); + + DbUpgradeUtils.addIndexIfNeeded(conn, "cluster", "managed_state"); + } } diff --git a/engine/schema/src/main/java/com/cloud/vm/dao/ConsoleProxyDao.java b/engine/schema/src/main/java/com/cloud/vm/dao/ConsoleProxyDao.java index cb19748fda46..af32163b4c70 100644 --- a/engine/schema/src/main/java/com/cloud/vm/dao/ConsoleProxyDao.java +++ b/engine/schema/src/main/java/com/cloud/vm/dao/ConsoleProxyDao.java @@ -45,7 +45,7 @@ public interface ConsoleProxyDao extends GenericDao { public List 
getDatacenterSessionLoadMatrix(); - public List> getDatacenterStoragePoolHostInfo(long dcId, boolean countAllPoolTypes); + public boolean hasDatacenterStoragePoolHostInfo(long dcId, boolean sharedOnly); public List> getProxyLoadMatrix(); diff --git a/engine/schema/src/main/java/com/cloud/vm/dao/ConsoleProxyDaoImpl.java b/engine/schema/src/main/java/com/cloud/vm/dao/ConsoleProxyDaoImpl.java index ef94a4d9f72e..bc79194a10fd 100644 --- a/engine/schema/src/main/java/com/cloud/vm/dao/ConsoleProxyDaoImpl.java +++ b/engine/schema/src/main/java/com/cloud/vm/dao/ConsoleProxyDaoImpl.java @@ -23,7 +23,6 @@ import java.util.Date; import java.util.List; - import org.springframework.stereotype.Component; import com.cloud.info.ConsoleProxyLoadInfo; @@ -76,11 +75,11 @@ public class ConsoleProxyDaoImpl extends GenericDaoBase im private static final String GET_PROXY_ACTIVE_LOAD = "SELECT active_session AS count" + " FROM console_proxy" + " WHERE id=?"; - private static final String STORAGE_POOL_HOST_INFO = "SELECT p.data_center_id, count(ph.host_id) " + " FROM storage_pool p, storage_pool_host_ref ph " - + " WHERE p.id = ph.pool_id AND p.data_center_id = ? " + " GROUP by p.data_center_id"; + protected static final String STORAGE_POOL_HOST_INFO = "SELECT (SELECT id FROM storage_pool_host_ref ph WHERE " + + "ph.pool_id=p.id limit 1) AS sphr FROM storage_pool p WHERE p.data_center_id = ?"; - private static final String SHARED_STORAGE_POOL_HOST_INFO = "SELECT p.data_center_id, count(ph.host_id) " + " FROM storage_pool p, storage_pool_host_ref ph " - + " WHERE p.pool_type <> 'LVM' AND p.id = ph.pool_id AND p.data_center_id = ? " + " GROUP by p.data_center_id"; + protected static final String SHARED_STORAGE_POOL_HOST_INFO = "SELECT (SELECT id FROM storage_pool_host_ref ph " + + "WHERE ph.pool_id=p.id limit 1) AS sphr FROM storage_pool p WHERE p.data_center_id = ? AND p.pool_type NOT IN ('LVM', 'Filesystem')"; protected SearchBuilder DataCenterStatusSearch; protected SearchBuilder StateSearch; @@ -219,28 +218,23 @@ public List> getProxyLoadMatrix() { } @Override - public List> getDatacenterStoragePoolHostInfo(long dcId, boolean countAllPoolTypes) { - ArrayList> l = new ArrayList>(); - + public boolean hasDatacenterStoragePoolHostInfo(long dcId, boolean sharedOnly) { + Long poolCount = 0L; + String sql = sharedOnly ? 
SHARED_STORAGE_POOL_HOST_INFO : STORAGE_POOL_HOST_INFO; TransactionLegacy txn = TransactionLegacy.currentTxn(); - ; - PreparedStatement pstmt = null; - try { - if (countAllPoolTypes) { - pstmt = txn.prepareAutoCloseStatement(STORAGE_POOL_HOST_INFO); - } else { - pstmt = txn.prepareAutoCloseStatement(SHARED_STORAGE_POOL_HOST_INFO); - } + try (PreparedStatement pstmt = txn.prepareAutoCloseStatement(sql)) { pstmt.setLong(1, dcId); ResultSet rs = pstmt.executeQuery(); while (rs.next()) { - l.add(new Pair(rs.getLong(1), rs.getInt(2))); + poolCount = rs.getLong(1); + if (poolCount > 0) { + return true; + } } } catch (SQLException e) { logger.debug("Caught SQLException: ", e); } - return l; + return false; } @Override diff --git a/engine/schema/src/main/java/com/cloud/vm/dao/NicIpAliasDaoImpl.java b/engine/schema/src/main/java/com/cloud/vm/dao/NicIpAliasDaoImpl.java index 887b3d73087e..44866c0a3585 100644 --- a/engine/schema/src/main/java/com/cloud/vm/dao/NicIpAliasDaoImpl.java +++ b/engine/schema/src/main/java/com/cloud/vm/dao/NicIpAliasDaoImpl.java @@ -170,8 +170,7 @@ public NicIpAliasVO findByIp4AddressAndNetworkIdAndInstanceId(long networkId, Lo public Integer countAliasIps(long id) { SearchCriteria sc = AllFieldsSearch.create(); sc.setParameters("instanceId", id); - List list = listBy(sc); - return list.size(); + return getCount(sc); } @Override diff --git a/engine/schema/src/main/java/com/cloud/vm/dao/VMInstanceDao.java b/engine/schema/src/main/java/com/cloud/vm/dao/VMInstanceDao.java index 52bc5aac7e25..4dee9130f513 100755 --- a/engine/schema/src/main/java/com/cloud/vm/dao/VMInstanceDao.java +++ b/engine/schema/src/main/java/com/cloud/vm/dao/VMInstanceDao.java @@ -16,6 +16,7 @@ // under the License. package com.cloud.vm.dao; +import java.util.Collection; import java.util.Date; import java.util.HashMap; import java.util.List; @@ -81,7 +82,7 @@ public interface VMInstanceDao extends GenericDao, StateDao< List listByHostAndState(long hostId, State... states); - List listByTypes(VirtualMachine.Type... types); + int countByTypes(VirtualMachine.Type... types); VMInstanceVO findByIdTypes(long id, VirtualMachine.Type... types); @@ -144,21 +145,28 @@ public interface VMInstanceDao extends GenericDao, StateDao< */ List listDistinctHostNames(long networkId, VirtualMachine.Type... types); + List findByHostInStatesExcluding(Long hostId, Collection excludingIds, State... states); + List findByHostInStates(Long hostId, State... 
states); List listStartingWithNoHostId(); boolean updatePowerState(long instanceId, long powerHostId, VirtualMachine.PowerState powerState, Date wisdomEra); + Map updatePowerState(Map instancePowerStates, + long powerHostId, Date wisdomEra); + void resetVmPowerStateTracking(long instanceId); + void resetVmPowerStateTracking(List instanceId); + void resetHostPowerStateTracking(long hostId); HashMap countVgpuVMs(Long dcId, Long podId, Long clusterId); VMInstanceVO findVMByHostNameInZone(String hostName, long zoneId); - boolean isPowerStateUpToDate(long instanceId); + boolean isPowerStateUpToDate(VMInstanceVO instance); List listNonMigratingVmsByHostEqualsLastHost(long hostId); @@ -170,4 +178,15 @@ List searchRemovedByRemoveDate(final Date startDate, final Date en List skippedVmIds); Pair, Integer> listByVmsNotInClusterUsingPool(long clusterId, long poolId); + + List listIdServiceOfferingForUpVmsByHostId(Long hostId); + + List listIdServiceOfferingForVmsMigratingFromHost(Long hostId); + + List listIdServiceOfferingForVmsByLastHostId(Long hostId); + + Map getNameIdMapForVmInstanceNames(Collection names); + + Map getNameIdMapForVmIds(Collection ids); + } diff --git a/engine/schema/src/main/java/com/cloud/vm/dao/VMInstanceDaoImpl.java b/engine/schema/src/main/java/com/cloud/vm/dao/VMInstanceDaoImpl.java index 744518ba7433..9df5795f69eb 100755 --- a/engine/schema/src/main/java/com/cloud/vm/dao/VMInstanceDaoImpl.java +++ b/engine/schema/src/main/java/com/cloud/vm/dao/VMInstanceDaoImpl.java @@ -20,6 +20,7 @@ import java.sql.ResultSet; import java.sql.SQLException; import java.util.ArrayList; +import java.util.Collection; import java.util.Date; import java.util.HashMap; import java.util.List; @@ -75,6 +76,7 @@ public class VMInstanceDaoImpl extends GenericDaoBase implem protected SearchBuilder LHVMClusterSearch; protected SearchBuilder IdStatesSearch; protected SearchBuilder AllFieldsSearch; + protected SearchBuilder IdServiceOfferingIdSelectSearch; protected SearchBuilder ZoneTemplateNonExpungedSearch; protected SearchBuilder TemplateNonExpungedSearch; protected SearchBuilder NameLikeSearch; @@ -101,6 +103,7 @@ public class VMInstanceDaoImpl extends GenericDaoBase implem protected SearchBuilder BackupSearch; protected SearchBuilder LastHostAndStatesSearch; protected SearchBuilder VmsNotInClusterUsingPool; + protected SearchBuilder IdsPowerStateSelectSearch; @Inject ResourceTagDao tagsDao; @@ -177,6 +180,14 @@ protected void init() { AllFieldsSearch.and("account", AllFieldsSearch.entity().getAccountId(), Op.EQ); AllFieldsSearch.done(); + IdServiceOfferingIdSelectSearch = createSearchBuilder(); + IdServiceOfferingIdSelectSearch.and("host", IdServiceOfferingIdSelectSearch.entity().getHostId(), Op.EQ); + IdServiceOfferingIdSelectSearch.and("lastHost", IdServiceOfferingIdSelectSearch.entity().getLastHostId(), Op.EQ); + IdServiceOfferingIdSelectSearch.and("state", IdServiceOfferingIdSelectSearch.entity().getState(), Op.EQ); + IdServiceOfferingIdSelectSearch.and("states", IdServiceOfferingIdSelectSearch.entity().getState(), Op.IN); + IdServiceOfferingIdSelectSearch.selectFields(IdServiceOfferingIdSelectSearch.entity().getId(), IdServiceOfferingIdSelectSearch.entity().getServiceOfferingId()); + IdServiceOfferingIdSelectSearch.done(); + ZoneTemplateNonExpungedSearch = createSearchBuilder(); ZoneTemplateNonExpungedSearch.and("zone", ZoneTemplateNonExpungedSearch.entity().getDataCenterId(), Op.EQ); ZoneTemplateNonExpungedSearch.and("template", ZoneTemplateNonExpungedSearch.entity().getTemplateId(), Op.EQ); @@ 
-276,6 +287,7 @@ protected void init() { HostAndStateSearch = createSearchBuilder(); HostAndStateSearch.and("host", HostAndStateSearch.entity().getHostId(), Op.EQ); HostAndStateSearch.and("states", HostAndStateSearch.entity().getState(), Op.IN); + HostAndStateSearch.and("idsNotIn", HostAndStateSearch.entity().getId(), Op.NIN); HostAndStateSearch.done(); StartingWithNoHostSearch = createSearchBuilder(); @@ -325,6 +337,15 @@ protected void init() { VmsNotInClusterUsingPool.join("hostSearch2", hostSearch2, hostSearch2.entity().getId(), VmsNotInClusterUsingPool.entity().getHostId(), JoinType.INNER); VmsNotInClusterUsingPool.and("vmStates", VmsNotInClusterUsingPool.entity().getState(), Op.IN); VmsNotInClusterUsingPool.done(); + + IdsPowerStateSelectSearch = createSearchBuilder(); + IdsPowerStateSelectSearch.and("id", IdsPowerStateSelectSearch.entity().getId(), Op.IN); + IdsPowerStateSelectSearch.selectFields(IdsPowerStateSelectSearch.entity().getId(), + IdsPowerStateSelectSearch.entity().getPowerHostId(), + IdsPowerStateSelectSearch.entity().getPowerState(), + IdsPowerStateSelectSearch.entity().getPowerStateUpdateCount(), + IdsPowerStateSelectSearch.entity().getPowerStateUpdateTime()); + IdsPowerStateSelectSearch.done(); } @Override @@ -460,10 +481,10 @@ public List listUpByHostId(Long hostId) { } @Override - public List listByTypes(Type... types) { + public int countByTypes(Type... types) { SearchCriteria sc = TypesSearch.create(); sc.setParameters("types", (Object[])types); - return listBy(sc); + return getCount(sc); } @Override @@ -899,6 +920,17 @@ public boolean remove(Long id) { return result; } + @Override + public List findByHostInStatesExcluding(Long hostId, Collection excludingIds, State... states) { + SearchCriteria sc = HostAndStateSearch.create(); + sc.setParameters("host", hostId); + if (excludingIds != null && !excludingIds.isEmpty()) { + sc.setParameters("idsNotIn", excludingIds.toArray()); + } + sc.setParameters("states", (Object[])states); + return listBy(sc); + } + @Override public List findByHostInStates(Long hostId, State... 
states) { SearchCriteria sc = HostAndStateSearch.create(); @@ -914,40 +946,107 @@ public List listStartingWithNoHostId() { return listBy(sc); } + protected List listSelectPowerStateByIds(final List ids) { + if (CollectionUtils.isEmpty(ids)) { + return new ArrayList<>(); + } + SearchCriteria sc = IdsPowerStateSelectSearch.create(); + sc.setParameters("id", ids.toArray()); + return customSearch(sc, null); + } + + protected Integer getPowerUpdateCount(final VMInstanceVO instance, final long powerHostId, + final VirtualMachine.PowerState powerState, Date wisdomEra) { + if (instance.getPowerStateUpdateTime() == null || instance.getPowerStateUpdateTime().before(wisdomEra)) { + Long savedPowerHostId = instance.getPowerHostId(); + boolean isStateMismatch = instance.getPowerState() != powerState + || savedPowerHostId == null + || !savedPowerHostId.equals(powerHostId) + || !isPowerStateInSyncWithInstanceState(powerState, powerHostId, instance); + if (isStateMismatch) { + return 1; + } else if (instance.getPowerStateUpdateCount() < MAX_CONSECUTIVE_SAME_STATE_UPDATE_COUNT) { + return instance.getPowerStateUpdateCount() + 1; + } + } + return null; + } + @Override - public boolean updatePowerState(final long instanceId, final long powerHostId, final VirtualMachine.PowerState powerState, Date wisdomEra) { - return Transaction.execute(new TransactionCallback<>() { - @Override - public Boolean doInTransaction(TransactionStatus status) { - boolean needToUpdate = false; - VMInstanceVO instance = findById(instanceId); - if (instance != null - && (null == instance.getPowerStateUpdateTime() - || instance.getPowerStateUpdateTime().before(wisdomEra))) { - Long savedPowerHostId = instance.getPowerHostId(); - if (instance.getPowerState() != powerState - || savedPowerHostId == null - || savedPowerHostId != powerHostId - || !isPowerStateInSyncWithInstanceState(powerState, powerHostId, instance)) { - instance.setPowerState(powerState); - instance.setPowerHostId(powerHostId); - instance.setPowerStateUpdateCount(1); - instance.setPowerStateUpdateTime(DateUtil.currentGMTTime()); - needToUpdate = true; - update(instanceId, instance); - } else { - // to reduce DB updates, consecutive same state update for more than 3 times - if (instance.getPowerStateUpdateCount() < MAX_CONSECUTIVE_SAME_STATE_UPDATE_COUNT) { - instance.setPowerStateUpdateCount(instance.getPowerStateUpdateCount() + 1); - instance.setPowerStateUpdateTime(DateUtil.currentGMTTime()); - needToUpdate = true; - update(instanceId, instance); - } - } + public boolean updatePowerState(final long instanceId, final long powerHostId, + final VirtualMachine.PowerState powerState, Date wisdomEra) { + return Transaction.execute((TransactionCallback) status -> { + VMInstanceVO instance = findById(instanceId); + if (instance == null) { + return false; + } + // Check if we need to update based on powerStateUpdateTime + if (instance.getPowerStateUpdateTime() == null || instance.getPowerStateUpdateTime().before(wisdomEra)) { + Long savedPowerHostId = instance.getPowerHostId(); + boolean isStateMismatch = instance.getPowerState() != powerState + || savedPowerHostId == null + || !savedPowerHostId.equals(powerHostId) + || !isPowerStateInSyncWithInstanceState(powerState, powerHostId, instance); + + if (isStateMismatch) { + instance.setPowerState(powerState); + instance.setPowerHostId(powerHostId); + instance.setPowerStateUpdateCount(1); + } else if (instance.getPowerStateUpdateCount() < MAX_CONSECUTIVE_SAME_STATE_UPDATE_COUNT) { + 
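// Reviewer note: getPowerUpdateCount() above centralises the write-throttle that updatePowerState()
// used to inline. A minimal sketch of the decision it makes for a row older than wisdomEra
// (stateOrHostMismatch is a placeholder for the isStateMismatch check; the constant is the existing
// MAX_CONSECUTIVE_SAME_STATE_UPDATE_COUNT):
//
//     Integer next;
//     if (stateOrHostMismatch) {
//         next = 1;                                               // state or power host changed: restart the counter
//     } else if (instance.getPowerStateUpdateCount() < MAX_CONSECUTIVE_SAME_STATE_UPDATE_COUNT) {
//         next = instance.getPowerStateUpdateCount() + 1;         // same state again: just bump the counter
//     } else {
//         next = null;                                            // cap reached: skip the UPDATE for this row
//     }
//
// A null return (also produced when power_state_update_time is newer than wisdomEra) tells the
// batch updatePowerState() below to leave that row out of the bulk UPDATE.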
instance.setPowerStateUpdateCount(instance.getPowerStateUpdateCount() + 1); + } else { + // No need to update if power state is already in sync and count exceeded + return false; } - return needToUpdate; + instance.setPowerStateUpdateTime(DateUtil.currentGMTTime()); + update(instanceId, instance); + return true; // Return true since an update occurred + } + return false; + }); + } + + @Override + public Map updatePowerState( + final Map instancePowerStates, long powerHostId, Date wisdomEra) { + Map notUpdated = new HashMap<>(); + List instances = listSelectPowerStateByIds(new ArrayList<>(instancePowerStates.keySet())); + Map updateCounts = new HashMap<>(); + for (VMInstanceVO instance : instances) { + VirtualMachine.PowerState powerState = instancePowerStates.get(instance.getId()); + Integer count = getPowerUpdateCount(instance, powerHostId, powerState, wisdomEra); + if (count != null) { + updateCounts.put(instance.getId(), count); + } else { + notUpdated.put(instance.getId(), powerState); } + } + if (updateCounts.isEmpty()) { + return notUpdated; + } + StringBuilder sql = new StringBuilder("UPDATE `cloud`.`vm_instance` SET " + + "`power_host` = ?, `power_state_update_time` = now(), `power_state` = CASE "); + updateCounts.keySet().forEach(key -> { + sql.append("WHEN id = ").append(key).append(" THEN '").append(instancePowerStates.get(key)).append("' "); + }); + sql.append("END, `power_state_update_count` = CASE "); + StringBuilder idList = new StringBuilder(); + updateCounts.forEach((key, value) -> { + sql.append("WHEN `id` = ").append(key).append(" THEN ").append(value).append(" "); + idList.append(key).append(","); }); + idList.setLength(idList.length() - 1); + sql.append("END WHERE `id` IN (").append(idList).append(")"); + TransactionLegacy txn = TransactionLegacy.currentTxn(); + try (PreparedStatement pstmt = txn.prepareAutoCloseStatement(sql.toString())) { + pstmt.setLong(1, powerHostId); + pstmt.executeUpdate(); + } catch (SQLException e) { + logger.error(String.format("Unable to execute update power states SQL from VMs %s due to: %s", + idList, e.getMessage()), e); + return instancePowerStates; + } + return notUpdated; } private boolean isPowerStateInSyncWithInstanceState(final VirtualMachine.PowerState powerState, final long powerHostId, final VMInstanceVO instance) { @@ -962,11 +1061,7 @@ private boolean isPowerStateInSyncWithInstanceState(final VirtualMachine.PowerSt } @Override - public boolean isPowerStateUpToDate(final long instanceId) { - VMInstanceVO instance = findById(instanceId); - if(instance == null) { - throw new CloudRuntimeException("checking power state update count on non existing instance " + instanceId); - } + public boolean isPowerStateUpToDate(final VMInstanceVO instance) { return instance.getPowerStateUpdateCount() < MAX_CONSECUTIVE_SAME_STATE_UPDATE_COUNT; } @@ -985,6 +1080,22 @@ public void doInTransactionWithoutResult(TransactionStatus status) { }); } + @Override + public void resetVmPowerStateTracking(List instanceIds) { + Transaction.execute(new TransactionCallbackNoReturn() { + @Override + public void doInTransactionWithoutResult(TransactionStatus status) { + SearchCriteria sc = IdsPowerStateSelectSearch.create(); + sc.setParameters("id", instanceIds.toArray()); + VMInstanceVO vm = createForUpdate(); + vm.setPowerStateUpdateCount(0); + vm.setPowerStateUpdateTime(DateUtil.currentGMTTime()); + UpdateBuilder ub = getUpdateBuilder(vm); + update(ub, sc, null); + } + }); + } + @Override @DB public void resetHostPowerStateTracking(final long hostId) { 
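// Reviewer note: the map-based updatePowerState() above collapses what used to be one UPDATE per VM
// into a single statement with CASE expressions keyed on id. For two VMs (ids 10 and 11, both
// reported PowerOn) the SQL built by the StringBuilder would look roughly like this (illustrative
// ids and states; only power_host is bound as a JDBC parameter):
//
//     UPDATE `cloud`.`vm_instance`
//     SET `power_host` = ?, `power_state_update_time` = now(),
//         `power_state` = CASE WHEN id = 10 THEN 'PowerOn' WHEN id = 11 THEN 'PowerOn' END,
//         `power_state_update_count` = CASE WHEN `id` = 10 THEN 1 WHEN `id` = 11 THEN 2 END
//     WHERE `id` IN (10,11)
//
// Rows for which getPowerUpdateCount() returned null are handed back in the notUpdated map, and on
// a SQLException the whole input map is reported back as not updated.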
Transaction.execute(new TransactionCallbackNoReturn() { @@ -1060,6 +1171,7 @@ public List searchRemovedByRemoveDate(Date startDate, Date endDate return searchIncludingRemoved(sc, filter, null, false); } + @Override public Pair, Integer> listByVmsNotInClusterUsingPool(long clusterId, long poolId) { SearchCriteria sc = VmsNotInClusterUsingPool.create(); sc.setParameters("vmStates", State.Starting, State.Running, State.Stopping, State.Migrating, State.Restoring); @@ -1069,4 +1181,52 @@ public Pair, Integer> listByVmsNotInClusterUsingPool(long clu List uniqueVms = vms.stream().distinct().collect(Collectors.toList()); return new Pair<>(uniqueVms, uniqueVms.size()); } + + @Override + public List listIdServiceOfferingForUpVmsByHostId(Long hostId) { + SearchCriteria sc = IdServiceOfferingIdSelectSearch.create(); + sc.setParameters("host", hostId); + sc.setParameters("states", new Object[] {State.Starting, State.Running, State.Stopping, State.Migrating}); + return customSearch(sc, null); + } + + @Override + public List listIdServiceOfferingForVmsMigratingFromHost(Long hostId) { + SearchCriteria sc = IdServiceOfferingIdSelectSearch.create(); + sc.setParameters("lastHost", hostId); + sc.setParameters("state", State.Migrating); + return customSearch(sc, null); + } + + @Override + public List listIdServiceOfferingForVmsByLastHostId(Long hostId) { + SearchCriteria sc = IdServiceOfferingIdSelectSearch.create(); + sc.setParameters("lastHost", hostId); + sc.setParameters("state", State.Stopped); + return customSearch(sc, null); + } + + @Override + public Map getNameIdMapForVmInstanceNames(Collection names) { + SearchBuilder sb = createSearchBuilder(); + sb.and("name", sb.entity().getInstanceName(), Op.IN); + sb.selectFields(sb.entity().getId(), sb.entity().getInstanceName()); + SearchCriteria sc = sb.create(); + sc.setParameters("name", names.toArray()); + List vms = customSearch(sc, null); + return vms.stream() + .collect(Collectors.toMap(VMInstanceVO::getInstanceName, VMInstanceVO::getId)); + } + + @Override + public Map getNameIdMapForVmIds(Collection ids) { + SearchBuilder sb = createSearchBuilder(); + sb.and("id", sb.entity().getId(), Op.IN); + sb.selectFields(sb.entity().getId(), sb.entity().getInstanceName()); + SearchCriteria sc = sb.create(); + sc.setParameters("id", ids.toArray()); + List vms = customSearch(sc, null); + return vms.stream() + .collect(Collectors.toMap(VMInstanceVO::getInstanceName, VMInstanceVO::getId)); + } } diff --git a/engine/schema/src/main/java/org/apache/cloudstack/resourcedetail/ResourceDetailsDao.java b/engine/schema/src/main/java/org/apache/cloudstack/resourcedetail/ResourceDetailsDao.java index 8f3d264da981..6d0d9378c7c5 100644 --- a/engine/schema/src/main/java/org/apache/cloudstack/resourcedetail/ResourceDetailsDao.java +++ b/engine/schema/src/main/java/org/apache/cloudstack/resourcedetail/ResourceDetailsDao.java @@ -88,6 +88,8 @@ public interface ResourceDetailsDao extends GenericDao public Map listDetailsKeyPairs(long resourceId); + Map listDetailsKeyPairs(long resourceId, List keys); + public Map listDetailsKeyPairs(long resourceId, boolean forDisplay); Map listDetailsVisibility(long resourceId); diff --git a/engine/schema/src/main/java/org/apache/cloudstack/resourcedetail/ResourceDetailsDaoBase.java b/engine/schema/src/main/java/org/apache/cloudstack/resourcedetail/ResourceDetailsDaoBase.java index 4205a7823e44..f2e156f225ae 100644 --- a/engine/schema/src/main/java/org/apache/cloudstack/resourcedetail/ResourceDetailsDaoBase.java +++ 
b/engine/schema/src/main/java/org/apache/cloudstack/resourcedetail/ResourceDetailsDaoBase.java @@ -19,6 +19,7 @@ import java.util.HashMap; import java.util.List; import java.util.Map; +import java.util.stream.Collectors; import org.apache.cloudstack.api.ResourceDetail; import org.apache.commons.collections.CollectionUtils; @@ -91,6 +92,20 @@ public Map listDetailsKeyPairs(long resourceId) { return details; } + @Override + public Map listDetailsKeyPairs(long resourceId, List keys) { + SearchBuilder sb = createSearchBuilder(); + sb.and("resourceId", sb.entity().getResourceId(), SearchCriteria.Op.EQ); + sb.and("name", sb.entity().getName(), SearchCriteria.Op.IN); + sb.done(); + SearchCriteria sc = sb.create(); + sc.setParameters("resourceId", resourceId); + sc.setParameters("name", keys.toArray()); + + List results = search(sc, null); + return results.stream().collect(Collectors.toMap(R::getName, R::getValue)); + } + public Map listDetailsVisibility(long resourceId) { SearchCriteria sc = AllFieldsSearch.create(); sc.setParameters("resourceId", resourceId); diff --git a/engine/schema/src/main/java/org/apache/cloudstack/storage/datastore/db/PrimaryDataStoreDaoImpl.java b/engine/schema/src/main/java/org/apache/cloudstack/storage/datastore/db/PrimaryDataStoreDaoImpl.java index 1658fe0a537e..07b0b8b517c4 100644 --- a/engine/schema/src/main/java/org/apache/cloudstack/storage/datastore/db/PrimaryDataStoreDaoImpl.java +++ b/engine/schema/src/main/java/org/apache/cloudstack/storage/datastore/db/PrimaryDataStoreDaoImpl.java @@ -28,20 +28,20 @@ import javax.inject.Inject; import javax.naming.ConfigurationException; -import com.cloud.storage.Storage; -import com.cloud.utils.Pair; -import com.cloud.utils.db.Filter; import org.apache.commons.collections.CollectionUtils; import com.cloud.host.Status; import com.cloud.hypervisor.Hypervisor.HypervisorType; import com.cloud.storage.ScopeType; +import com.cloud.storage.Storage; import com.cloud.storage.StoragePoolHostVO; import com.cloud.storage.StoragePoolStatus; import com.cloud.storage.StoragePoolTagVO; import com.cloud.storage.dao.StoragePoolHostDao; import com.cloud.storage.dao.StoragePoolTagsDao; +import com.cloud.utils.Pair; import com.cloud.utils.db.DB; +import com.cloud.utils.db.Filter; import com.cloud.utils.db.GenericDaoBase; import com.cloud.utils.db.GenericSearchBuilder; import com.cloud.utils.db.JoinBuilder; diff --git a/engine/schema/src/main/resources/META-INF/db/views/cloud.network_offering_view.sql b/engine/schema/src/main/resources/META-INF/db/views/cloud.network_offering_view.sql index b6abaabcd48f..640b2397a461 100644 --- a/engine/schema/src/main/resources/META-INF/db/views/cloud.network_offering_view.sql +++ b/engine/schema/src/main/resources/META-INF/db/views/cloud.network_offering_view.sql @@ -76,13 +76,9 @@ SELECT FROM `cloud`.`network_offerings` LEFT JOIN - `cloud`.`network_offering_details` AS `domain_details` ON `domain_details`.`network_offering_id` = `network_offerings`.`id` AND `domain_details`.`name`='domainid' + `cloud`.`domain` AS `domain` ON `domain`.id IN (SELECT value from `network_offering_details` where `name` = 'domainid' and `network_offering_id` = `network_offerings`.`id`) LEFT JOIN - `cloud`.`domain` AS `domain` ON FIND_IN_SET(`domain`.`id`, `domain_details`.`value`) - LEFT JOIN - `cloud`.`network_offering_details` AS `zone_details` ON `zone_details`.`network_offering_id` = `network_offerings`.`id` AND `zone_details`.`name`='zoneid' - LEFT JOIN - `cloud`.`data_center` AS `zone` ON FIND_IN_SET(`zone`.`id`, 
`zone_details`.`value`) + `cloud`.`data_center` AS `zone` ON `zone`.`id` IN (SELECT value from `network_offering_details` where `name` = 'zoneid' and `network_offering_id` = `network_offerings`.`id`) LEFT JOIN `cloud`.`network_offering_details` AS `offering_details` ON `offering_details`.`network_offering_id` = `network_offerings`.`id` AND `offering_details`.`name`='internetProtocol' GROUP BY diff --git a/engine/schema/src/test/java/com/cloud/capacity/dao/CapacityDaoImplTest.java b/engine/schema/src/test/java/com/cloud/capacity/dao/CapacityDaoImplTest.java new file mode 100644 index 000000000000..76c1092546a6 --- /dev/null +++ b/engine/schema/src/test/java/com/cloud/capacity/dao/CapacityDaoImplTest.java @@ -0,0 +1,99 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +package com.cloud.capacity.dao; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertSame; +import static org.junit.Assert.assertTrue; +import static org.mockito.Mockito.any; +import static org.mockito.Mockito.doReturn; +import static org.mockito.Mockito.eq; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.when; + +import java.util.Arrays; +import java.util.Collections; +import java.util.List; + +import org.junit.Before; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.mockito.InjectMocks; +import org.mockito.Mockito; +import org.mockito.Spy; +import org.mockito.junit.MockitoJUnitRunner; + +import com.cloud.capacity.CapacityVO; +import com.cloud.utils.db.SearchBuilder; +import com.cloud.utils.db.SearchCriteria; + +@RunWith(MockitoJUnitRunner.class) +public class CapacityDaoImplTest { + @Spy + @InjectMocks + CapacityDaoImpl capacityDao = new CapacityDaoImpl(); + + private SearchBuilder searchBuilder; + private SearchCriteria searchCriteria; + + @Before + public void setUp() { + searchBuilder = mock(SearchBuilder.class); + CapacityVO capacityVO = mock(CapacityVO.class); + when(searchBuilder.entity()).thenReturn(capacityVO); + searchCriteria = mock(SearchCriteria.class); + doReturn(searchBuilder).when(capacityDao).createSearchBuilder(); + when(searchBuilder.create()).thenReturn(searchCriteria); + } + + @Test + public void testListByHostIdTypes() { + // Prepare inputs + Long hostId = 1L; + List capacityTypes = Arrays.asList((short)1, (short)2); + CapacityVO capacity1 = new CapacityVO(); + CapacityVO capacity2 = new CapacityVO(); + List mockResult = Arrays.asList(capacity1, capacity2); + doReturn(mockResult).when(capacityDao).listBy(any(SearchCriteria.class)); + List result = capacityDao.listByHostIdTypes(hostId, capacityTypes); + verify(searchBuilder).and(eq("hostId"), any(), eq(SearchCriteria.Op.EQ)); + verify(searchBuilder).and(eq("type"), any(), 
eq(SearchCriteria.Op.IN)); + verify(searchBuilder).done(); + verify(searchCriteria).setParameters("hostId", hostId); + verify(searchCriteria).setParameters("type", capacityTypes.toArray()); + verify(capacityDao).listBy(searchCriteria); + assertEquals(2, result.size()); + assertSame(capacity1, result.get(0)); + assertSame(capacity2, result.get(1)); + } + + @Test + public void testListByHostIdTypesEmptyResult() { + Long hostId = 1L; + List capacityTypes = Arrays.asList((short)1, (short)2); + doReturn(Collections.emptyList()).when(capacityDao).listBy(any(SearchCriteria.class)); + List result = capacityDao.listByHostIdTypes(hostId, capacityTypes); + verify(searchBuilder).and(Mockito.eq("hostId"), any(), eq(SearchCriteria.Op.EQ)); + verify(searchBuilder).and(eq("type"), any(), eq(SearchCriteria.Op.IN)); + verify(searchBuilder).done(); + verify(searchCriteria).setParameters("hostId", hostId); + verify(searchCriteria).setParameters("type", capacityTypes.toArray()); + verify(capacityDao).listBy(searchCriteria); + assertTrue(result.isEmpty()); + } +} diff --git a/engine/schema/src/test/java/com/cloud/dc/dao/ClusterDaoImplTest.java b/engine/schema/src/test/java/com/cloud/dc/dao/ClusterDaoImplTest.java new file mode 100644 index 000000000000..a513809be057 --- /dev/null +++ b/engine/schema/src/test/java/com/cloud/dc/dao/ClusterDaoImplTest.java @@ -0,0 +1,78 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
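// Reviewer note: the new DAO tests in this patch (CapacityDaoImplTest above, ClusterDaoImplTest
// below, and the ResourceDetailsDaoBase/PrimaryDataStoreDao/VmWorkJobDao tests that follow) all
// reuse the same database-free pattern: spy the DAO, stub the search-builder plumbing, then verify
// the parameters that reach the SearchCriteria. In outline (variable names as used in these tests;
// expectedRows is just a test fixture):
//
//     @Spy @InjectMocks CapacityDaoImpl capacityDao = new CapacityDaoImpl();
//     SearchBuilder searchBuilder = mock(SearchBuilder.class);
//     when(searchBuilder.entity()).thenReturn(mock(CapacityVO.class));   // entity() proxy consumed by and()
//     doReturn(searchBuilder).when(capacityDao).createSearchBuilder();
//     when(searchBuilder.create()).thenReturn(mock(SearchCriteria.class));
//     doReturn(expectedRows).when(capacityDao).listBy(any(SearchCriteria.class));
//     // exercise the DAO method, then verify(searchBuilder).and(...) / verify(searchCriteria).setParameters(...)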
+package com.cloud.dc.dao; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertTrue; +import static org.mockito.Mockito.any; +import static org.mockito.Mockito.doReturn; +import static org.mockito.Mockito.isNull; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.when; + +import java.util.Arrays; +import java.util.Collections; +import java.util.List; + +import org.junit.Before; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.mockito.InjectMocks; +import org.mockito.Spy; +import org.mockito.junit.MockitoJUnitRunner; + +import com.cloud.dc.ClusterVO; +import com.cloud.utils.db.GenericSearchBuilder; +import com.cloud.utils.db.SearchBuilder; + +@RunWith(MockitoJUnitRunner.class) +public class ClusterDaoImplTest { + @Spy + @InjectMocks + ClusterDaoImpl clusterDao = new ClusterDaoImpl(); + + private GenericSearchBuilder genericSearchBuilder; + + @Before + public void setUp() { + genericSearchBuilder = mock(SearchBuilder.class); + ClusterVO entityVO = mock(ClusterVO.class); + when(genericSearchBuilder.entity()).thenReturn(entityVO); + doReturn(genericSearchBuilder).when(clusterDao).createSearchBuilder(Long.class); + } + + @Test + public void testListAllIds() { + List mockIds = Arrays.asList(1L, 2L, 3L); + doReturn(mockIds).when(clusterDao).customSearch(any(), isNull()); + List result = clusterDao.listAllIds(); + verify(clusterDao).customSearch(genericSearchBuilder.create(), null); + assertEquals(3, result.size()); + assertEquals(Long.valueOf(1L), result.get(0)); + assertEquals(Long.valueOf(2L), result.get(1)); + assertEquals(Long.valueOf(3L), result.get(2)); + } + + @Test + public void testListAllIdsEmptyResult() { + doReturn(Collections.emptyList()).when(clusterDao).customSearch(any(), isNull()); + List result = clusterDao.listAllIds(); + verify(clusterDao).customSearch(genericSearchBuilder.create(), null); + assertTrue(result.isEmpty()); + } +} diff --git a/engine/schema/src/test/java/com/cloud/usage/dao/UsageStorageDaoImplTest.java b/engine/schema/src/test/java/com/cloud/usage/dao/UsageStorageDaoImplTest.java index 05d9154b6a45..fa47d2cd90b4 100644 --- a/engine/schema/src/test/java/com/cloud/usage/dao/UsageStorageDaoImplTest.java +++ b/engine/schema/src/test/java/com/cloud/usage/dao/UsageStorageDaoImplTest.java @@ -23,12 +23,9 @@ import static org.mockito.Mockito.when; import java.sql.PreparedStatement; -import com.cloud.utils.DateUtil; -import com.cloud.utils.db.TransactionLegacy; import java.util.Date; import java.util.TimeZone; -import com.cloud.usage.UsageStorageVO; import org.junit.Test; import org.junit.runner.RunWith; import org.mockito.Mock; @@ -36,6 +33,10 @@ import org.mockito.Mockito; import org.mockito.junit.MockitoJUnitRunner; +import com.cloud.usage.UsageStorageVO; +import com.cloud.utils.DateUtil; +import com.cloud.utils.db.TransactionLegacy; + @RunWith(MockitoJUnitRunner.class) public class UsageStorageDaoImplTest { diff --git a/engine/schema/src/test/java/org/apache/cloudstack/resourcedetail/ResourceDetailsDaoBaseTest.java b/engine/schema/src/test/java/org/apache/cloudstack/resourcedetail/ResourceDetailsDaoBaseTest.java new file mode 100644 index 000000000000..4c54599c396c --- /dev/null +++ b/engine/schema/src/test/java/org/apache/cloudstack/resourcedetail/ResourceDetailsDaoBaseTest.java @@ -0,0 +1,181 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. 
See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +package org.apache.cloudstack.resourcedetail; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertTrue; +import static org.mockito.ArgumentMatchers.isNull; +import static org.mockito.Mockito.any; +import static org.mockito.Mockito.doReturn; +import static org.mockito.Mockito.eq; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.when; + +import java.util.Arrays; +import java.util.Collections; +import java.util.List; +import java.util.Map; + +import javax.persistence.Column; +import javax.persistence.Entity; +import javax.persistence.GeneratedValue; +import javax.persistence.GenerationType; +import javax.persistence.Id; +import javax.persistence.Table; + +import org.apache.cloudstack.api.ResourceDetail; +import org.junit.Before; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.mockito.InjectMocks; +import org.mockito.Spy; +import org.mockito.junit.MockitoJUnitRunner; + +import com.cloud.utils.db.SearchBuilder; +import com.cloud.utils.db.SearchCriteria; + +@RunWith(MockitoJUnitRunner.class) +public class ResourceDetailsDaoBaseTest { + @Spy + @InjectMocks + TestDetailsDao testDetailsDao = new TestDetailsDao(); + + private SearchBuilder searchBuilder; + private SearchCriteria searchCriteria; + + @Before + public void setUp() { + searchBuilder = mock(SearchBuilder.class); + searchCriteria = mock(SearchCriteria.class); + TestDetailVO entityVO = mock(TestDetailVO.class); + when(searchBuilder.entity()).thenReturn(entityVO); + searchCriteria = mock(SearchCriteria.class); + doReturn(searchBuilder).when(testDetailsDao).createSearchBuilder(); + when(searchBuilder.create()).thenReturn(searchCriteria); + } + + @Test + public void testListDetailsKeyPairs() { + long resourceId = 1L; + List keys = Arrays.asList("key1", "key2"); + TestDetailVO result1 = mock(TestDetailVO.class); + when(result1.getName()).thenReturn("key1"); + when(result1.getValue()).thenReturn("value1"); + TestDetailVO result2 = mock(TestDetailVO.class); + when(result2.getName()).thenReturn("key2"); + when(result2.getValue()).thenReturn("value2"); + List mockResults = Arrays.asList(result1, result2); + doReturn(mockResults).when(testDetailsDao).search(any(SearchCriteria.class), isNull()); + Map result = testDetailsDao.listDetailsKeyPairs(resourceId, keys); + verify(searchBuilder).and(eq("resourceId"), any(), eq(SearchCriteria.Op.EQ)); + verify(searchBuilder).and(eq("name"), any(), eq(SearchCriteria.Op.IN)); + verify(searchBuilder).done(); + verify(searchCriteria).setParameters("resourceId", resourceId); + verify(searchCriteria).setParameters("name", keys.toArray()); + verify(testDetailsDao).search(searchCriteria, null); + assertEquals(2, result.size()); + assertEquals("value1", result.get("key1")); + assertEquals("value2", 
result.get("key2")); + } + + @Test + public void testListDetailsKeyPairsEmptyResult() { + long resourceId = 1L; + List keys = Arrays.asList("key1", "key2"); + doReturn(Collections.emptyList()).when(testDetailsDao).search(any(SearchCriteria.class), isNull()); + Map result = testDetailsDao.listDetailsKeyPairs(resourceId, keys); + verify(searchBuilder).and(eq("resourceId"), any(), eq(SearchCriteria.Op.EQ)); + verify(searchBuilder).and(eq("name"), any(), eq(SearchCriteria.Op.IN)); + verify(searchBuilder).done(); + verify(searchCriteria).setParameters("resourceId", resourceId); + verify(searchCriteria).setParameters("name", keys.toArray()); + verify(testDetailsDao).search(searchCriteria, null); + assertTrue(result.isEmpty()); + } + + protected static class TestDetailsDao extends ResourceDetailsDaoBase { + @Override + public void addDetail(long resourceId, String key, String value, boolean display) { + super.addDetail(new TestDetailVO(resourceId, key, value, display)); + } + } + + @Entity + @Table(name = "test_details") + protected static class TestDetailVO implements ResourceDetail { + @Id + @GeneratedValue(strategy = GenerationType.IDENTITY) + @Column(name = "id") + private long id; + + @Column(name = "resource_id") + private long resourceId; + + @Column(name = "name") + private String name; + + @Column(name = "value") + private String value; + + @Column(name = "display") + private boolean display = true; + + public TestDetailVO() { + } + + public TestDetailVO(long resourceId, String name, String value, boolean display) { + this.resourceId = resourceId; + this.name = name; + this.value = value; + this.display = display; + } + + @Override + public long getId() { + return id; + } + + @Override + public String getName() { + return name; + } + + @Override + public String getValue() { + return value; + } + + @Override + public long getResourceId() { + return resourceId; + } + + @Override + public boolean isDisplay() { + return display; + } + + public void setName(String name) { + this.name = name; + } + + public void setValue(String value) { + this.value = value; + } + } +} diff --git a/engine/schema/src/test/java/org/apache/cloudstack/storage/datastore/db/PrimaryDataStoreDaoImplTest.java b/engine/schema/src/test/java/org/apache/cloudstack/storage/datastore/db/PrimaryDataStoreDaoImplTest.java index bfcc38ba1048..fc41a82e71d3 100755 --- a/engine/schema/src/test/java/org/apache/cloudstack/storage/datastore/db/PrimaryDataStoreDaoImplTest.java +++ b/engine/schema/src/test/java/org/apache/cloudstack/storage/datastore/db/PrimaryDataStoreDaoImplTest.java @@ -17,12 +17,17 @@ package org.apache.cloudstack.storage.datastore.db; import static org.mockito.ArgumentMatchers.nullable; +import static org.mockito.Mockito.any; import static org.mockito.Mockito.doReturn; +import static org.mockito.Mockito.isNull; +import static org.mockito.Mockito.mock; import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.when; import java.io.IOException; import java.sql.SQLException; import java.util.Arrays; +import java.util.Collections; import java.util.HashMap; import java.util.List; import java.util.Map; @@ -34,13 +39,15 @@ import org.mockito.InjectMocks; import org.mockito.Mock; import org.mockito.Spy; +import org.mockito.junit.MockitoJUnitRunner; import com.cloud.storage.ScopeType; import com.cloud.storage.dao.StoragePoolHostDao; import com.cloud.storage.dao.StoragePoolTagsDao; +import com.cloud.utils.db.GenericSearchBuilder; +import com.cloud.utils.db.SearchBuilder; import junit.framework.TestCase; 
-import org.mockito.junit.MockitoJUnitRunner; @RunWith(MockitoJUnitRunner.class) public class PrimaryDataStoreDaoImplTest extends TestCase { @@ -59,6 +66,8 @@ public class PrimaryDataStoreDaoImplTest extends TestCase { @Mock StoragePoolVO storagePoolVO; + private GenericSearchBuilder genericSearchBuilder; + private static final String STORAGE_TAG_1 = "NFS-A"; private static final String STORAGE_TAG_2 = "NFS-B"; private static final String[] STORAGE_TAGS_ARRAY = {STORAGE_TAG_1, STORAGE_TAG_2}; @@ -155,4 +164,32 @@ public void testFindPoolsByDetailsOrTagsInternalDetailsType() { String expectedSql = primaryDataStoreDao.DetailsSqlPrefix + SQL_VALUES + primaryDataStoreDao.DetailsSqlSuffix; verify(primaryDataStoreDao).searchStoragePoolsPreparedStatement(expectedSql, DATACENTER_ID, POD_ID, CLUSTER_ID, SCOPE, STORAGE_POOL_DETAILS.size()); } + + @Test + public void testListAllIds() { + GenericSearchBuilder genericSearchBuilder = mock(SearchBuilder.class); + StoragePoolVO entityVO = mock(StoragePoolVO.class); + when(genericSearchBuilder.entity()).thenReturn(entityVO); + doReturn(genericSearchBuilder).when(primaryDataStoreDao).createSearchBuilder(Long.class); + List mockIds = Arrays.asList(1L, 2L, 3L); + doReturn(mockIds).when(primaryDataStoreDao).customSearch(any(), isNull()); + List result = primaryDataStoreDao.listAllIds(); + verify(primaryDataStoreDao).customSearch(genericSearchBuilder.create(), null); + assertEquals(3, result.size()); + assertEquals(Long.valueOf(1L), result.get(0)); + assertEquals(Long.valueOf(2L), result.get(1)); + assertEquals(Long.valueOf(3L), result.get(2)); + } + + @Test + public void testListAllIdsEmptyResult() { + GenericSearchBuilder genericSearchBuilder = mock(SearchBuilder.class); + StoragePoolVO entityVO = mock(StoragePoolVO.class); + when(genericSearchBuilder.entity()).thenReturn(entityVO); + doReturn(genericSearchBuilder).when(primaryDataStoreDao).createSearchBuilder(Long.class); + doReturn(Collections.emptyList()).when(primaryDataStoreDao).customSearch(any(), isNull()); + List result = primaryDataStoreDao.listAllIds(); + verify(primaryDataStoreDao).customSearch(genericSearchBuilder.create(), null); + assertTrue(result.isEmpty()); + } } diff --git a/framework/agent-lb/src/main/java/org/apache/cloudstack/agent/lb/IndirectAgentLBAlgorithm.java b/framework/agent-lb/src/main/java/org/apache/cloudstack/agent/lb/IndirectAgentLBAlgorithm.java index c87a0996fcc9..062d2226876f 100644 --- a/framework/agent-lb/src/main/java/org/apache/cloudstack/agent/lb/IndirectAgentLBAlgorithm.java +++ b/framework/agent-lb/src/main/java/org/apache/cloudstack/agent/lb/IndirectAgentLBAlgorithm.java @@ -42,4 +42,8 @@ public interface IndirectAgentLBAlgorithm { * @return true if the lists are equal, false if not */ boolean compare(final List msList, final List receivedMsList); + + default boolean isHostListNeeded() { + return false; + } } diff --git a/framework/cluster/src/main/java/com/cloud/cluster/dao/ManagementServerHostPeerDaoImpl.java b/framework/cluster/src/main/java/com/cloud/cluster/dao/ManagementServerHostPeerDaoImpl.java index 827be4fe2998..c7fad068d490 100644 --- a/framework/cluster/src/main/java/com/cloud/cluster/dao/ManagementServerHostPeerDaoImpl.java +++ b/framework/cluster/src/main/java/com/cloud/cluster/dao/ManagementServerHostPeerDaoImpl.java @@ -96,7 +96,6 @@ public int countStateSeenInPeers(long mshost, long runid, ManagementServerHost.S sc.setParameters("peerRunid", runid); sc.setParameters("peerState", state); - List l = listBy(sc); - return l.size(); + return 
getCount(sc); } } diff --git a/framework/config/src/main/java/org/apache/cloudstack/framework/config/impl/ConfigDepotImpl.java b/framework/config/src/main/java/org/apache/cloudstack/framework/config/impl/ConfigDepotImpl.java index b47370d92059..911a4ad37078 100644 --- a/framework/config/src/main/java/org/apache/cloudstack/framework/config/impl/ConfigDepotImpl.java +++ b/framework/config/src/main/java/org/apache/cloudstack/framework/config/impl/ConfigDepotImpl.java @@ -23,7 +23,6 @@ import java.util.HashSet; import java.util.List; import java.util.Set; -import java.util.concurrent.TimeUnit; import javax.annotation.PostConstruct; import javax.inject.Inject; @@ -36,6 +35,7 @@ import org.apache.cloudstack.framework.config.dao.ConfigurationDao; import org.apache.cloudstack.framework.config.dao.ConfigurationGroupDao; import org.apache.cloudstack.framework.config.dao.ConfigurationSubGroupDao; +import org.apache.cloudstack.utils.cache.LazyCache; import org.apache.commons.lang.ObjectUtils; import org.apache.commons.lang3.StringUtils; import org.apache.logging.log4j.LogManager; @@ -44,8 +44,6 @@ import com.cloud.utils.Pair; import com.cloud.utils.Ternary; import com.cloud.utils.exception.CloudRuntimeException; -import com.github.benmanes.caffeine.cache.Cache; -import com.github.benmanes.caffeine.cache.Caffeine; /** * ConfigDepotImpl implements the ConfigDepot and ConfigDepotAdmin interface. @@ -87,17 +85,15 @@ public class ConfigDepotImpl implements ConfigDepot, ConfigDepotAdmin { List _scopedStorages; Set _configured = Collections.synchronizedSet(new HashSet()); Set newConfigs = Collections.synchronizedSet(new HashSet<>()); - Cache configCache; + LazyCache configCache; private HashMap>> _allKeys = new HashMap>>(1007); HashMap>> _scopeLevelConfigsMap = new HashMap>>(); public ConfigDepotImpl() { - configCache = Caffeine.newBuilder() - .maximumSize(512) - .expireAfterWrite(CONFIG_CACHE_EXPIRE_SECONDS, TimeUnit.SECONDS) - .build(); + configCache = new LazyCache<>(512, + CONFIG_CACHE_EXPIRE_SECONDS, this::getConfigStringValueInternal); ConfigKey.init(this); createEmptyScopeLevelMappings(); } @@ -311,7 +307,7 @@ private String getConfigCacheKey(String key, ConfigKey.Scope scope, Long scopeId @Override public String getConfigStringValue(String key, ConfigKey.Scope scope, Long scopeId) { - return configCache.get(getConfigCacheKey(key, scope, scopeId), this::getConfigStringValueInternal); + return configCache.get(getConfigCacheKey(key, scope, scopeId)); } @Override diff --git a/framework/db/src/main/java/com/cloud/utils/db/GenericDao.java b/framework/db/src/main/java/com/cloud/utils/db/GenericDao.java index de8838b09992..44c312ea9d80 100644 --- a/framework/db/src/main/java/com/cloud/utils/db/GenericDao.java +++ b/framework/db/src/main/java/com/cloud/utils/db/GenericDao.java @@ -148,6 +148,11 @@ public interface GenericDao { */ List listAll(Filter filter); + /** + * Look IDs for all active rows. 
+ */ + List listAllIds(); + /** * Search for the entity beans * @param sc diff --git a/framework/db/src/main/java/com/cloud/utils/db/GenericDaoBase.java b/framework/db/src/main/java/com/cloud/utils/db/GenericDaoBase.java index 82fea9749ff8..433f28162804 100644 --- a/framework/db/src/main/java/com/cloud/utils/db/GenericDaoBase.java +++ b/framework/db/src/main/java/com/cloud/utils/db/GenericDaoBase.java @@ -1214,6 +1214,35 @@ public List listAll(final Filter filter) { return executeList(sql.toString()); } + private Object getIdObject() { + T entity = (T)_searchEnhancer.create(); + try { + Method m = _entityBeanType.getMethod("getId"); + return m.invoke(entity); + } catch (NoSuchMethodException | InvocationTargetException | IllegalAccessException ignored) { + logger.warn(String.format("Unable to get ID object for entity: %s", _entityBeanType.getSimpleName())); + } + return null; + } + + @Override + public List listAllIds() { + Object idObj = getIdObject(); + if (idObj == null) { + return Collections.emptyList(); + } + Class clazz = (Class)idObj.getClass(); + GenericSearchBuilder sb = createSearchBuilder(clazz); + try { + Method m = sb.entity().getClass().getMethod("getId"); + sb.selectFields(m.invoke(sb.entity())); + } catch (NoSuchMethodException | InvocationTargetException | IllegalAccessException ignored) { + return Collections.emptyList(); + } + sb.done(); + return customSearch(sb.create(), null); + } + @Override public boolean expunge(final ID id) { final TransactionLegacy txn = TransactionLegacy.currentTxn(); @@ -2441,4 +2470,11 @@ private Optional> getConverter(Field field) { } } + public static class SumCount { + public long sum; + public long count; + + public SumCount() { + } + } } diff --git a/framework/jobs/src/main/java/org/apache/cloudstack/framework/jobs/dao/VmWorkJobDao.java b/framework/jobs/src/main/java/org/apache/cloudstack/framework/jobs/dao/VmWorkJobDao.java index b3bfda0334cf..79ec3f2b0872 100644 --- a/framework/jobs/src/main/java/org/apache/cloudstack/framework/jobs/dao/VmWorkJobDao.java +++ b/framework/jobs/src/main/java/org/apache/cloudstack/framework/jobs/dao/VmWorkJobDao.java @@ -40,4 +40,5 @@ public interface VmWorkJobDao extends GenericDao { void expungeLeftoverWorkJobs(long msid); int expungeByVmList(List vmIds, Long batchSize); + List listVmIdsWithPendingJob(); } diff --git a/framework/jobs/src/main/java/org/apache/cloudstack/framework/jobs/dao/VmWorkJobDaoImpl.java b/framework/jobs/src/main/java/org/apache/cloudstack/framework/jobs/dao/VmWorkJobDaoImpl.java index 3b167498a377..a467b5fdf59e 100644 --- a/framework/jobs/src/main/java/org/apache/cloudstack/framework/jobs/dao/VmWorkJobDaoImpl.java +++ b/framework/jobs/src/main/java/org/apache/cloudstack/framework/jobs/dao/VmWorkJobDaoImpl.java @@ -24,6 +24,7 @@ import javax.annotation.PostConstruct; import javax.inject.Inject; +import org.apache.cloudstack.framework.jobs.impl.AsyncJobVO; import org.apache.cloudstack.framework.jobs.impl.VmWorkJobVO; import org.apache.cloudstack.framework.jobs.impl.VmWorkJobVO.Step; import org.apache.cloudstack.jobs.JobInfo; @@ -32,6 +33,8 @@ import com.cloud.utils.DateUtil; import com.cloud.utils.db.Filter; import com.cloud.utils.db.GenericDaoBase; +import com.cloud.utils.db.GenericSearchBuilder; +import com.cloud.utils.db.JoinBuilder; import com.cloud.utils.db.SearchBuilder; import com.cloud.utils.db.SearchCriteria; import com.cloud.utils.db.SearchCriteria.Op; @@ -224,4 +227,17 @@ public int expungeByVmList(List vmIds, Long batchSize) { sc.setParameters("vmIds", vmIds.toArray()); 
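// Reviewer note: the generic listAllIds() added to GenericDaoBase (see the GenericDao/GenericDaoBase
// hunks above) is what ClusterDaoImpl and PrimaryDataStoreDaoImpl rely on in their new tests. It
// selects only the id column instead of hydrating full VOs: reflection on getId() of a stub entity
// yields the id class, then a builder with selectFields(entity.getId()) is run through
// customSearch(). A hand-written equivalent for a DAO with a long id would be roughly:
//
//     GenericSearchBuilder sb = createSearchBuilder(Long.class);
//     sb.selectFields(sb.entity().getId());
//     sb.done();
//     return customSearch(sb.create(), null);
//
// The generic version only differs in discovering the id class reflectively and returning an empty
// list if getId() cannot be resolved.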
return batchExpunge(sc, batchSize); } + + @Override + public List listVmIdsWithPendingJob() { + GenericSearchBuilder sb = createSearchBuilder(Long.class); + SearchBuilder asyncJobSearch = _baseJobDao.createSearchBuilder(); + asyncJobSearch.and("status", asyncJobSearch.entity().getStatus(), SearchCriteria.Op.EQ); + sb.join("asyncJobSearch", asyncJobSearch, sb.entity().getId(), asyncJobSearch.entity().getId(), JoinBuilder.JoinType.INNER); + sb.and("removed", sb.entity().getRemoved(), Op.NULL); + sb.selectFields(sb.entity().getVmInstanceId()); + SearchCriteria sc = sb.create(); + sc.setJoinParameters("asyncJobSearch", "status", JobInfo.Status.IN_PROGRESS); + return customSearch(sc, null); + } } diff --git a/framework/jobs/src/test/java/org/apache/cloudstack/framework/jobs/dao/VmWorkJobDaoImplTest.java b/framework/jobs/src/test/java/org/apache/cloudstack/framework/jobs/dao/VmWorkJobDaoImplTest.java index 3e2bc15b1e0c..a70a96b1a145 100644 --- a/framework/jobs/src/test/java/org/apache/cloudstack/framework/jobs/dao/VmWorkJobDaoImplTest.java +++ b/framework/jobs/src/test/java/org/apache/cloudstack/framework/jobs/dao/VmWorkJobDaoImplTest.java @@ -16,27 +16,69 @@ // under the License. package org.apache.cloudstack.framework.jobs.dao; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertTrue; +import static org.mockito.Mockito.any; +import static org.mockito.Mockito.anyLong; +import static org.mockito.Mockito.doAnswer; +import static org.mockito.Mockito.doReturn; +import static org.mockito.Mockito.eq; +import static org.mockito.Mockito.isNull; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.times; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.when; + import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collections; import java.util.List; +import org.apache.cloudstack.framework.jobs.impl.AsyncJobVO; import org.apache.cloudstack.framework.jobs.impl.VmWorkJobVO; +import org.apache.cloudstack.jobs.JobInfo; import org.junit.Assert; +import org.junit.Before; import org.junit.Test; import org.junit.runner.RunWith; -import org.mockito.Mockito; +import org.mockito.InjectMocks; +import org.mockito.Mock; import org.mockito.Spy; import org.mockito.junit.MockitoJUnitRunner; import org.mockito.stubbing.Answer; +import com.cloud.utils.db.GenericSearchBuilder; +import com.cloud.utils.db.JoinBuilder; import com.cloud.utils.db.SearchBuilder; import com.cloud.utils.db.SearchCriteria; @RunWith(MockitoJUnitRunner.class) public class VmWorkJobDaoImplTest { + @Mock + AsyncJobDao asyncJobDao; @Spy + @InjectMocks VmWorkJobDaoImpl vmWorkJobDaoImpl; + private GenericSearchBuilder genericVmWorkJobSearchBuilder; + private SearchBuilder asyncJobSearchBuilder; + private SearchCriteria searchCriteria; + + @Before + public void setUp() { + genericVmWorkJobSearchBuilder = mock(GenericSearchBuilder.class); + VmWorkJobVO entityVO = mock(VmWorkJobVO.class); + when(genericVmWorkJobSearchBuilder.entity()).thenReturn(entityVO); + asyncJobSearchBuilder = mock(SearchBuilder.class); + AsyncJobVO asyncJobVO = mock(AsyncJobVO.class); + when(asyncJobSearchBuilder.entity()).thenReturn(asyncJobVO); + searchCriteria = mock(SearchCriteria.class); + when(vmWorkJobDaoImpl.createSearchBuilder(Long.class)).thenReturn(genericVmWorkJobSearchBuilder); + when(asyncJobDao.createSearchBuilder()).thenReturn(asyncJobSearchBuilder); + when(genericVmWorkJobSearchBuilder.create()).thenReturn(searchCriteria); + } + @Test public void 
testExpungeByVmListNoVms() { Assert.assertEquals(0, vmWorkJobDaoImpl.expungeByVmList( @@ -47,22 +89,52 @@ public void testExpungeByVmListNoVms() { @Test public void testExpungeByVmList() { - SearchBuilder sb = Mockito.mock(SearchBuilder.class); - SearchCriteria sc = Mockito.mock(SearchCriteria.class); - Mockito.when(sb.create()).thenReturn(sc); - Mockito.doAnswer((Answer) invocationOnMock -> { + SearchBuilder sb = mock(SearchBuilder.class); + SearchCriteria sc = mock(SearchCriteria.class); + when(sb.create()).thenReturn(sc); + doAnswer((Answer) invocationOnMock -> { Long batchSize = (Long)invocationOnMock.getArguments()[1]; return batchSize == null ? 0 : batchSize.intValue(); - }).when(vmWorkJobDaoImpl).batchExpunge(Mockito.any(SearchCriteria.class), Mockito.anyLong()); - Mockito.when(vmWorkJobDaoImpl.createSearchBuilder()).thenReturn(sb); - final VmWorkJobVO mockedVO = Mockito.mock(VmWorkJobVO.class); - Mockito.when(sb.entity()).thenReturn(mockedVO); + }).when(vmWorkJobDaoImpl).batchExpunge(any(SearchCriteria.class), anyLong()); + when(vmWorkJobDaoImpl.createSearchBuilder()).thenReturn(sb); + final VmWorkJobVO mockedVO = mock(VmWorkJobVO.class); + when(sb.entity()).thenReturn(mockedVO); List vmIds = List.of(1L, 2L); Object[] array = vmIds.toArray(); Long batchSize = 50L; Assert.assertEquals(batchSize.intValue(), vmWorkJobDaoImpl.expungeByVmList(List.of(1L, 2L), batchSize)); - Mockito.verify(sc).setParameters("vmIds", array); - Mockito.verify(vmWorkJobDaoImpl, Mockito.times(1)) + verify(sc).setParameters("vmIds", array); + verify(vmWorkJobDaoImpl, times(1)) .batchExpunge(sc, batchSize); } + + @Test + public void testListVmIdsWithPendingJob() { + List mockVmIds = Arrays.asList(101L, 102L, 103L); + doReturn(mockVmIds).when(vmWorkJobDaoImpl).customSearch(any(SearchCriteria.class), isNull()); + List result = vmWorkJobDaoImpl.listVmIdsWithPendingJob(); + verify(genericVmWorkJobSearchBuilder).join(eq("asyncJobSearch"), eq(asyncJobSearchBuilder), any(), any(), eq(JoinBuilder.JoinType.INNER)); + verify(genericVmWorkJobSearchBuilder).and(eq("removed"), any(), eq(SearchCriteria.Op.NULL)); + verify(genericVmWorkJobSearchBuilder).create(); + verify(asyncJobSearchBuilder).and(eq("status"), any(), eq(SearchCriteria.Op.EQ)); + verify(searchCriteria).setJoinParameters(eq("asyncJobSearch"), eq("status"), eq(JobInfo.Status.IN_PROGRESS)); + verify(vmWorkJobDaoImpl).customSearch(searchCriteria, null); + assertEquals(3, result.size()); + assertEquals(Long.valueOf(101L), result.get(0)); + assertEquals(Long.valueOf(102L), result.get(1)); + assertEquals(Long.valueOf(103L), result.get(2)); + } + + @Test + public void testListVmIdsWithPendingJobEmptyResult() { + doReturn(Collections.emptyList()).when(vmWorkJobDaoImpl).customSearch(any(SearchCriteria.class), isNull()); + List result = vmWorkJobDaoImpl.listVmIdsWithPendingJob(); + verify(genericVmWorkJobSearchBuilder).join(eq("asyncJobSearch"), eq(asyncJobSearchBuilder), any(), any(), eq(JoinBuilder.JoinType.INNER)); + verify(genericVmWorkJobSearchBuilder).and(eq("removed"), any(), eq(SearchCriteria.Op.NULL)); + verify(genericVmWorkJobSearchBuilder).create(); + verify(asyncJobSearchBuilder).and(eq("status"), any(), eq(SearchCriteria.Op.EQ)); + verify(searchCriteria).setJoinParameters(eq("asyncJobSearch"), eq("status"), eq(JobInfo.Status.IN_PROGRESS)); + verify(vmWorkJobDaoImpl).customSearch(searchCriteria, null); + assertTrue(result.isEmpty()); + } } diff --git 
a/plugins/acl/dynamic-role-based/src/main/java/org/apache/cloudstack/acl/DynamicRoleBasedAPIAccessChecker.java b/plugins/acl/dynamic-role-based/src/main/java/org/apache/cloudstack/acl/DynamicRoleBasedAPIAccessChecker.java index db40b6e68ddd..030e0bcf0141 100644 --- a/plugins/acl/dynamic-role-based/src/main/java/org/apache/cloudstack/acl/DynamicRoleBasedAPIAccessChecker.java +++ b/plugins/acl/dynamic-role-based/src/main/java/org/apache/cloudstack/acl/DynamicRoleBasedAPIAccessChecker.java @@ -26,20 +26,21 @@ import javax.inject.Inject; import javax.naming.ConfigurationException; -import org.apache.cloudstack.api.APICommand; import org.apache.cloudstack.acl.RolePermissionEntity.Permission; +import org.apache.cloudstack.api.APICommand; +import org.apache.cloudstack.utils.cache.LazyCache; +import org.apache.commons.lang3.StringUtils; import com.cloud.exception.PermissionDeniedException; import com.cloud.exception.UnavailableCommandException; import com.cloud.user.Account; import com.cloud.user.AccountService; import com.cloud.user.User; +import com.cloud.utils.Pair; import com.cloud.utils.component.AdapterBase; import com.cloud.utils.component.PluggableService; -import org.apache.commons.lang3.StringUtils; public class DynamicRoleBasedAPIAccessChecker extends AdapterBase implements APIAclChecker { - @Inject private AccountService accountService; @Inject @@ -48,6 +49,9 @@ public class DynamicRoleBasedAPIAccessChecker extends AdapterBase implements API private List services; private Map> annotationRoleBasedApisMap = new HashMap>(); + private LazyCache accountCache; + private LazyCache>> rolePermissionsCache; + private int cachePeriod; protected DynamicRoleBasedAPIAccessChecker() { super(); @@ -99,23 +103,66 @@ public boolean checkApiPermissionByRole(Role role, String apiName, List> getRolePermissions(long roleId) { + final Role accountRole = roleService.findRole(roleId); + if (accountRole == null || accountRole.getId() < 1L) { + return new Pair<>(null, null); + } + + if (accountRole.getRoleType() == RoleType.Admin && accountRole.getId() == RoleType.Admin.getId()) { + return new Pair<>(accountRole, null); + } + + return new Pair<>(accountRole, roleService.findAllPermissionsBy(accountRole.getId())); + } + + protected Pair> getRolePermissionsUsingCache(long roleId) { + if (cachePeriod > 0) { + return rolePermissionsCache.get(roleId); + } + return getRolePermissions(roleId); + } + + protected Account getAccountFromIdUsingCache(long accountId) { + if (cachePeriod > 0) { + return accountCache.get(accountId); + } + return getAccountFromId(accountId); + } + @Override public boolean checkAccess(User user, String commandName) throws PermissionDeniedException { if (!isEnabled()) { return true; } - - Account account = accountService.getAccount(user.getAccountId()); + Account account = getAccountFromIdUsingCache(user.getAccountId()); if (account == null) { - throw new PermissionDeniedException(String.format("The account id [%s] for user id [%s] is null.", user.getAccountId(), user.getUuid())); + throw new PermissionDeniedException(String.format("Account for user id [%s] cannot be found", user.getUuid())); } - - return checkAccess(account, commandName); + Pair> roleAndPermissions = getRolePermissionsUsingCache(account.getRoleId()); + final Role accountRole = roleAndPermissions.first(); + if (accountRole == null) { + throw new PermissionDeniedException(String.format("Account role for user id [%s] cannot be found.", user.getUuid())); + } + if (accountRole.getRoleType() == RoleType.Admin && accountRole.getId() == 
RoleType.Admin.getId()) { + logger.info("Account for user id {} is Root Admin or Domain Admin, all APIs are allowed.", user.getUuid()); + return true; + } + List allPermissions = roleAndPermissions.second(); + if (checkApiPermissionByRole(accountRole, commandName, allPermissions)) { + return true; + } + throw new UnavailableCommandException(String.format("The API [%s] does not exist or is not available for the account for user id [%s].", commandName, user.getUuid())); } public boolean checkAccess(Account account, String commandName) { - final Role accountRole = roleService.findRole(account.getRoleId()); - if (accountRole == null || accountRole.getId() < 1L) { + Pair> roleAndPermissions = getRolePermissionsUsingCache(account.getRoleId()); + final Role accountRole = roleAndPermissions.first(); + if (accountRole == null) { throw new PermissionDeniedException(String.format("The account [%s] has role null or unknown.", account)); } @@ -160,6 +207,9 @@ public void addApiToRoleBasedAnnotationsMap(final RoleType roleType, final Strin @Override public boolean configure(String name, Map params) throws ConfigurationException { super.configure(name, params); + cachePeriod = Math.max(0, RoleService.DynamicApiCheckerCachePeriod.value()); + accountCache = new LazyCache<>(32, cachePeriod, this::getAccountFromId); + rolePermissionsCache = new LazyCache<>(32, cachePeriod, this::getRolePermissions); return true; } diff --git a/plugins/affinity-group-processors/explicit-dedication/src/main/java/org/apache/cloudstack/affinity/ExplicitDedicationProcessor.java b/plugins/affinity-group-processors/explicit-dedication/src/main/java/org/apache/cloudstack/affinity/ExplicitDedicationProcessor.java index ec6674477b07..342ac4568f2f 100644 --- a/plugins/affinity-group-processors/explicit-dedication/src/main/java/org/apache/cloudstack/affinity/ExplicitDedicationProcessor.java +++ b/plugins/affinity-group-processors/explicit-dedication/src/main/java/org/apache/cloudstack/affinity/ExplicitDedicationProcessor.java @@ -321,13 +321,13 @@ private ExcludeList updateAvoidList(List dedicatedResources } } //add all hosts inside this in includeList - List hostList = _hostDao.listByDataCenterId(dr.getDataCenterId()); - for (HostVO host : hostList) { - DedicatedResourceVO dHost = _dedicatedDao.findByHostId(host.getId()); + List hostList = _hostDao.listEnabledIdsByDataCenterId(dr.getDataCenterId()); + for (Long hostId : hostList) { + DedicatedResourceVO dHost = _dedicatedDao.findByHostId(hostId); if (dHost != null && !dedicatedResources.contains(dHost)) { - avoidList.addHost(host.getId()); + avoidList.addHost(hostId); } else { - includeList.addHost(host.getId()); + includeList.addHost(hostId); } } } @@ -337,7 +337,7 @@ private ExcludeList updateAvoidList(List dedicatedResources List pods = _podDao.listByDataCenterId(dc.getId()); List clusters = _clusterDao.listClustersByDcId(dc.getId()); - List hosts = _hostDao.listByDataCenterId(dc.getId()); + List hostIds = _hostDao.listEnabledIdsByDataCenterId(dc.getId()); Set podsInIncludeList = includeList.getPodsToAvoid(); Set clustersInIncludeList = includeList.getClustersToAvoid(); Set hostsInIncludeList = includeList.getHostsToAvoid(); @@ -357,9 +357,9 @@ private ExcludeList updateAvoidList(List dedicatedResources } } - for (HostVO host : hosts) { - if (hostsInIncludeList != null && !hostsInIncludeList.contains(host.getId())) { - avoidList.addHost(host.getId()); + for (Long hostId : hostIds) { + if (hostsInIncludeList != null && !hostsInIncludeList.contains(hostId)) { + 
avoidList.addHost(hostId); } } return avoidList; diff --git a/plugins/dedicated-resources/src/main/java/org/apache/cloudstack/dedicated/DedicatedResourceManagerImpl.java b/plugins/dedicated-resources/src/main/java/org/apache/cloudstack/dedicated/DedicatedResourceManagerImpl.java index 4f1db396b7c4..7633a895ecd6 100644 --- a/plugins/dedicated-resources/src/main/java/org/apache/cloudstack/dedicated/DedicatedResourceManagerImpl.java +++ b/plugins/dedicated-resources/src/main/java/org/apache/cloudstack/dedicated/DedicatedResourceManagerImpl.java @@ -23,7 +23,6 @@ import javax.inject.Inject; import javax.naming.ConfigurationException; -import org.apache.commons.lang3.StringUtils; import org.apache.cloudstack.affinity.AffinityGroup; import org.apache.cloudstack.affinity.AffinityGroupService; import org.apache.cloudstack.affinity.dao.AffinityGroupDao; @@ -45,8 +44,9 @@ import org.apache.cloudstack.api.response.DedicateZoneResponse; import org.apache.cloudstack.context.CallContext; import org.apache.cloudstack.framework.config.dao.ConfigurationDao; -import org.apache.logging.log4j.Logger; +import org.apache.commons.lang3.StringUtils; import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; import org.springframework.stereotype.Component; import com.cloud.configuration.Config; @@ -126,7 +126,7 @@ public boolean configure(final String name, final Map params) th @ActionEvent(eventType = EventTypes.EVENT_DEDICATE_RESOURCE, eventDescription = "dedicating a Zone") public List dedicateZone(final Long zoneId, final Long domainId, final String accountName) { Long accountId = null; - List hosts = null; + List hostIds = null; if (accountName != null) { Account caller = CallContext.current().getCallingAccount(); Account owner = _accountMgr.finalizeOwner(caller, accountName, domainId, null); @@ -203,18 +203,20 @@ public List dedicateZone(final Long zoneId, final Long doma releaseDedicatedResource(null, null, dr.getClusterId(), null); } - hosts = _hostDao.listByDataCenterId(dc.getId()); - for (HostVO host : hosts) { - DedicatedResourceVO dHost = _dedicatedDao.findByHostId(host.getId()); + hostIds = _hostDao.listEnabledIdsByDataCenterId(dc.getId()); + for (Long hostId : hostIds) { + DedicatedResourceVO dHost = _dedicatedDao.findByHostId(hostId); if (dHost != null) { if (!(childDomainIds.contains(dHost.getDomainId()))) { + HostVO host = _hostDao.findById(hostId); throw new CloudRuntimeException("Host " + host.getName() + " under this Zone " + dc.getName() + " is dedicated to different account/domain"); } if (accountId != null) { if (dHost.getAccountId().equals(accountId)) { hostsToRelease.add(dHost); } else { - logger.error("Host " + host.getName() + " under this Zone " + dc.getName() + " is dedicated to different account/domain"); + HostVO host = _hostDao.findById(hostId); + logger.error("Host {} under this Zone {} is dedicated to different account/domain", host.getName(), dc.getName()); throw new CloudRuntimeException("Host " + host.getName() + " under this Zone " + dc.getName() + " is dedicated to different account/domain"); } } else { @@ -230,7 +232,7 @@ public List dedicateZone(final Long zoneId, final Long doma } } - checkHostsSuitabilityForExplicitDedication(accountId, childDomainIds, hosts); + checkHostsSuitabilityForExplicitDedication(accountId, childDomainIds, hostIds); final Long accountIdFinal = accountId; return Transaction.execute(new TransactionCallback>() { @@ -284,7 +286,7 @@ public List dedicatePod(final Long podId, final Long domain childDomainIds.add(domainId); 
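// Reviewer note: dedicateZone() above (and dedicatePod()/dedicateCluster() below, plus
// ExplicitDedicationProcessor earlier in this patch) now iterate plain host ids from
// listEnabledIdsByDataCenterId()/listIdsByPodId()/listIdsByClusterId() and only load the full
// HostVO when an error message needs the host name. Sketch of the shape (simplified from
// dedicateZone() above):
//
//     for (Long hostId : _hostDao.listEnabledIdsByDataCenterId(dc.getId())) {
//         DedicatedResourceVO dHost = _dedicatedDao.findByHostId(hostId);
//         if (dHost != null && !childDomainIds.contains(dHost.getDomainId())) {
//             HostVO host = _hostDao.findById(hostId);   // fetched only on the error path
//             throw new CloudRuntimeException("Host " + host.getName() + " is dedicated to a different account/domain");
//         }
//     }
//
// checkHostsSuitabilityForExplicitDedication() was changed accordingly to accept the id list.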
checkAccountAndDomain(accountId, domainId); HostPodVO pod = _podDao.findById(podId); - List hosts = null; + List hostIds = null; if (pod == null) { throw new InvalidParameterValueException("Unable to find pod by id " + podId); } else { @@ -339,18 +341,20 @@ public List dedicatePod(final Long podId, final Long domain releaseDedicatedResource(null, null, dr.getClusterId(), null); } - hosts = _hostDao.findByPodId(pod.getId()); - for (HostVO host : hosts) { - DedicatedResourceVO dHost = _dedicatedDao.findByHostId(host.getId()); + hostIds = _hostDao.listIdsByPodId(pod.getId()); + for (Long hostId : hostIds) { + DedicatedResourceVO dHost = _dedicatedDao.findByHostId(hostId); if (dHost != null) { if (!(getDomainChildIds(domainId).contains(dHost.getDomainId()))) { + HostVO host = _hostDao.findById(hostId); throw new CloudRuntimeException("Host " + host.getName() + " under this Pod " + pod.getName() + " is dedicated to different account/domain"); } if (accountId != null) { if (dHost.getAccountId().equals(accountId)) { hostsToRelease.add(dHost); } else { - logger.error("Host " + host.getName() + " under this Pod " + pod.getName() + " is dedicated to different account/domain"); + HostVO host = _hostDao.findById(hostId); + logger.error("Host {} under this Pod {} is dedicated to different account/domain", host.getName(), pod.getName()); throw new CloudRuntimeException("Host " + host.getName() + " under this Pod " + pod.getName() + " is dedicated to different account/domain"); } } else { @@ -366,7 +370,7 @@ public List dedicatePod(final Long podId, final Long domain } } - checkHostsSuitabilityForExplicitDedication(accountId, childDomainIds, hosts); + checkHostsSuitabilityForExplicitDedication(accountId, childDomainIds, hostIds); final Long accountIdFinal = accountId; return Transaction.execute(new TransactionCallback>() { @@ -402,7 +406,7 @@ public List doInTransaction(TransactionStatus status) { @ActionEvent(eventType = EventTypes.EVENT_DEDICATE_RESOURCE, eventDescription = "dedicating a Cluster") public List dedicateCluster(final Long clusterId, final Long domainId, final String accountName) { Long accountId = null; - List hosts = null; + List hostIds = null; if (accountName != null) { Account caller = CallContext.current().getCallingAccount(); Account owner = _accountMgr.finalizeOwner(caller, accountName, domainId, null); @@ -448,12 +452,13 @@ public List dedicateCluster(final Long clusterId, final Lon } //check if any resource under this cluster is dedicated to different account or sub-domain - hosts = _hostDao.findByClusterId(cluster.getId()); + hostIds = _hostDao.listIdsByClusterId(cluster.getId()); List hostsToRelease = new ArrayList(); - for (HostVO host : hosts) { - DedicatedResourceVO dHost = _dedicatedDao.findByHostId(host.getId()); + for (Long hostId : hostIds) { + DedicatedResourceVO dHost = _dedicatedDao.findByHostId(hostId); if (dHost != null) { if (!(childDomainIds.contains(dHost.getDomainId()))) { + HostVO host = _hostDao.findById(hostId); throw new CloudRuntimeException("Host " + host.getName() + " under this Cluster " + cluster.getName() + " is dedicated to different account/domain"); } @@ -479,7 +484,7 @@ public List dedicateCluster(final Long clusterId, final Lon } } - checkHostsSuitabilityForExplicitDedication(accountId, childDomainIds, hosts); + checkHostsSuitabilityForExplicitDedication(accountId, childDomainIds, hostIds); final Long accountIdFinal = accountId; return Transaction.execute(new TransactionCallback>() { @@ -685,10 +690,10 @@ private boolean 
checkHostSuitabilityForExplicitDedication(Long accountId, List domainIds, List hosts) { + private boolean checkHostsSuitabilityForExplicitDedication(Long accountId, List domainIds, List hostIds) { boolean suitable = true; - for (HostVO host : hosts) { - checkHostSuitabilityForExplicitDedication(accountId, domainIds, host.getId()); + for (Long hostId : hostIds) { + checkHostSuitabilityForExplicitDedication(accountId, domainIds, hostId); } return suitable; } diff --git a/plugins/deployment-planners/implicit-dedication/src/main/java/com/cloud/deploy/ImplicitDedicationPlanner.java b/plugins/deployment-planners/implicit-dedication/src/main/java/com/cloud/deploy/ImplicitDedicationPlanner.java index bd1bcf061013..5bbdd3411b42 100644 --- a/plugins/deployment-planners/implicit-dedication/src/main/java/com/cloud/deploy/ImplicitDedicationPlanner.java +++ b/plugins/deployment-planners/implicit-dedication/src/main/java/com/cloud/deploy/ImplicitDedicationPlanner.java @@ -21,14 +21,15 @@ import java.util.List; import java.util.Map; import java.util.Set; +import java.util.stream.Collectors; import javax.inject.Inject; import javax.naming.ConfigurationException; +import org.apache.commons.collections.CollectionUtils; import com.cloud.configuration.Config; import com.cloud.exception.InsufficientServerCapacityException; -import com.cloud.host.HostVO; import com.cloud.resource.ResourceManager; import com.cloud.service.ServiceOfferingVO; import com.cloud.service.dao.ServiceOfferingDao; @@ -38,7 +39,6 @@ import com.cloud.utils.NumbersUtil; import com.cloud.vm.VMInstanceVO; import com.cloud.vm.VirtualMachineProfile; -import org.springframework.util.CollectionUtils; public class ImplicitDedicationPlanner extends FirstFitPlanner implements DeploymentClusterPlanner { @@ -73,12 +73,11 @@ public List orderClusters(VirtualMachineProfile vmProfile, DeploymentPlan boolean preferred = isServiceOfferingUsingPlannerInPreferredMode(vmProfile.getServiceOfferingId()); // Get the list of all the hosts in the given clusters - List allHosts = new ArrayList(); - for (Long cluster : clusterList) { - List hostsInCluster = resourceMgr.listAllHostsInCluster(cluster); - for (HostVO hostVO : hostsInCluster) { - allHosts.add(hostVO.getId()); - } + List allHosts = new ArrayList<>(); + if (CollectionUtils.isNotEmpty(clusterList)) { + allHosts = clusterList.stream() + .flatMap(cluster -> hostDao.listIdsByClusterId(cluster).stream()) + .collect(Collectors.toList()); } // Go over all the hosts in the cluster and get a list of @@ -221,20 +220,15 @@ private boolean isServiceOfferingUsingPlannerInPreferredMode(long serviceOfferin } private List getUpdatedClusterList(List clusterList, Set hostsSet) { - List updatedClusterList = new ArrayList(); - for (Long cluster : clusterList) { - List hosts = resourceMgr.listAllHostsInCluster(cluster); - Set hostsInClusterSet = new HashSet(); - for (HostVO host : hosts) { - hostsInClusterSet.add(host.getId()); - } - - if (!hostsSet.containsAll(hostsInClusterSet)) { - updatedClusterList.add(cluster); - } + if (CollectionUtils.isEmpty(clusterList)) { + return new ArrayList<>(); } - - return updatedClusterList; + return clusterList.stream() + .filter(cluster -> { + Set hostsInClusterSet = new HashSet<>(hostDao.listIdsByClusterId(cluster)); + return !hostsSet.containsAll(hostsInClusterSet); + }) + .collect(Collectors.toList()); } @Override @@ -254,15 +248,11 @@ public PlannerResourceUsage getResourceUsage(VirtualMachineProfile vmProfile, De Account account = vmProfile.getOwner(); // Get the list of all the hosts 
in the given clusters - List allHosts = new ArrayList(); - if (!CollectionUtils.isEmpty(clusterList)) { - for (Long cluster : clusterList) { - List hostsInCluster = resourceMgr.listAllHostsInCluster(cluster); - for (HostVO hostVO : hostsInCluster) { - - allHosts.add(hostVO.getId()); - } - } + List allHosts = new ArrayList<>(); + if (CollectionUtils.isNotEmpty(clusterList)) { + allHosts = clusterList.stream() + .flatMap(cluster -> hostDao.listIdsByClusterId(cluster).stream()) + .collect(Collectors.toList()); } // Go over all the hosts in the cluster and get a list of // 1. All empty hosts, not running any vms. diff --git a/plugins/deployment-planners/implicit-dedication/src/test/java/org/apache/cloudstack/implicitplanner/ImplicitPlannerTest.java b/plugins/deployment-planners/implicit-dedication/src/test/java/org/apache/cloudstack/implicitplanner/ImplicitPlannerTest.java index e174824cfdd2..2d2b4c78261e 100644 --- a/plugins/deployment-planners/implicit-dedication/src/test/java/org/apache/cloudstack/implicitplanner/ImplicitPlannerTest.java +++ b/plugins/deployment-planners/implicit-dedication/src/test/java/org/apache/cloudstack/implicitplanner/ImplicitPlannerTest.java @@ -16,11 +16,11 @@ // under the License. package org.apache.cloudstack.implicitplanner; -import static org.junit.Assert.assertFalse; -import static org.junit.Assert.assertTrue; import static org.hamcrest.MatcherAssert.assertThat; -import static org.hamcrest.Matchers.everyItem; import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.everyItem; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertTrue; import static org.mockito.ArgumentMatchers.anyString; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; @@ -36,7 +36,11 @@ import javax.inject.Inject; -import com.cloud.user.User; +import org.apache.cloudstack.context.CallContext; +import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreManager; +import org.apache.cloudstack.framework.config.dao.ConfigurationDao; +import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao; +import org.apache.cloudstack.test.utils.SpringUtils; import org.junit.After; import org.junit.Before; import org.junit.Test; @@ -54,12 +58,6 @@ import org.springframework.test.context.junit4.SpringJUnit4ClassRunner; import org.springframework.test.context.support.AnnotationConfigContextLoader; -import org.apache.cloudstack.context.CallContext; -import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreManager; -import org.apache.cloudstack.framework.config.dao.ConfigurationDao; -import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao; -import org.apache.cloudstack.test.utils.SpringUtils; - import com.cloud.capacity.Capacity; import com.cloud.capacity.CapacityManager; import com.cloud.capacity.dao.CapacityDao; @@ -73,7 +71,6 @@ import com.cloud.deploy.ImplicitDedicationPlanner; import com.cloud.exception.InsufficientServerCapacityException; import com.cloud.gpu.dao.HostGpuGroupsDao; -import com.cloud.host.HostVO; import com.cloud.host.dao.HostDao; import com.cloud.host.dao.HostDetailsDao; import com.cloud.host.dao.HostTagsDao; @@ -90,6 +87,7 @@ import com.cloud.user.Account; import com.cloud.user.AccountManager; import com.cloud.user.AccountVO; +import com.cloud.user.User; import com.cloud.user.UserVO; import com.cloud.utils.Pair; import com.cloud.utils.component.ComponentContext; @@ -387,21 +385,9 @@ private void initializeForImplicitPlannerTest(boolean preferred) { 
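Both orderClusters and getResourceUsage above now gather host ids with a single flatMap over the cluster list instead of nested loops over HostVO objects, and the test below mocks hostDao.listIdsByClusterId directly. A small self-contained illustration of that stream shape, with an in-memory map standing in for the DAO call:

import java.util.List;
import java.util.Map;
import java.util.function.Function;
import java.util.stream.Collectors;

public class ClusterHostIdFlattening {
    public static void main(String[] args) {
        // In-memory lookup standing in for hostDao.listIdsByClusterId(clusterId).
        Map<Long, List<Long>> hostIdsByCluster = Map.of(
                1L, List.of(5L),
                2L, List.of(6L),
                3L, List.of(7L, 8L));
        Function<Long, List<Long>> listIdsByClusterId =
                clusterId -> hostIdsByCluster.getOrDefault(clusterId, List.of());

        List<Long> clusterList = List.of(1L, 2L, 3L);

        // Same shape as the planner change: one flatMap instead of nested for-loops over HostVO.
        List<Long> allHostIds = clusterList.stream()
                .flatMap(clusterId -> listIdsByClusterId.apply(clusterId).stream())
                .collect(Collectors.toList());

        System.out.println(allHostIds); // prints [5, 6, 7, 8]
    }
}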
when(serviceOfferingDetailsDao.listDetailsKeyPairs(offeringId)).thenReturn(details); // Initialize hosts in clusters - HostVO host1 = mock(HostVO.class); - when(host1.getId()).thenReturn(5L); - HostVO host2 = mock(HostVO.class); - when(host2.getId()).thenReturn(6L); - HostVO host3 = mock(HostVO.class); - when(host3.getId()).thenReturn(7L); - List hostsInCluster1 = new ArrayList(); - List hostsInCluster2 = new ArrayList(); - List hostsInCluster3 = new ArrayList(); - hostsInCluster1.add(host1); - hostsInCluster2.add(host2); - hostsInCluster3.add(host3); - when(resourceMgr.listAllHostsInCluster(1)).thenReturn(hostsInCluster1); - when(resourceMgr.listAllHostsInCluster(2)).thenReturn(hostsInCluster2); - when(resourceMgr.listAllHostsInCluster(3)).thenReturn(hostsInCluster3); + when(hostDao.listIdsByClusterId(1L)).thenReturn(List.of(5L)); + when(hostDao.listIdsByClusterId(2L)).thenReturn(List.of(6L)); + when(hostDao.listIdsByClusterId(3L)).thenReturn(List.of(7L)); // Mock vms on each host. long offeringIdForVmsOfThisAccount = 15L; diff --git a/plugins/hypervisors/simulator/src/main/java/com/cloud/resource/AgentRoutingResource.java b/plugins/hypervisors/simulator/src/main/java/com/cloud/resource/AgentRoutingResource.java index 80ced4c230db..e1f4d87fa7eb 100644 --- a/plugins/hypervisors/simulator/src/main/java/com/cloud/resource/AgentRoutingResource.java +++ b/plugins/hypervisors/simulator/src/main/java/com/cloud/resource/AgentRoutingResource.java @@ -109,7 +109,7 @@ public Type getType() { public PingCommand getCurrentStatus(long id) { TransactionLegacy txn = TransactionLegacy.open(TransactionLegacy.SIMULATOR_DB); try { - MockConfigurationVO config = _simMgr.getMockConfigurationDao().findByNameBottomUP(agentHost.getDataCenterId(), agentHost.getPodId(), agentHost.getClusterId(), agentHost.getId(), "PingCommand"); + MockConfigurationVO config = null; if (config != null) { Map configParameters = config.getParameters(); for (Map.Entry entry : configParameters.entrySet()) { @@ -122,7 +122,7 @@ public PingCommand getCurrentStatus(long id) { } } - config = _simMgr.getMockConfigurationDao().findByNameBottomUP(agentHost.getDataCenterId(), agentHost.getPodId(), agentHost.getClusterId(), agentHost.getId(), "PingRoutingWithNwGroupsCommand"); + config = null; if (config != null) { String message = config.getJsonResponse(); if (message != null) { diff --git a/plugins/hypervisors/xenserver/src/main/java/com/cloud/hypervisor/xenserver/discoverer/XcpServerDiscoverer.java b/plugins/hypervisors/xenserver/src/main/java/com/cloud/hypervisor/xenserver/discoverer/XcpServerDiscoverer.java index 8a5e59e4373c..4fa7e7882244 100644 --- a/plugins/hypervisors/xenserver/src/main/java/com/cloud/hypervisor/xenserver/discoverer/XcpServerDiscoverer.java +++ b/plugins/hypervisors/xenserver/src/main/java/com/cloud/hypervisor/xenserver/discoverer/XcpServerDiscoverer.java @@ -31,6 +31,7 @@ import javax.persistence.EntityExistsException; import org.apache.cloudstack.hypervisor.xenserver.XenserverConfigs; +import org.apache.commons.collections.CollectionUtils; import org.apache.commons.lang3.StringUtils; import org.apache.maven.artifact.versioning.ComparableVersion; import org.apache.xmlrpc.XmlRpcException; @@ -144,8 +145,8 @@ void setClusterGuid(ClusterVO cluster, String guid) { sc.and(sc.entity().getGuid(), Op.EQ, guid); List clusters = sc.list(); ClusterVO clu = clusters.get(0); - List clusterHosts = _resourceMgr.listAllHostsInCluster(clu.getId()); - if (clusterHosts == null || clusterHosts.size() == 0) { + List clusterHostIds = 
_hostDao.listIdsByClusterId(clu.getId()); + if (CollectionUtils.isEmpty(clusterHostIds)) { clu.setGuid(null); _clusterDao.update(clu.getId(), clu); _clusterDao.update(cluster.getId(), cluster); @@ -245,8 +246,8 @@ protected boolean poolHasHotFix(Connection conn, String hostIp, String hotFixUui if (clu.getGuid() == null) { setClusterGuid(clu, poolUuid); } else { - List clusterHosts = _resourceMgr.listAllHostsInCluster(clusterId); - if (clusterHosts != null && clusterHosts.size() > 0) { + List clusterHostIds = _hostDao.listIdsByClusterId(clusterId); + if (CollectionUtils.isNotEmpty(clusterHostIds)) { if (!clu.getGuid().equals(poolUuid)) { String msg = "Please join the host " + hostIp + " to XS pool " + clu.getGuid() + " through XC/XS before adding it through CS UI"; diff --git a/plugins/integrations/prometheus/src/main/java/org/apache/cloudstack/metrics/PrometheusExporterImpl.java b/plugins/integrations/prometheus/src/main/java/org/apache/cloudstack/metrics/PrometheusExporterImpl.java index a84b1a6e2dea..e5dc0b291713 100644 --- a/plugins/integrations/prometheus/src/main/java/org/apache/cloudstack/metrics/PrometheusExporterImpl.java +++ b/plugins/integrations/prometheus/src/main/java/org/apache/cloudstack/metrics/PrometheusExporterImpl.java @@ -298,8 +298,8 @@ private void addHostTagsMetrics(final List metricsList, final long dcId, f metricsList.add(new ItemHostMemory(zoneName, zoneUuid, null, null, null, null, ALLOCATED, allocatedCapacityByTag.third(), 0, tag)); }); - List allHostTagVOS = hostDao.listAll().stream() - .flatMap( h -> _hostTagsDao.getHostTags(h.getId()).stream()) + List allHostTagVOS = hostDao.listAllIds().stream() + .flatMap( h -> _hostTagsDao.getHostTags(h).stream()) .distinct() .collect(Collectors.toList()); List allHostTags = new ArrayList<>(); diff --git a/plugins/metrics/src/main/java/org/apache/cloudstack/metrics/MetricsService.java b/plugins/metrics/src/main/java/org/apache/cloudstack/metrics/MetricsService.java index 48033dd75389..bb7763688385 100644 --- a/plugins/metrics/src/main/java/org/apache/cloudstack/metrics/MetricsService.java +++ b/plugins/metrics/src/main/java/org/apache/cloudstack/metrics/MetricsService.java @@ -30,6 +30,7 @@ import org.apache.cloudstack.api.response.UserVmResponse; import org.apache.cloudstack.api.response.VolumeResponse; import org.apache.cloudstack.api.response.ZoneResponse; +import org.apache.cloudstack.framework.config.ConfigKey; import org.apache.cloudstack.response.ClusterMetricsResponse; import org.apache.cloudstack.response.DbMetricsResponse; import org.apache.cloudstack.response.HostMetricsResponse; @@ -47,6 +48,11 @@ import com.cloud.utils.component.PluggableService; public interface MetricsService extends PluggableService { + + ConfigKey AllowListMetricsComputation = new ConfigKey<>("Advanced", Boolean.class, "allow.list.metrics.computation", "true", + "Whether the list zones and cluster metrics APIs are allowed metrics computation. 
Large environments may disable this.", + true, ConfigKey.Scope.Global); + InfrastructureResponse listInfrastructure(); ListResponse searchForVmMetricsStats(ListVMsUsageHistoryCmd cmd); @@ -56,10 +62,10 @@ public interface MetricsService extends PluggableService { List listVmMetrics(List vmResponses); List listStoragePoolMetrics(List poolResponses); List listHostMetrics(List poolResponses); - List listManagementServerMetrics(List poolResponses); List listClusterMetrics(Pair, Integer> clusterResponses); List listZoneMetrics(List poolResponses); + List listManagementServerMetrics(List poolResponses); UsageServerMetricsResponse listUsageServerMetrics(); DbMetricsResponse listDbMetrics(); } diff --git a/plugins/metrics/src/main/java/org/apache/cloudstack/metrics/MetricsServiceImpl.java b/plugins/metrics/src/main/java/org/apache/cloudstack/metrics/MetricsServiceImpl.java index 6025a41d69cb..64ceddcc1a0c 100644 --- a/plugins/metrics/src/main/java/org/apache/cloudstack/metrics/MetricsServiceImpl.java +++ b/plugins/metrics/src/main/java/org/apache/cloudstack/metrics/MetricsServiceImpl.java @@ -61,6 +61,8 @@ import org.apache.cloudstack.api.response.ZoneResponse; import org.apache.cloudstack.cluster.ClusterDrsAlgorithm; import org.apache.cloudstack.context.CallContext; +import org.apache.cloudstack.framework.config.ConfigKey; +import org.apache.cloudstack.framework.config.Configurable; import org.apache.cloudstack.management.ManagementServerHost.State; import org.apache.cloudstack.response.ClusterMetricsResponse; import org.apache.cloudstack.response.DbMetricsResponse; @@ -110,8 +112,6 @@ import com.cloud.host.dao.HostDao; import com.cloud.network.router.VirtualRouter; import com.cloud.org.Cluster; -import com.cloud.org.Grouping; -import com.cloud.org.Managed; import com.cloud.server.DbStatsCollection; import com.cloud.server.ManagementServerHostStats; import com.cloud.server.StatsCollector; @@ -141,8 +141,7 @@ import com.cloud.vm.dao.VmStatsDao; import com.google.gson.Gson; -public class MetricsServiceImpl extends MutualExclusiveIdsManagerBase implements MetricsService { - +public class MetricsServiceImpl extends MutualExclusiveIdsManagerBase implements MetricsService, Configurable { @Inject private DataCenterDao dataCenterDao; @Inject @@ -197,7 +196,6 @@ private Double findRatioValue(final String value) { } private void updateHostMetrics(final HostMetrics hostMetrics, final HostJoinVO host) { - hostMetrics.incrTotalHosts(); hostMetrics.addCpuAllocated(host.getCpuReservedCapacity() + host.getCpuUsedCapacity()); hostMetrics.addMemoryAllocated(host.getMemReservedCapacity() + host.getMemUsedCapacity()); final HostStats hostStats = ApiDBUtils.getHostStatistics(host.getId()); @@ -561,22 +559,17 @@ public InfrastructureResponse listInfrastructure() { response.setZones(dataCenterDao.countAll()); response.setPods(podDao.countAll()); response.setClusters(clusterDao.countAll()); - response.setHosts(hostDao.countAllByType(Host.Type.Routing)); + Pair hostCountAndCpuSockets = hostDao.countAllHostsAndCPUSocketsByType(Host.Type.Routing); + response.setHosts(hostCountAndCpuSockets.first()); response.setStoragePools(storagePoolDao.countAll()); response.setImageStores(imageStoreDao.countAllImageStores()); response.setObjectStores(objectStoreDao.countAllObjectStores()); - response.setSystemvms(vmInstanceDao.listByTypes(VirtualMachine.Type.ConsoleProxy, VirtualMachine.Type.SecondaryStorageVm).size()); + response.setSystemvms(vmInstanceDao.countByTypes(VirtualMachine.Type.ConsoleProxy,
VirtualMachine.Type.SecondaryStorageVm)); response.setRouters(domainRouterDao.countAllByRole(VirtualRouter.Role.VIRTUAL_ROUTER)); response.setInternalLbs(domainRouterDao.countAllByRole(VirtualRouter.Role.INTERNAL_LB_VM)); response.setAlerts(alertDao.countAll()); - int cpuSockets = 0; - for (final Host host : hostDao.listByType(Host.Type.Routing)) { - if (host.getCpuSockets() != null) { - cpuSockets += host.getCpuSockets(); - } - } - response.setCpuSockets(cpuSockets); - response.setManagementServers(managementServerHostDao.listAll().size()); + response.setCpuSockets(hostCountAndCpuSockets.second()); + response.setManagementServers(managementServerHostDao.countAll()); return response; } @@ -764,38 +757,44 @@ public List listClusterMetrics(Pair> cpuList = new ArrayList<>(); - List> memoryList = new ArrayList<>(); - - for (final Host host: hostDao.findByClusterId(clusterId)) { - if (host == null || host.getType() != Host.Type.Routing) { - continue; + hostMetrics.setUpResources(Long.valueOf(hostDao.countAllInClusterByTypeAndStates(clusterId, Host.Type.Routing, List.of(Status.Up)))); + hostMetrics.setTotalResources(Long.valueOf(hostDao.countAllInClusterByTypeAndStates(clusterId, Host.Type.Routing, null))); + hostMetrics.setTotalHosts(hostMetrics.getTotalResources()); + + if (AllowListMetricsComputation.value()) { + List> cpuList = new ArrayList<>(); + List> memoryList = new ArrayList<>(); + for (final Host host : hostDao.findByClusterId(clusterId)) { + if (host == null || host.getType() != Host.Type.Routing) { + continue; + } + updateHostMetrics(hostMetrics, hostJoinDao.findById(host.getId())); + HostJoinVO hostJoin = hostJoinDao.findById(host.getId()); + cpuList.add(new Ternary<>(hostJoin.getCpuUsedCapacity(), hostJoin.getCpuReservedCapacity(), hostJoin.getCpus() * hostJoin.getSpeed())); + memoryList.add(new Ternary<>(hostJoin.getMemUsedCapacity(), hostJoin.getMemReservedCapacity(), hostJoin.getTotalMemory())); } - if (host.getStatus() == Status.Up) { - hostMetrics.incrUpResources(); + try { + Double imbalance = ClusterDrsAlgorithm.getClusterImbalance(clusterId, cpuList, memoryList, null); + metricsResponse.setDrsImbalance(imbalance.isNaN() ? null : 100.0 * imbalance); + } catch (ConfigurationException e) { + logger.warn("Failed to get cluster imbalance for cluster {}", clusterId, e); + } + } else { + if (cpuCapacity != null) { + hostMetrics.setCpuAllocated(cpuCapacity.getAllocatedCapacity()); + } + if (memoryCapacity != null) { + hostMetrics.setMemoryAllocated(memoryCapacity.getAllocatedCapacity()); } - hostMetrics.incrTotalResources(); - HostJoinVO hostJoin = hostJoinDao.findById(host.getId()); - updateHostMetrics(hostMetrics, hostJoin); - - cpuList.add(new Ternary<>(hostJoin.getCpuUsedCapacity(), hostJoin.getCpuReservedCapacity(), hostJoin.getCpus() * hostJoin.getSpeed())); - memoryList.add(new Ternary<>(hostJoin.getMemUsedCapacity(), hostJoin.getMemReservedCapacity(), hostJoin.getTotalMemory())); - } - - try { - Double imbalance = ClusterDrsAlgorithm.getClusterImbalance(clusterId, cpuList, memoryList, null); - metricsResponse.setDrsImbalance(imbalance.isNaN() ? 
null : 100.0 * imbalance); - } catch (ConfigurationException e) { - logger.warn("Failed to get cluster imbalance for cluster " + clusterId, e); } - metricsResponse.setState(clusterResponse.getAllocationState(), clusterResponse.getManagedState()); - metricsResponse.setResources(hostMetrics.getUpResources(), hostMetrics.getTotalResources()); addHostCpuMetricsToResponse(metricsResponse, clusterId, hostMetrics); addHostMemoryMetricsToResponse(metricsResponse, clusterId, hostMetrics); metricsResponse.setHasAnnotation(clusterResponse.hasAnnotation()); + metricsResponse.setState(clusterResponse.getAllocationState(), clusterResponse.getManagedState()); + metricsResponse.setResources(hostMetrics.getUpResources(), hostMetrics.getTotalResources()); + metricsResponses.add(metricsResponse); } return metricsResponses; @@ -942,35 +941,38 @@ public List listZoneMetrics(List zoneResponse final CapacityDaoImpl.SummedCapacity cpuCapacity = getCapacity((int) Capacity.CAPACITY_TYPE_CPU, zoneId, null); final CapacityDaoImpl.SummedCapacity memoryCapacity = getCapacity((int) Capacity.CAPACITY_TYPE_MEMORY, zoneId, null); final HostMetrics hostMetrics = new HostMetrics(cpuCapacity, memoryCapacity); + hostMetrics.setUpResources(Long.valueOf(clusterDao.countAllManagedAndEnabledByDcId(zoneId))); + hostMetrics.setTotalResources(Long.valueOf(clusterDao.countAllByDcId(zoneId))); + hostMetrics.setTotalHosts(Long.valueOf(hostDao.countAllByTypeInZone(zoneId, Host.Type.Routing))); - for (final Cluster cluster : clusterDao.listClustersByDcId(zoneId)) { - if (cluster == null) { - continue; - } - hostMetrics.incrTotalResources(); - if (cluster.getAllocationState() == Grouping.AllocationState.Enabled - && cluster.getManagedState() == Managed.ManagedState.Managed) { - hostMetrics.incrUpResources(); - } - - for (final Host host: hostDao.findByClusterId(cluster.getId())) { - if (host == null || host.getType() != Host.Type.Routing) { + if (AllowListMetricsComputation.value()) { + for (final Cluster cluster : clusterDao.listClustersByDcId(zoneId)) { + if (cluster == null) { continue; } - updateHostMetrics(hostMetrics, hostJoinDao.findById(host.getId())); + for (final Host host: hostDao.findByClusterId(cluster.getId())) { + if (host == null || host.getType() != Host.Type.Routing) { + continue; + } + updateHostMetrics(hostMetrics, hostJoinDao.findById(host.getId())); + } + } + } else { + if (cpuCapacity != null) { + hostMetrics.setCpuAllocated(cpuCapacity.getAllocatedCapacity()); + } + if (memoryCapacity != null) { + hostMetrics.setMemoryAllocated(memoryCapacity.getAllocatedCapacity()); } } + addHostCpuMetricsToResponse(metricsResponse, null, hostMetrics); + addHostMemoryMetricsToResponse(metricsResponse, null, hostMetrics); + metricsResponse.setHasAnnotation(zoneResponse.hasAnnotation()); metricsResponse.setState(zoneResponse.getAllocationState()); metricsResponse.setResource(hostMetrics.getUpResources(), hostMetrics.getTotalResources()); - final Long totalHosts = hostMetrics.getTotalHosts(); - // CPU - addHostCpuMetricsToResponse(metricsResponse, null, hostMetrics); - // Memory - addHostMemoryMetricsToResponse(metricsResponse, null, hostMetrics); - metricsResponses.add(metricsResponse); } return metricsResponses; @@ -1028,12 +1030,14 @@ public DbMetricsResponse listDbMetrics() { private void getQueryHistory(DbMetricsResponse response) { Map dbStats = ApiDBUtils.getDbStatistics(); - if (dbStats != null) { - response.setQueries((Long)dbStats.get(DbStatsCollection.queries)); - 
response.setUptime((Long)dbStats.get(DbStatsCollection.uptime)); + if (dbStats == null) { + return; } - List loadHistory = (List) dbStats.get(DbStatsCollection.loadAvarages); + response.setQueries((Long)dbStats.getOrDefault(DbStatsCollection.queries, -1L)); + response.setUptime((Long)dbStats.getOrDefault(DbStatsCollection.uptime, -1L)); + + List loadHistory = (List) dbStats.getOrDefault(DbStatsCollection.loadAvarages, new ArrayList()); double[] loadAverages = new double[loadHistory.size()]; int index = 0; @@ -1108,6 +1112,16 @@ public List> getCommands() { return cmdList; } + @Override + public String getConfigComponentName() { + return MetricsService.class.getSimpleName(); + } + + @Override + public ConfigKey[] getConfigKeys() { + return new ConfigKey[] {AllowListMetricsComputation}; + } + private class HostMetrics { // CPU metrics private Long totalCpu = 0L; @@ -1133,6 +1147,14 @@ public HostMetrics(final CapacityDaoImpl.SummedCapacity totalCpu, final Capacity } } + public void setCpuAllocated(Long cpuAllocated) { + this.cpuAllocated = cpuAllocated; + } + + public void setMemoryAllocated(Long memoryAllocated) { + this.memoryAllocated = memoryAllocated; + } + public void addCpuAllocated(Long cpuAllocated) { this.cpuAllocated += cpuAllocated; } @@ -1161,16 +1183,16 @@ public void setMaximumMemoryUsage(Long maximumMemoryUsage) { } } - public void incrTotalHosts() { - this.totalHosts++; + public void setTotalHosts(Long totalHosts) { + this.totalHosts = totalHosts; } - public void incrTotalResources() { - this.totalResources++; + public void setTotalResources(Long totalResources) { + this.totalResources = totalResources; } - public void incrUpResources() { - this.upResources++; + public void setUpResources(Long upResources) { + this.upResources = upResources; } public Long getTotalCpu() { diff --git a/plugins/storage/volume/default/src/main/java/org/apache/cloudstack/storage/datastore/lifecycle/CloudStackPrimaryDataStoreLifeCycleImpl.java b/plugins/storage/volume/default/src/main/java/org/apache/cloudstack/storage/datastore/lifecycle/CloudStackPrimaryDataStoreLifeCycleImpl.java index bc66e2ff1360..db6a8b4e778d 100644 --- a/plugins/storage/volume/default/src/main/java/org/apache/cloudstack/storage/datastore/lifecycle/CloudStackPrimaryDataStoreLifeCycleImpl.java +++ b/plugins/storage/volume/default/src/main/java/org/apache/cloudstack/storage/datastore/lifecycle/CloudStackPrimaryDataStoreLifeCycleImpl.java @@ -18,6 +18,26 @@ */ package org.apache.cloudstack.storage.datastore.lifecycle; +import java.util.Collections; +import java.util.List; +import java.util.Map; +import java.util.UUID; +import java.util.stream.Collectors; + +import javax.inject.Inject; + +import org.apache.cloudstack.engine.subsystem.api.storage.ClusterScope; +import org.apache.cloudstack.engine.subsystem.api.storage.DataStore; +import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreManager; +import org.apache.cloudstack.engine.subsystem.api.storage.HostScope; +import org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStoreInfo; +import org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStoreLifeCycle; +import org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStoreParameters; +import org.apache.cloudstack.engine.subsystem.api.storage.ZoneScope; +import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao; +import org.apache.cloudstack.storage.datastore.db.StoragePoolVO; +import org.apache.cloudstack.storage.volume.datastore.PrimaryDataStoreHelper; + import 
com.cloud.agent.AgentManager; import com.cloud.agent.api.Answer; import com.cloud.agent.api.CreateStoragePoolCommand; @@ -45,6 +65,7 @@ import com.cloud.storage.dao.VolumeDao; import com.cloud.user.dao.UserDao; import com.cloud.utils.NumbersUtil; +import com.cloud.utils.Pair; import com.cloud.utils.db.DB; import com.cloud.utils.exception.CloudRuntimeException; import com.cloud.vm.VirtualMachineManager; @@ -53,23 +74,6 @@ import com.cloud.vm.dao.SecondaryStorageVmDao; import com.cloud.vm.dao.UserVmDao; import com.cloud.vm.dao.VMInstanceDao; -import org.apache.cloudstack.engine.subsystem.api.storage.ClusterScope; -import org.apache.cloudstack.engine.subsystem.api.storage.DataStore; -import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreManager; -import org.apache.cloudstack.engine.subsystem.api.storage.HostScope; -import org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStoreInfo; -import org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStoreLifeCycle; -import org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStoreParameters; -import org.apache.cloudstack.engine.subsystem.api.storage.ZoneScope; -import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao; -import org.apache.cloudstack.storage.datastore.db.StoragePoolVO; -import org.apache.cloudstack.storage.volume.datastore.PrimaryDataStoreHelper; - -import javax.inject.Inject; -import java.util.ArrayList; -import java.util.List; -import java.util.Map; -import java.util.UUID; public class CloudStackPrimaryDataStoreLifeCycleImpl extends BasePrimaryDataStoreLifeCycleImpl implements PrimaryDataStoreLifeCycle { @Inject @@ -318,16 +322,15 @@ public DataStore initialize(Map dsInfos) { private void validateVcenterDetails(Long zoneId, Long podId, Long clusterId, String storageHost) { - List allHosts = - _resourceMgr.listAllUpHosts(Host.Type.Routing, clusterId, podId, zoneId); - if (allHosts.isEmpty()) { + List allHostIds = _hostDao.listIdsForUpRouting(zoneId, podId, clusterId); + if (allHostIds.isEmpty()) { throw new CloudRuntimeException("No host up to associate a storage pool with in zone: " + zoneId + " pod: " + podId + " cluster: " + clusterId); } boolean success = false; - for (HostVO h : allHosts) { + for (Long hId : allHostIds) { ValidateVcenterDetailsCommand cmd = new ValidateVcenterDetailsCommand(storageHost); - final Answer answer = agentMgr.easySend(h.getId(), cmd); + final Answer answer = agentMgr.easySend(hId, cmd); if (answer != null && answer.getResult()) { logger.info("Successfully validated vCenter details provided"); return; @@ -335,8 +338,7 @@ private void validateVcenterDetails(Long zoneId, Long podId, Long clusterId, Str if (answer != null) { throw new InvalidParameterValueException("Provided vCenter server details does not match with the existing vCenter in zone id: " + zoneId); } else { - String msg = "Can not validate vCenter through host " + h.getId() + " due to ValidateVcenterDetailsCommand returns null"; - logger.warn(msg); + logger.warn("Can not validate vCenter through host {} due to ValidateVcenterDetailsCommand returns null", hId); } } } @@ -373,85 +375,54 @@ protected boolean createStoragePool(long hostId, StoragePool pool) { } } - @Override - public boolean attachCluster(DataStore store, ClusterScope scope) { - PrimaryDataStoreInfo primarystore = (PrimaryDataStoreInfo)store; - // Check if there is host up in this cluster - List allHosts = - _resourceMgr.listAllUpHosts(Host.Type.Routing, primarystore.getClusterId(), primarystore.getPodId(), 
primarystore.getDataCenterId()); + private Pair, Boolean> prepareOcfs2NodesIfNeeded(PrimaryDataStoreInfo primaryStore) { + if (!StoragePoolType.OCFS2.equals(primaryStore.getPoolType())) { + return new Pair<>(_hostDao.listIdsForUpRouting(primaryStore.getDataCenterId(), + primaryStore.getPodId(), primaryStore.getClusterId()), true); + } + List allHosts = _resourceMgr.listAllUpHosts(Host.Type.Routing, primaryStore.getClusterId(), + primaryStore.getPodId(), primaryStore.getDataCenterId()); if (allHosts.isEmpty()) { - primaryDataStoreDao.expunge(primarystore.getId()); - throw new CloudRuntimeException("No host up to associate a storage pool with in cluster " + primarystore.getClusterId()); + return new Pair<>(Collections.emptyList(), true); + } + List hostIds = allHosts.stream().map(HostVO::getId).collect(Collectors.toList()); + if (!_ocfs2Mgr.prepareNodes(allHosts, primaryStore)) { + return new Pair<>(hostIds, false); } + return new Pair<>(hostIds, true); + } - if (primarystore.getPoolType() == StoragePoolType.OCFS2 && !_ocfs2Mgr.prepareNodes(allHosts, primarystore)) { - logger.warn("Can not create storage pool " + primarystore + " on cluster " + primarystore.getClusterId()); - primaryDataStoreDao.expunge(primarystore.getId()); + @Override + public boolean attachCluster(DataStore store, ClusterScope scope) { + PrimaryDataStoreInfo primaryStore = (PrimaryDataStoreInfo)store; + Pair, Boolean> result = prepareOcfs2NodesIfNeeded(primaryStore); + List hostIds = result.first(); + if (hostIds.isEmpty()) { + primaryDataStoreDao.expunge(primaryStore.getId()); + throw new CloudRuntimeException("No host up to associate a storage pool with in cluster " + primaryStore.getClusterId()); + } + if (!result.second()) { + logger.warn("Can not create storage pool {} on cluster {}", primaryStore, primaryStore.getClusterId()); + primaryDataStoreDao.expunge(primaryStore.getId()); return false; } - - boolean success = false; - for (HostVO h : allHosts) { - success = createStoragePool(h.getId(), primarystore); - if (success) { + for (Long hId : hostIds) { + if (createStoragePool(hId, primaryStore)) { break; } } - logger.debug("In createPool Adding the pool to each of the hosts"); - List poolHosts = new ArrayList(); - for (HostVO h : allHosts) { - try { - storageMgr.connectHostToSharedPool(h.getId(), primarystore.getId()); - poolHosts.add(h); - } catch (StorageConflictException se) { - primaryDataStoreDao.expunge(primarystore.getId()); - throw new CloudRuntimeException("Storage has already been added as local storage"); - } catch (Exception e) { - logger.warn("Unable to establish a connection between " + h + " and " + primarystore, e); - String reason = storageMgr.getStoragePoolMountFailureReason(e.getMessage()); - if (reason != null) { - throw new CloudRuntimeException(reason); - } - } - } - - if (poolHosts.isEmpty()) { - logger.warn("No host can access storage pool " + primarystore + " on cluster " + primarystore.getClusterId()); - primaryDataStoreDao.expunge(primarystore.getId()); - throw new CloudRuntimeException("Failed to access storage pool"); - } - + storageMgr.connectHostsToPool(store, hostIds, scope, true, true); dataStoreHelper.attachCluster(store); return true; } @Override - public boolean attachZone(DataStore dataStore, ZoneScope scope, HypervisorType hypervisorType) { - List hosts = _resourceMgr.listAllUpHostsInOneZoneByHypervisor(hypervisorType, scope.getScopeId()); + public boolean attachZone(DataStore store, ZoneScope scope, HypervisorType hypervisorType) { + List hostIds = 
_hostDao.listIdsForUpEnabledByZoneAndHypervisor(scope.getScopeId(), hypervisorType); logger.debug("In createPool. Attaching the pool to each of the hosts."); - List poolHosts = new ArrayList(); - for (HostVO host : hosts) { - try { - storageMgr.connectHostToSharedPool(host.getId(), dataStore.getId()); - poolHosts.add(host); - } catch (StorageConflictException se) { - primaryDataStoreDao.expunge(dataStore.getId()); - throw new CloudRuntimeException("Storage has already been added as local storage to host: " + host.getName()); - } catch (Exception e) { - logger.warn("Unable to establish a connection between " + host + " and " + dataStore, e); - String reason = storageMgr.getStoragePoolMountFailureReason(e.getMessage()); - if (reason != null) { - throw new CloudRuntimeException(reason); - } - } - } - if (poolHosts.isEmpty()) { - logger.warn("No host can access storage pool " + dataStore + " in this zone."); - primaryDataStoreDao.expunge(dataStore.getId()); - throw new CloudRuntimeException("Failed to create storage pool as it is not accessible to hosts."); - } - dataStoreHelper.attachZone(dataStore, hypervisorType); + storageMgr.connectHostsToPool(store, hostIds, scope, true, true); + dataStoreHelper.attachZone(store, hypervisorType); return true; } diff --git a/plugins/storage/volume/default/src/test/java/org/apache/cloudstack/storage/datastore/lifecycle/CloudStackPrimaryDataStoreLifeCycleImplTest.java b/plugins/storage/volume/default/src/test/java/org/apache/cloudstack/storage/datastore/lifecycle/CloudStackPrimaryDataStoreLifeCycleImplTest.java index 924c98b7912f..09acd64885dc 100644 --- a/plugins/storage/volume/default/src/test/java/org/apache/cloudstack/storage/datastore/lifecycle/CloudStackPrimaryDataStoreLifeCycleImplTest.java +++ b/plugins/storage/volume/default/src/test/java/org/apache/cloudstack/storage/datastore/lifecycle/CloudStackPrimaryDataStoreLifeCycleImplTest.java @@ -19,23 +19,13 @@ package org.apache.cloudstack.storage.datastore.lifecycle; -import com.cloud.agent.AgentManager; -import com.cloud.agent.api.ModifyStoragePoolAnswer; -import com.cloud.agent.api.ModifyStoragePoolCommand; -import com.cloud.agent.api.StoragePoolInfo; -import com.cloud.exception.StorageConflictException; -import com.cloud.host.Host; -import com.cloud.host.HostVO; -import com.cloud.host.Status; -import com.cloud.resource.ResourceManager; -import com.cloud.resource.ResourceState; -import com.cloud.storage.DataStoreRole; -import com.cloud.storage.Storage; -import com.cloud.storage.StorageManager; -import com.cloud.storage.StorageManagerImpl; -import com.cloud.storage.dao.StoragePoolHostDao; -import com.cloud.utils.exception.CloudRuntimeException; -import junit.framework.TestCase; +import static org.mockito.ArgumentMatchers.anyLong; +import static org.mockito.ArgumentMatchers.anyString; +import static org.mockito.ArgumentMatchers.eq; +import static org.mockito.Mockito.when; + +import java.util.List; + import org.apache.cloudstack.engine.subsystem.api.storage.ClusterScope; import org.apache.cloudstack.engine.subsystem.api.storage.DataStore; import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreManager; @@ -58,14 +48,22 @@ import org.mockito.MockitoAnnotations; import org.mockito.junit.MockitoJUnitRunner; import org.springframework.test.util.ReflectionTestUtils; -import java.util.ArrayList; -import java.util.List; -import java.util.UUID; -import static org.mockito.ArgumentMatchers.anyLong; -import static org.mockito.ArgumentMatchers.anyString; -import static org.mockito.ArgumentMatchers.eq; 
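attachCluster and attachZone above hand the whole host-id list to storageMgr.connectHostsToPool instead of looping over connectHostToSharedPool per host. The real method's behaviour is only inferred from these call sites; the sketch below shows the kind of per-host loop such a bulk helper is assumed to encapsulate (PoolConnector and HostConnector are hypothetical, not CloudStack APIs).

import java.util.ArrayList;
import java.util.List;

// Hypothetical helper, not the real StorageManager API; semantics are inferred from the call sites above.
class PoolConnector {
    interface HostConnector {
        boolean connect(long hostId, long poolId) throws Exception;
    }

    // Tries every host, remembers which ones connected, and fails only if none did.
    static List<Long> connectHostsToPool(long poolId, List<Long> hostIds, HostConnector connector) {
        List<Long> connected = new ArrayList<>();
        for (Long hostId : hostIds) {
            try {
                if (connector.connect(hostId, poolId)) {
                    connected.add(hostId);
                }
            } catch (Exception e) {
                System.err.println("Unable to connect host " + hostId + " to pool " + poolId + ": " + e.getMessage());
            }
        }
        if (connected.isEmpty()) {
            throw new IllegalStateException("No host can access storage pool " + poolId);
        }
        return connected;
    }
}

Collecting the successes and failing only when the list ends up empty preserves the old "at least one host must see the pool" behaviour while keeping each call site a one-liner.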
-import static org.mockito.Mockito.when; +import com.cloud.agent.AgentManager; +import com.cloud.agent.api.ModifyStoragePoolAnswer; +import com.cloud.agent.api.ModifyStoragePoolCommand; +import com.cloud.agent.api.StoragePoolInfo; +import com.cloud.exception.StorageConflictException; +import com.cloud.host.dao.HostDao; +import com.cloud.resource.ResourceManager; +import com.cloud.storage.DataStoreRole; +import com.cloud.storage.Storage; +import com.cloud.storage.StorageManager; +import com.cloud.storage.StorageManagerImpl; +import com.cloud.storage.dao.StoragePoolHostDao; +import com.cloud.utils.exception.CloudRuntimeException; + +import junit.framework.TestCase; /** * Created by ajna123 on 9/22/2015. @@ -118,6 +116,9 @@ public class CloudStackPrimaryDataStoreLifeCycleImplTest extends TestCase { @Mock PrimaryDataStoreHelper primaryDataStoreHelper; + @Mock + HostDao hostDao; + AutoCloseable closeable; @Before @@ -129,17 +130,6 @@ public void initMocks() throws StorageConflictException { ReflectionTestUtils.setField(storageMgr, "_dataStoreMgr", _dataStoreMgr); ReflectionTestUtils.setField(_cloudStackPrimaryDataStoreLifeCycle, "storageMgr", storageMgr); - List hostList = new ArrayList(); - HostVO host1 = new HostVO(1L, "aa01", Host.Type.Routing, "192.168.1.1", "255.255.255.0", null, null, null, null, null, null, null, null, null, null, - UUID.randomUUID().toString(), Status.Up, "1.0", null, null, 1L, null, 0, 0, "aa", 0, Storage.StoragePoolType.NetworkFilesystem); - HostVO host2 = new HostVO(1L, "aa02", Host.Type.Routing, "192.168.1.1", "255.255.255.0", null, null, null, null, null, null, null, null, null, null, - UUID.randomUUID().toString(), Status.Up, "1.0", null, null, 1L, null, 0, 0, "aa", 0, Storage.StoragePoolType.NetworkFilesystem); - - host1.setResourceState(ResourceState.Enabled); - host2.setResourceState(ResourceState.Disabled); - hostList.add(host1); - hostList.add(host2); - when(_dataStoreMgr.getDataStore(anyLong(), eq(DataStoreRole.Primary))).thenReturn(store); when(store.getPoolType()).thenReturn(Storage.StoragePoolType.NetworkFilesystem); when(store.isShared()).thenReturn(true); @@ -154,7 +144,8 @@ public void initMocks() throws StorageConflictException { storageMgr.registerHostListener("default", hostListener); - when(_resourceMgr.listAllUpHosts(eq(Host.Type.Routing), anyLong(), anyLong(), anyLong())).thenReturn(hostList); + when(hostDao.listIdsForUpRouting(anyLong(), anyLong(), anyLong())) + .thenReturn(List.of(1L, 2L)); when(agentMgr.easySend(anyLong(), Mockito.any(ModifyStoragePoolCommand.class))).thenReturn(answer); when(answer.getResult()).thenReturn(true); @@ -173,18 +164,17 @@ public void testAttachCluster() throws Exception { } @Test - public void testAttachClusterException() throws Exception { - String exceptionString = "Mount failed due to incorrect mount options."; + public void testAttachClusterException() { String mountFailureReason = "Incorrect mount option specified."; - CloudRuntimeException exception = new CloudRuntimeException(exceptionString); + ClusterScope scope = new ClusterScope(1L, 1L, 1L); + CloudRuntimeException exception = new CloudRuntimeException(mountFailureReason); StorageManager storageManager = Mockito.mock(StorageManager.class); - Mockito.when(storageManager.connectHostToSharedPool(Mockito.anyLong(), Mockito.anyLong())).thenThrow(exception); - Mockito.when(storageManager.getStoragePoolMountFailureReason(exceptionString)).thenReturn(mountFailureReason); + Mockito.doThrow(exception).when(storageManager).connectHostsToPool(Mockito.eq(store), 
Mockito.anyList(), Mockito.eq(scope), Mockito.eq(true), Mockito.eq(true)); ReflectionTestUtils.setField(_cloudStackPrimaryDataStoreLifeCycle, "storageMgr", storageManager); try { - _cloudStackPrimaryDataStoreLifeCycle.attachCluster(store, new ClusterScope(1L, 1L, 1L)); + _cloudStackPrimaryDataStoreLifeCycle.attachCluster(store, scope); Assert.fail(); } catch (Exception e) { Assert.assertEquals(e.getMessage(), mountFailureReason); diff --git a/plugins/storage/volume/scaleio/src/main/java/org/apache/cloudstack/storage/datastore/lifecycle/ScaleIOPrimaryDataStoreLifeCycle.java b/plugins/storage/volume/scaleio/src/main/java/org/apache/cloudstack/storage/datastore/lifecycle/ScaleIOPrimaryDataStoreLifeCycle.java index 7bbe0331c071..9747acf29051 100644 --- a/plugins/storage/volume/scaleio/src/main/java/org/apache/cloudstack/storage/datastore/lifecycle/ScaleIOPrimaryDataStoreLifeCycle.java +++ b/plugins/storage/volume/scaleio/src/main/java/org/apache/cloudstack/storage/datastore/lifecycle/ScaleIOPrimaryDataStoreLifeCycle.java @@ -24,17 +24,12 @@ import java.net.URLDecoder; import java.security.KeyManagementException; import java.security.NoSuchAlgorithmException; -import java.util.ArrayList; import java.util.List; import java.util.Map; import java.util.UUID; import javax.inject.Inject; -import org.apache.cloudstack.storage.datastore.client.ScaleIOGatewayClientConnectionPool; -import org.apache.cloudstack.storage.datastore.db.StoragePoolDetailsDao; -import org.apache.cloudstack.storage.datastore.util.ScaleIOUtil; -import org.apache.commons.collections.CollectionUtils; import org.apache.cloudstack.engine.subsystem.api.storage.ClusterScope; import org.apache.cloudstack.engine.subsystem.api.storage.DataStore; import org.apache.cloudstack.engine.subsystem.api.storage.HostScope; @@ -44,9 +39,13 @@ import org.apache.cloudstack.engine.subsystem.api.storage.ZoneScope; import org.apache.cloudstack.storage.datastore.api.StoragePoolStatistics; import org.apache.cloudstack.storage.datastore.client.ScaleIOGatewayClient; +import org.apache.cloudstack.storage.datastore.client.ScaleIOGatewayClientConnectionPool; import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao; +import org.apache.cloudstack.storage.datastore.db.StoragePoolDetailsDao; import org.apache.cloudstack.storage.datastore.db.StoragePoolVO; +import org.apache.cloudstack.storage.datastore.util.ScaleIOUtil; import org.apache.cloudstack.storage.volume.datastore.PrimaryDataStoreHelper; +import org.apache.commons.collections.CollectionUtils; import com.cloud.agent.AgentManager; import com.cloud.agent.api.Answer; @@ -56,8 +55,7 @@ import com.cloud.dc.ClusterVO; import com.cloud.dc.dao.ClusterDao; import com.cloud.exception.InvalidParameterValueException; -import com.cloud.host.Host; -import com.cloud.host.HostVO; +import com.cloud.host.dao.HostDao; import com.cloud.hypervisor.Hypervisor; import com.cloud.resource.ResourceManager; import com.cloud.storage.Storage; @@ -77,6 +75,8 @@ public class ScaleIOPrimaryDataStoreLifeCycle extends BasePrimaryDataStoreLifeCy @Inject private ClusterDao clusterDao; @Inject + private HostDao hostDao; + @Inject private PrimaryDataStoreDao primaryDataStoreDao; @Inject private StoragePoolDetailsDao storagePoolDetailsDao; @@ -258,28 +258,15 @@ public boolean attachCluster(DataStore dataStore, ClusterScope scope) { } PrimaryDataStoreInfo primaryDataStoreInfo = (PrimaryDataStoreInfo) dataStore; - List hostsInCluster = resourceManager.listAllUpAndEnabledHosts(Host.Type.Routing, primaryDataStoreInfo.getClusterId(), - 
primaryDataStoreInfo.getPodId(), primaryDataStoreInfo.getDataCenterId()); - if (hostsInCluster.isEmpty()) { + List hostIds = hostDao.listIdsForUpRouting(primaryDataStoreInfo.getDataCenterId(), + primaryDataStoreInfo.getPodId(), primaryDataStoreInfo.getClusterId()); + if (hostIds.isEmpty()) { primaryDataStoreDao.expunge(primaryDataStoreInfo.getId()); throw new CloudRuntimeException("No hosts are Up to associate a storage pool with in cluster: " + primaryDataStoreInfo.getClusterId()); } - logger.debug("Attaching the pool to each of the hosts in the cluster: " + primaryDataStoreInfo.getClusterId()); - List poolHosts = new ArrayList(); - for (HostVO host : hostsInCluster) { - try { - if (storageMgr.connectHostToSharedPool(host.getId(), primaryDataStoreInfo.getId())) { - poolHosts.add(host); - } - } catch (Exception e) { - logger.warn("Unable to establish a connection between host: " + host + " and pool: " + dataStore + "on the cluster: " + primaryDataStoreInfo.getClusterId(), e); - } - } - - if (poolHosts.isEmpty()) { - logger.warn("No host can access storage pool '" + primaryDataStoreInfo + "' on cluster '" + primaryDataStoreInfo.getClusterId() + "'."); - } + logger.debug("Attaching the pool to each of the hosts in the cluster: {}", primaryDataStoreInfo.getClusterId()); + storageMgr.connectHostsToPool(dataStore, hostIds, scope, false, false); dataStoreHelper.attachCluster(dataStore); return true; @@ -296,21 +283,9 @@ public boolean attachZone(DataStore dataStore, ZoneScope scope, Hypervisor.Hyper throw new CloudRuntimeException("Unsupported hypervisor type: " + hypervisorType.toString()); } - logger.debug("Attaching the pool to each of the hosts in the zone: " + scope.getScopeId()); - List hosts = resourceManager.listAllUpAndEnabledHostsInOneZoneByHypervisor(hypervisorType, scope.getScopeId()); - List poolHosts = new ArrayList(); - for (HostVO host : hosts) { - try { - if (storageMgr.connectHostToSharedPool(host.getId(), dataStore.getId())) { - poolHosts.add(host); - } - } catch (Exception e) { - logger.warn("Unable to establish a connection between host: " + host + " and pool: " + dataStore + "in the zone: " + scope.getScopeId(), e); - } - } - if (poolHosts.isEmpty()) { - logger.warn("No host can access storage pool " + dataStore + " in the zone: " + scope.getScopeId()); - } + logger.debug("Attaching the pool to each of the hosts in the zone: {}", scope.getScopeId()); + List hostIds = hostDao.listIdsForUpEnabledByZoneAndHypervisor(scope.getScopeId(), hypervisorType); + storageMgr.connectHostsToPool(dataStore, hostIds, scope, false, false); dataStoreHelper.attachZone(dataStore); return true; diff --git a/plugins/storage/volume/scaleio/src/test/java/org/apache/cloudstack/storage/datastore/lifecycle/ScaleIOPrimaryDataStoreLifeCycleTest.java b/plugins/storage/volume/scaleio/src/test/java/org/apache/cloudstack/storage/datastore/lifecycle/ScaleIOPrimaryDataStoreLifeCycleTest.java index 52dcad519421..3d69ff86704e 100644 --- a/plugins/storage/volume/scaleio/src/test/java/org/apache/cloudstack/storage/datastore/lifecycle/ScaleIOPrimaryDataStoreLifeCycleTest.java +++ b/plugins/storage/volume/scaleio/src/test/java/org/apache/cloudstack/storage/datastore/lifecycle/ScaleIOPrimaryDataStoreLifeCycleTest.java @@ -30,7 +30,6 @@ import java.util.ArrayList; import java.util.List; -import java.util.UUID; import org.apache.cloudstack.engine.subsystem.api.storage.DataStore; import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreManager; @@ -56,15 +55,11 @@ import org.mockito.Mockito; import 
org.mockito.MockitoAnnotations; import org.mockito.junit.MockitoJUnitRunner; +import org.springframework.test.util.ReflectionTestUtils; -import com.cloud.host.Host; -import com.cloud.host.HostVO; -import com.cloud.host.Status; +import com.cloud.host.dao.HostDao; import com.cloud.hypervisor.Hypervisor; -import com.cloud.resource.ResourceManager; -import com.cloud.resource.ResourceState; import com.cloud.storage.DataStoreRole; -import com.cloud.storage.Storage; import com.cloud.storage.StorageManager; import com.cloud.storage.StorageManagerImpl; import com.cloud.storage.StoragePoolAutomation; @@ -73,7 +68,6 @@ import com.cloud.storage.dao.StoragePoolHostDao; import com.cloud.template.TemplateManager; import com.cloud.utils.exception.CloudRuntimeException; -import org.springframework.test.util.ReflectionTestUtils; @RunWith(MockitoJUnitRunner.class) public class ScaleIOPrimaryDataStoreLifeCycleTest { @@ -85,8 +79,6 @@ public class ScaleIOPrimaryDataStoreLifeCycleTest { @Mock private PrimaryDataStoreHelper dataStoreHelper; @Mock - private ResourceManager resourceManager; - @Mock private StoragePoolAutomation storagePoolAutomation; @Mock private StoragePoolHostDao storagePoolHostDao; @@ -100,6 +92,8 @@ public class ScaleIOPrimaryDataStoreLifeCycleTest { private PrimaryDataStore store; @Mock private TemplateManager templateMgr; + @Mock + HostDao hostDao; @InjectMocks private StorageManager storageMgr = new StorageManagerImpl(); @@ -137,17 +131,8 @@ public void testAttachZone() throws Exception { final ZoneScope scope = new ZoneScope(1L); - List hostList = new ArrayList(); - HostVO host1 = new HostVO(1L, "host01", Host.Type.Routing, "192.168.1.1", "255.255.255.0", null, null, null, null, null, null, null, null, null, null, - UUID.randomUUID().toString(), Status.Up, "1.0", null, null, 1L, null, 0, 0, "aa", 0, Storage.StoragePoolType.PowerFlex); - HostVO host2 = new HostVO(2L, "host02", Host.Type.Routing, "192.168.1.2", "255.255.255.0", null, null, null, null, null, null, null, null, null, null, - UUID.randomUUID().toString(), Status.Up, "1.0", null, null, 1L, null, 0, 0, "aa", 0, Storage.StoragePoolType.PowerFlex); - - host1.setResourceState(ResourceState.Enabled); - host2.setResourceState(ResourceState.Enabled); - hostList.add(host1); - hostList.add(host2); - when(resourceManager.listAllUpAndEnabledHostsInOneZoneByHypervisor(Hypervisor.HypervisorType.KVM, 1L)).thenReturn(hostList); + when(hostDao.listIdsForUpEnabledByZoneAndHypervisor(scope.getScopeId(), Hypervisor.HypervisorType.KVM)) + .thenReturn(List.of(1L, 2L)); when(dataStoreMgr.getDataStore(anyLong(), eq(DataStoreRole.Primary))).thenReturn(store); when(store.getId()).thenReturn(1L); diff --git a/plugins/storage/volume/storpool/src/main/java/org/apache/cloudstack/storage/datastore/util/StorPoolHelper.java b/plugins/storage/volume/storpool/src/main/java/org/apache/cloudstack/storage/datastore/util/StorPoolHelper.java index 3113ae8fdaaf..f13d296af3b0 100644 --- a/plugins/storage/volume/storpool/src/main/java/org/apache/cloudstack/storage/datastore/util/StorPoolHelper.java +++ b/plugins/storage/volume/storpool/src/main/java/org/apache/cloudstack/storage/datastore/util/StorPoolHelper.java @@ -219,17 +219,17 @@ public static void setSpClusterIdIfNeeded(long hostId, String clusterId, Cluster } public static Long findClusterIdByGlobalId(String globalId, ClusterDao clusterDao) { - List clusterVo = clusterDao.listAll(); - if (clusterVo.size() == 1) { + List clusterIds = clusterDao.listAllIds(); + if (clusterIds.size() == 1) { 
StorPoolUtil.spLog("There is only one cluster, sending backup to secondary command"); return null; } - for (ClusterVO clusterVO2 : clusterVo) { - if (globalId != null && StorPoolConfigurationManager.StorPoolClusterId.valueIn(clusterVO2.getId()) != null - && globalId.contains(StorPoolConfigurationManager.StorPoolClusterId.valueIn(clusterVO2.getId()).toString())) { - StorPoolUtil.spLog("Found cluster with id=%s for object with globalId=%s", clusterVO2.getId(), + for (Long clusterId : clusterIds) { + if (globalId != null && StorPoolConfigurationManager.StorPoolClusterId.valueIn(clusterId) != null + && globalId.contains(StorPoolConfigurationManager.StorPoolClusterId.valueIn(clusterId))) { + StorPoolUtil.spLog("Found cluster with id=%s for object with globalId=%s", clusterId, globalId); - return clusterVO2.getId(); + return clusterId; } } throw new CloudRuntimeException( diff --git a/server/src/main/java/com/cloud/alert/AlertManagerImpl.java b/server/src/main/java/com/cloud/alert/AlertManagerImpl.java index 4c4f08f12bd7..0fc726f78474 100644 --- a/server/src/main/java/com/cloud/alert/AlertManagerImpl.java +++ b/server/src/main/java/com/cloud/alert/AlertManagerImpl.java @@ -26,8 +26,11 @@ import java.util.Map; import java.util.Set; import java.util.Timer; +import java.util.concurrent.ConcurrentHashMap; +import java.util.concurrent.ExecutionException; import java.util.concurrent.ExecutorService; import java.util.concurrent.Executors; +import java.util.concurrent.Future; import javax.inject.Inject; import javax.mail.MessagingException; @@ -72,12 +75,11 @@ import com.cloud.event.EventTypes; import com.cloud.host.Host; import com.cloud.host.HostVO; +import com.cloud.host.dao.HostDao; import com.cloud.network.Ipv6Service; import com.cloud.network.dao.IPAddressDao; import com.cloud.org.Grouping.AllocationState; import com.cloud.resource.ResourceManager; -import com.cloud.service.ServiceOfferingVO; -import com.cloud.service.dao.ServiceOfferingDao; import com.cloud.storage.StorageManager; import com.cloud.utils.Pair; import com.cloud.utils.component.ManagerBase; @@ -121,9 +123,9 @@ public class AlertManagerImpl extends ManagerBase implements AlertManager, Confi @Inject protected ConfigDepot _configDepot; @Inject - ServiceOfferingDao _offeringsDao; - @Inject Ipv6Service ipv6Service; + @Inject + HostDao hostDao; private Timer _timer = null; private long _capacityCheckPeriod = 60L * 60L * 1000L; // One hour by default. 
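The AlertManagerImpl hunk that follows adds recalculateHostCapacities and recalculateStorageCapacities, which fan per-entity work out to a pool bounded by CapacityManager.CapacityCalculateWorkers, wait on every future, and log individual failures without aborting the rest. A standalone sketch of that fan-out/fan-in shape (the worker-count config and DAO lookups are simplified away):

import java.util.List;
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;
import java.util.function.LongConsumer;

public class BoundedFanOut {
    // Runs recalcOne for every id on a pool bounded by maxWorkers, waits for all tasks,
    // and logs (rather than rethrows) individual failures -- the shape of the hunk below.
    static void recalculateAll(List<Long> ids, int maxWorkers, LongConsumer recalcOne) {
        if (ids.isEmpty()) {
            return;
        }
        ExecutorService pool = Executors.newFixedThreadPool(Math.max(1, Math.min(maxWorkers, ids.size())));
        Map<Long, Future<?>> futures = new ConcurrentHashMap<>();
        for (Long id : ids) {
            futures.put(id, pool.submit(() -> recalcOne.accept(id)));
        }
        for (Map.Entry<Long, Future<?>> entry : futures.entrySet()) {
            try {
                entry.getValue().get();
            } catch (InterruptedException | ExecutionException e) {
                System.err.printf("Recalculation failed for id %d: %s%n", entry.getKey(), e.getMessage());
            }
        }
        pool.shutdown();
    }

    public static void main(String[] args) {
        recalculateAll(List.of(1L, 2L, 3L), 8, id -> System.out.println("recalculated " + id));
    }
}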
@@ -257,6 +259,64 @@ public void sendAlert(AlertType alertType, long dataCenterId, Long podId, String } } + protected void recalculateHostCapacities() { + // Calculate CPU and RAM capacities + List hostIds = hostDao.listIdsByType(Host.Type.Routing); + if (hostIds.isEmpty()) { + return; + } + ConcurrentHashMap> futures = new ConcurrentHashMap<>(); + ExecutorService executorService = Executors.newFixedThreadPool(Math.max(1, + Math.min(CapacityManager.CapacityCalculateWorkers.value(), hostIds.size()))); + for (Long hostId : hostIds) { + futures.put(hostId, executorService.submit(() -> { + final HostVO host = hostDao.findById(hostId); + _capacityMgr.updateCapacityForHost(host); + return null; + })); + } + for (Map.Entry> entry: futures.entrySet()) { + try { + entry.getValue().get(); + } catch (InterruptedException | ExecutionException e) { + logger.error(String.format("Error during capacity calculation for host: %d due to : %s", + entry.getKey(), e.getMessage()), e); + } + } + executorService.shutdown(); + } + + protected void recalculateStorageCapacities() { + List storagePoolIds = _storagePoolDao.listAllIds(); + if (storagePoolIds.isEmpty()) { + return; + } + ConcurrentHashMap> futures = new ConcurrentHashMap<>(); + ExecutorService executorService = Executors.newFixedThreadPool(Math.max(1, + Math.min(CapacityManager.CapacityCalculateWorkers.value(), storagePoolIds.size()))); + for (Long poolId: storagePoolIds) { + futures.put(poolId, executorService.submit(() -> { + final StoragePoolVO pool = _storagePoolDao.findById(poolId); + long disk = _capacityMgr.getAllocatedPoolCapacity(pool, null); + if (pool.isShared()) { + _storageMgr.createCapacityEntry(pool, Capacity.CAPACITY_TYPE_STORAGE_ALLOCATED, disk); + } else { + _storageMgr.createCapacityEntry(pool, Capacity.CAPACITY_TYPE_LOCAL_STORAGE, disk); + } + return null; + })); + } + for (Map.Entry> entry: futures.entrySet()) { + try { + entry.getValue().get(); + } catch (InterruptedException | ExecutionException e) { + logger.error(String.format("Error during capacity calculation for storage pool: %d due to : %s", + entry.getKey(), e.getMessage()), e); + } + } + executorService.shutdown(); + } + @Override public void recalculateCapacity() { // FIXME: the right way to do this is to register a listener (see RouterStatsListener, VMSyncListener) @@ -272,36 +332,14 @@ public void recalculateCapacity() { logger.debug("recalculating system capacity"); logger.debug("Executing cpu/ram capacity update"); } - // Calculate CPU and RAM capacities - // get all hosts...even if they are not in 'UP' state - List hosts = _resourceMgr.listAllNotInMaintenanceHostsInOneZone(Host.Type.Routing, null); - if (hosts != null) { - // prepare the service offerings - List offerings = _offeringsDao.listAllIncludingRemoved(); - Map offeringsMap = new HashMap(); - for (ServiceOfferingVO offering : offerings) { - offeringsMap.put(offering.getId(), offering); - } - for (HostVO host : hosts) { - _capacityMgr.updateCapacityForHost(host, offeringsMap); - } - } + recalculateHostCapacities(); if (logger.isDebugEnabled()) { logger.debug("Done executing cpu/ram capacity update"); logger.debug("Executing storage capacity update"); } // Calculate storage pool capacity - List storagePools = _storagePoolDao.listAll(); - for (StoragePoolVO pool : storagePools) { - long disk = _capacityMgr.getAllocatedPoolCapacity(pool, null); - if (pool.isShared()) { - _storageMgr.createCapacityEntry(pool, Capacity.CAPACITY_TYPE_STORAGE_ALLOCATED, disk); - } else { - _storageMgr.createCapacityEntry(pool, 
Capacity.CAPACITY_TYPE_LOCAL_STORAGE, disk); - } - } - + recalculateStorageCapacities(); if (logger.isDebugEnabled()) { logger.debug("Done executing storage capacity update"); logger.debug("Executing capacity updates for public ip and Vlans"); diff --git a/server/src/main/java/com/cloud/api/query/QueryManagerImpl.java b/server/src/main/java/com/cloud/api/query/QueryManagerImpl.java index 046c3c1e6bce..c9f4affba232 100644 --- a/server/src/main/java/com/cloud/api/query/QueryManagerImpl.java +++ b/server/src/main/java/com/cloud/api/query/QueryManagerImpl.java @@ -2332,7 +2332,7 @@ public Pair, Integer> searchForServerIdsAndCount(ListHostsCmd cmd) { // ids hostSearchBuilder.and("id", hostSearchBuilder.entity().getId(), SearchCriteria.Op.EQ); hostSearchBuilder.and("name", hostSearchBuilder.entity().getName(), SearchCriteria.Op.EQ); - hostSearchBuilder.and("type", hostSearchBuilder.entity().getType(), SearchCriteria.Op.LIKE); + hostSearchBuilder.and("type", hostSearchBuilder.entity().getType(), SearchCriteria.Op.EQ); hostSearchBuilder.and("status", hostSearchBuilder.entity().getStatus(), SearchCriteria.Op.EQ); hostSearchBuilder.and("dataCenterId", hostSearchBuilder.entity().getDataCenterId(), SearchCriteria.Op.EQ); hostSearchBuilder.and("podId", hostSearchBuilder.entity().getPodId(), SearchCriteria.Op.EQ); @@ -2384,7 +2384,7 @@ public Pair, Integer> searchForServerIdsAndCount(ListHostsCmd cmd) { sc.setParameters("name", name); } if (type != null) { - sc.setParameters("type", "%" + type); + sc.setParameters("type", type); } if (state != null) { sc.setParameters("status", state); @@ -4521,7 +4521,7 @@ private Pair, Integer> searchForTemplatesInternal(Long temp // check if zone is configured, if not, just return empty list List hypers = null; if (!isIso) { - hypers = _resourceMgr.listAvailHypervisorInZone(null, null); + hypers = _resourceMgr.listAvailHypervisorInZone(null); if (hypers == null || hypers.isEmpty()) { return new Pair, Integer>(new ArrayList(), 0); } diff --git a/server/src/main/java/com/cloud/api/query/dao/UserVmJoinDao.java b/server/src/main/java/com/cloud/api/query/dao/UserVmJoinDao.java index b4427a6315a9..58b73096de80 100644 --- a/server/src/main/java/com/cloud/api/query/dao/UserVmJoinDao.java +++ b/server/src/main/java/com/cloud/api/query/dao/UserVmJoinDao.java @@ -44,6 +44,6 @@ UserVmResponse newUserVmResponse(ResponseView view, String objectName, UserVmJoi List listActiveByIsoId(Long isoId); - List listByAccountServiceOfferingTemplateAndNotInState(long accountId, List states, - List offeringIds, List templateIds); + List listByAccountServiceOfferingTemplateAndNotInState(long accountId, + List states, List offeringIds, List templateIds); } diff --git a/server/src/main/java/com/cloud/api/query/dao/UserVmJoinDaoImpl.java b/server/src/main/java/com/cloud/api/query/dao/UserVmJoinDaoImpl.java index af26a242db47..7e10df24e1b5 100644 --- a/server/src/main/java/com/cloud/api/query/dao/UserVmJoinDaoImpl.java +++ b/server/src/main/java/com/cloud/api/query/dao/UserVmJoinDaoImpl.java @@ -693,6 +693,8 @@ public List newUserVmView(VirtualMachine... 
vms) { public List listByAccountServiceOfferingTemplateAndNotInState(long accountId, List states, List offeringIds, List templateIds) { SearchBuilder userVmSearch = createSearchBuilder(); + userVmSearch.selectFields(userVmSearch.entity().getId(), userVmSearch.entity().getCpu(), + userVmSearch.entity().getRamSize()); userVmSearch.and("accountId", userVmSearch.entity().getAccountId(), Op.EQ); userVmSearch.and("serviceOfferingId", userVmSearch.entity().getServiceOfferingId(), Op.IN); userVmSearch.and("templateId", userVmSearch.entity().getTemplateId(), Op.IN); @@ -713,6 +715,6 @@ public List listByAccountServiceOfferingTemplateAndNotInState(long sc.setParameters("state", states.toArray()); } sc.setParameters("displayVm", 1); - return listBy(sc); + return customSearch(sc, null); } } diff --git a/server/src/main/java/com/cloud/capacity/CapacityManagerImpl.java b/server/src/main/java/com/cloud/capacity/CapacityManagerImpl.java index 421c980b2096..b62eb2cee28a 100644 --- a/server/src/main/java/com/cloud/capacity/CapacityManagerImpl.java +++ b/server/src/main/java/com/cloud/capacity/CapacityManagerImpl.java @@ -23,6 +23,7 @@ import java.util.List; import java.util.Map; import java.util.Optional; +import java.util.stream.Collectors; import javax.inject.Inject; import javax.naming.ConfigurationException; @@ -37,6 +38,10 @@ import org.apache.cloudstack.framework.messagebus.MessageBus; import org.apache.cloudstack.framework.messagebus.PublishScope; import org.apache.cloudstack.storage.datastore.db.StoragePoolVO; +import org.apache.cloudstack.utils.cache.LazyCache; +import org.apache.cloudstack.utils.cache.SingleCache; +import org.apache.commons.collections.CollectionUtils; +import org.apache.commons.lang3.ObjectUtils; import com.cloud.agent.AgentManager; import com.cloud.agent.Listener; @@ -50,7 +55,6 @@ import com.cloud.configuration.Config; import com.cloud.dc.ClusterDetailsDao; import com.cloud.dc.ClusterDetailsVO; -import com.cloud.dc.ClusterVO; import com.cloud.dc.dao.ClusterDao; import com.cloud.deploy.DeploymentClusterPlanner; import com.cloud.event.UsageEventVO; @@ -62,7 +66,6 @@ import com.cloud.hypervisor.Hypervisor.HypervisorType; import com.cloud.hypervisor.dao.HypervisorCapabilitiesDao; import com.cloud.offering.ServiceOffering; -import com.cloud.org.Cluster; import com.cloud.resource.ResourceListener; import com.cloud.resource.ResourceManager; import com.cloud.resource.ResourceState; @@ -141,6 +144,9 @@ public class CapacityManagerImpl extends ManagerBase implements CapacityManager, @Inject MessageBus _messageBus; + private LazyCache> clusterValuesCache; + private SingleCache> serviceOfferingsCache; + @Override public boolean configure(String name, Map params) throws ConfigurationException { _vmCapacityReleaseInterval = NumbersUtil.parseInt(_configDao.getValue(Config.CapacitySkipcountingHours.key()), 3600); @@ -156,6 +162,8 @@ public boolean configure(String name, Map params) throws Configu public boolean start() { _resourceMgr.registerResourceEvent(ResourceListener.EVENT_PREPARE_MAINTENANCE_AFTER, this); _resourceMgr.registerResourceEvent(ResourceListener.EVENT_CANCEL_MAINTENANCE_AFTER, this); + clusterValuesCache = new LazyCache<>(128, 60, this::getClusterValues); + serviceOfferingsCache = new SingleCache<>(60, this::getServiceOfferingsMap); return true; } @@ -209,8 +217,8 @@ public void doInTransactionWithoutResult(TransactionStatus status) { long reservedMem = capacityMemory.getReservedCapacity(); long reservedCpuCore = capacityCpuCore.getReservedCapacity(); long actualTotalCpu = 
capacityCpu.getTotalCapacity(); - float cpuOvercommitRatio = Float.parseFloat(_clusterDetailsDao.findDetail(clusterIdFinal, "cpuOvercommitRatio").getValue()); - float memoryOvercommitRatio = Float.parseFloat(_clusterDetailsDao.findDetail(clusterIdFinal, "memoryOvercommitRatio").getValue()); + float cpuOvercommitRatio = Float.parseFloat(_clusterDetailsDao.findDetail(clusterIdFinal, VmDetailConstants.CPU_OVER_COMMIT_RATIO).getValue()); + float memoryOvercommitRatio = Float.parseFloat(_clusterDetailsDao.findDetail(clusterIdFinal, VmDetailConstants.MEMORY_OVER_COMMIT_RATIO).getValue()); int vmCPU = svo.getCpu() * svo.getSpeed(); int vmCPUCore = svo.getCpu(); long vmMem = svo.getRamSize() * 1024L * 1024L; @@ -284,8 +292,8 @@ public void allocateVmCapacity(VirtualMachine vm, final boolean fromLastHost) { final long hostId = vm.getHostId(); final HostVO host = _hostDao.findById(hostId); final long clusterId = host.getClusterId(); - final float cpuOvercommitRatio = Float.parseFloat(_clusterDetailsDao.findDetail(clusterId, "cpuOvercommitRatio").getValue()); - final float memoryOvercommitRatio = Float.parseFloat(_clusterDetailsDao.findDetail(clusterId, "memoryOvercommitRatio").getValue()); + final float cpuOvercommitRatio = Float.parseFloat(_clusterDetailsDao.findDetail(clusterId, VmDetailConstants.CPU_OVER_COMMIT_RATIO).getValue()); + final float memoryOvercommitRatio = Float.parseFloat(_clusterDetailsDao.findDetail(clusterId, VmDetailConstants.MEMORY_OVER_COMMIT_RATIO).getValue()); final ServiceOfferingVO svo = _offeringsDao.findById(vm.getId(), vm.getServiceOfferingId()); @@ -371,13 +379,13 @@ public void doInTransactionWithoutResult(TransactionStatus status) { ",alloc_from_last:" + fromLastHost); long cluster_id = host.getClusterId(); - ClusterDetailsVO cluster_detail_cpu = _clusterDetailsDao.findDetail(cluster_id, "cpuOvercommitRatio"); - ClusterDetailsVO cluster_detail_ram = _clusterDetailsDao.findDetail(cluster_id, "memoryOvercommitRatio"); + ClusterDetailsVO cluster_detail_cpu = _clusterDetailsDao.findDetail(cluster_id, VmDetailConstants.CPU_OVER_COMMIT_RATIO); + ClusterDetailsVO cluster_detail_ram = _clusterDetailsDao.findDetail(cluster_id, VmDetailConstants.MEMORY_OVER_COMMIT_RATIO); Float cpuOvercommitRatio = Float.parseFloat(cluster_detail_cpu.getValue()); Float memoryOvercommitRatio = Float.parseFloat(cluster_detail_ram.getValue()); boolean hostHasCpuCapability, hostHasCapacity = false; - hostHasCpuCapability = checkIfHostHasCpuCapability(host.getId(), cpucore, cpuspeed); + hostHasCpuCapability = checkIfHostHasCpuCapability(host, cpucore, cpuspeed); if (hostHasCpuCapability) { // first check from reserved capacity @@ -407,23 +415,16 @@ public void doInTransactionWithoutResult(TransactionStatus status) { } @Override - public boolean checkIfHostHasCpuCapability(long hostId, Integer cpuNum, Integer cpuSpeed) { - + public boolean checkIfHostHasCpuCapability(Host host, Integer cpuNum, Integer cpuSpeed) { // Check host can support the Cpu Number and Speed. 
- Host host = _hostDao.findById(hostId); boolean isCpuNumGood = host.getCpus().intValue() >= cpuNum; boolean isCpuSpeedGood = host.getSpeed().intValue() >= cpuSpeed; if (isCpuNumGood && isCpuSpeedGood) { - if (logger.isDebugEnabled()) { - logger.debug("Host: " + hostId + " has cpu capability (cpu:" + host.getCpus() + ", speed:" + host.getSpeed() + - ") to support requested CPU: " + cpuNum + " and requested speed: " + cpuSpeed); - } + logger.debug("Host: {} has cpu capability (cpu: {}, speed: {} ) to support requested CPU: {} and requested speed: {}", + host.getId(), host.getCpus(), host.getSpeed(), cpuNum, cpuSpeed); return true; - } else { - if (logger.isDebugEnabled()) { - logger.debug("Host: " + hostId + " doesn't have cpu capability (cpu:" + host.getCpus() + ", speed:" + host.getSpeed() + - ") to support requested CPU: " + cpuNum + " and requested speed: " + cpuSpeed); - } + } else {logger.debug("Host: {} doesn't have cpu capability (cpu: {}, speed: {} ) to support requested CPU: {} and requested speed: {}", + host.getId(), host.getCpus(), host.getSpeed(), cpuNum, cpuSpeed); return false; } } @@ -630,21 +631,50 @@ public long getAllocatedPoolCapacity(StoragePoolVO pool, VMTemplateVO templateFo return totalAllocatedSize; } - @DB - @Override - public void updateCapacityForHost(final Host host) { - // prepare the service offerings - List offerings = _offeringsDao.listAllIncludingRemoved(); - Map offeringsMap = new HashMap(); - for (ServiceOfferingVO offering : offerings) { - offeringsMap.put(offering.getId(), offering); + protected Pair getClusterValues(long clusterId) { + Map map = _clusterDetailsDao.findDetails(clusterId, + List.of(VmDetailConstants.CPU_OVER_COMMIT_RATIO, VmDetailConstants.MEMORY_OVER_COMMIT_RATIO)); + return new Pair<>(map.get(VmDetailConstants.CPU_OVER_COMMIT_RATIO), + map.get(VmDetailConstants.MEMORY_OVER_COMMIT_RATIO)); + } + + + protected Map getServiceOfferingsMap() { + List serviceOfferings = _offeringsDao.listAllIncludingRemoved(); + if (CollectionUtils.isEmpty(serviceOfferings)) { + return new HashMap<>(); + } + return serviceOfferings.stream() + .collect(Collectors.toMap( + ServiceOfferingVO::getId, + offering -> offering + )); + } + + protected ServiceOfferingVO getServiceOffering(long id) { + Map map = serviceOfferingsCache.get(); + if (map.containsKey(id)) { + return map.get(id); + } + ServiceOfferingVO serviceOfferingVO = _offeringsDao.findByIdIncludingRemoved(id); + if (serviceOfferingVO != null) { + serviceOfferingsCache.invalidate(); } - updateCapacityForHost(host, offeringsMap); + return serviceOfferingVO; + } + + protected Map getVmDetailsForCapacityCalculation(long vmId) { + return _userVmDetailsDao.listDetailsKeyPairs(vmId, + List.of(VmDetailConstants.CPU_OVER_COMMIT_RATIO, + VmDetailConstants.MEMORY_OVER_COMMIT_RATIO, + UsageEventVO.DynamicParameters.memory.name(), + UsageEventVO.DynamicParameters.cpuNumber.name(), + UsageEventVO.DynamicParameters.cpuSpeed.name())); } @DB @Override - public void updateCapacityForHost(final Host host, final Map offeringsMap) { + public void updateCapacityForHost(final Host host) { long usedCpuCore = 0; long reservedCpuCore = 0; long usedCpu = 0; @@ -653,32 +683,27 @@ public void updateCapacityForHost(final Host host, final Map vms = _vmDao.listUpByHostId(host.getId()); - if (logger.isDebugEnabled()) { - logger.debug("Found " + vms.size() + " VMs on host " + host.getId()); - } + List vms = _vmDao.listIdServiceOfferingForUpVmsByHostId(host.getId()); + logger.debug("Found {} VMs on host {}", vms.size(), host.getId()); 
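The clusterValuesCache and serviceOfferingsCache fields introduced above rely on CloudStack's LazyCache and SingleCache utilities: values are produced by the supplied loaders (getClusterValues, getServiceOfferingsMap) and refreshed once they are older than the configured TTL, so updateCapacityForHost no longer re-reads the cluster overcommit details and the full service offering list on every call. The sketch below is a simplified stand-in for that idea, not the real org.apache.cloudstack.utils.cache implementation; its constructor arguments (TTL in seconds plus a loader) only mirror how the diff wires the caches up in start().

import java.util.concurrent.ConcurrentHashMap;
import java.util.function.Function;

// Simplified stand-in for the cache utilities used above: load on first access,
// reload after the TTL expires, and allow the whole cache to be invalidated.
public class TtlCache<K, V> {

    private static final class Entry<V> {
        final V value;
        final long loadedAt;
        Entry(V value, long loadedAt) {
            this.value = value;
            this.loadedAt = loadedAt;
        }
    }

    private final ConcurrentHashMap<K, Entry<V>> entries = new ConcurrentHashMap<>();
    private final long ttlMillis;
    private final Function<K, V> loader;

    public TtlCache(long ttlSeconds, Function<K, V> loader) {
        this.ttlMillis = ttlSeconds * 1000L;
        this.loader = loader;
    }

    public V get(K key) {
        // compute() keeps fresh entries and reloads expired or missing ones; the loader
        // runs while the map bin is locked, a simplification that is acceptable for the
        // short DAO lookups being cached here.
        return entries.compute(key, (k, existing) -> {
            long now = System.currentTimeMillis();
            if (existing != null && now - existing.loadedAt < ttlMillis) {
                return existing;
            }
            return new Entry<>(loader.apply(k), now);
        }).value;
    }

    public void invalidate() {
        entries.clear();
    }
}

In the spirit of the patch, new TtlCache<Long, Pair<String, String>>(60, this::getClusterValues) would play the role of clusterValuesCache, and invalidate() corresponds to the way getServiceOffering drops the cached offering map when it meets an id that is not in it.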
- final List vosMigrating = _vmDao.listVmsMigratingFromHost(host.getId()); - if (logger.isDebugEnabled()) { - logger.debug("Found " + vosMigrating.size() + " VMs are Migrating from host " + host.getId()); - } + final List vosMigrating = _vmDao.listIdServiceOfferingForVmsMigratingFromHost(host.getId()); + logger.debug("Found {} VMs are Migrating from host {}", vosMigrating.size(), host.getId()); vms.addAll(vosMigrating); - ClusterVO cluster = _clusterDao.findById(host.getClusterId()); - ClusterDetailsVO clusterDetailCpu = _clusterDetailsDao.findDetail(cluster.getId(), "cpuOvercommitRatio"); - ClusterDetailsVO clusterDetailRam = _clusterDetailsDao.findDetail(cluster.getId(), "memoryOvercommitRatio"); - Float clusterCpuOvercommitRatio = Float.parseFloat(clusterDetailCpu.getValue()); - Float clusterRamOvercommitRatio = Float.parseFloat(clusterDetailRam.getValue()); + Pair clusterValues = + clusterValuesCache.get(host.getClusterId()); + Float clusterCpuOvercommitRatio = Float.parseFloat(clusterValues.first()); + Float clusterRamOvercommitRatio = Float.parseFloat(clusterValues.second()); for (VMInstanceVO vm : vms) { Float cpuOvercommitRatio = 1.0f; Float ramOvercommitRatio = 1.0f; - Map vmDetails = _userVmDetailsDao.listDetailsKeyPairs(vm.getId()); - String vmDetailCpu = vmDetails.get("cpuOvercommitRatio"); - String vmDetailRam = vmDetails.get("memoryOvercommitRatio"); + Map vmDetails = getVmDetailsForCapacityCalculation(vm.getId()); + String vmDetailCpu = vmDetails.get(VmDetailConstants.CPU_OVER_COMMIT_RATIO); + String vmDetailRam = vmDetails.get(VmDetailConstants.MEMORY_OVER_COMMIT_RATIO); // if vmDetailCpu or vmDetailRam is not null it means it is running in a overcommitted cluster. cpuOvercommitRatio = (vmDetailCpu != null) ? Float.parseFloat(vmDetailCpu) : clusterCpuOvercommitRatio; ramOvercommitRatio = (vmDetailRam != null) ? Float.parseFloat(vmDetailRam) : clusterRamOvercommitRatio; - ServiceOffering so = offeringsMap.get(vm.getServiceOfferingId()); + ServiceOffering so = getServiceOffering(vm.getServiceOfferingId()); if (so == null) { so = _offeringsDao.findByIdIncludingRemoved(vm.getServiceOfferingId()); } @@ -703,27 +728,26 @@ public void updateCapacityForHost(final Host host, final Map vmsByLastHostId = _vmDao.listByLastHostId(host.getId()); - if (logger.isDebugEnabled()) { - logger.debug("Found " + vmsByLastHostId.size() + " VM, not running on host " + host.getId()); - } + List vmsByLastHostId = _vmDao.listIdServiceOfferingForVmsByLastHostId(host.getId()); + logger.debug("Found {} VM, not running on host {}", vmsByLastHostId.size(), host.getId()); + for (VMInstanceVO vm : vmsByLastHostId) { Float cpuOvercommitRatio = 1.0f; Float ramOvercommitRatio = 1.0f; long lastModificationTime = Optional.ofNullable(vm.getUpdateTime()).orElse(vm.getCreated()).getTime(); long secondsSinceLastUpdate = (DateUtil.currentGMTTime().getTime() - lastModificationTime) / 1000; if (secondsSinceLastUpdate < _vmCapacityReleaseInterval) { - UserVmDetailVO vmDetailCpu = _userVmDetailsDao.findDetail(vm.getId(), VmDetailConstants.CPU_OVER_COMMIT_RATIO); - UserVmDetailVO vmDetailRam = _userVmDetailsDao.findDetail(vm.getId(), VmDetailConstants.MEMORY_OVER_COMMIT_RATIO); + Map vmDetails = getVmDetailsForCapacityCalculation(vm.getId()); + String vmDetailCpu = vmDetails.get(VmDetailConstants.CPU_OVER_COMMIT_RATIO); + String vmDetailRam = vmDetails.get(VmDetailConstants.MEMORY_OVER_COMMIT_RATIO); if (vmDetailCpu != null) { //if vmDetail_cpu is not null it means it is running in a overcommited cluster. 
- cpuOvercommitRatio = Float.parseFloat(vmDetailCpu.getValue()); + cpuOvercommitRatio = Float.parseFloat(vmDetailCpu); } if (vmDetailRam != null) { - ramOvercommitRatio = Float.parseFloat(vmDetailRam.getValue()); + ramOvercommitRatio = Float.parseFloat(vmDetailRam); } - ServiceOffering so = offeringsMap.get(vm.getServiceOfferingId()); - Map vmDetails = _userVmDetailsDao.listDetailsKeyPairs(vm.getId()); + ServiceOffering so = getServiceOffering(vm.getServiceOfferingId()); if (so == null) { so = _offeringsDao.findByIdIncludingRemoved(vm.getServiceOfferingId()); } @@ -763,9 +787,24 @@ public void updateCapacityForHost(final Host host, final Map capacities = _capacityDao.listByHostIdTypes(host.getId(), List.of(Capacity.CAPACITY_TYPE_CPU, + Capacity.CAPACITY_TYPE_MEMORY, + CapacityVO.CAPACITY_TYPE_CPU_CORE)); + CapacityVO cpuCap = null; + CapacityVO memCap = null; + CapacityVO cpuCoreCap = null; + for (CapacityVO c : capacities) { + if (c.getCapacityType() == Capacity.CAPACITY_TYPE_CPU) { + cpuCap = c; + } else if (c.getCapacityType() == Capacity.CAPACITY_TYPE_MEMORY) { + memCap = c; + } else if (c.getCapacityType() == Capacity.CAPACITY_TYPE_CPU_CORE) { + cpuCoreCap = c; + } + if (ObjectUtils.allNotNull(cpuCap, memCap, cpuCoreCap)) { + break; + } + } if (cpuCoreCap != null) { long hostTotalCpuCore = host.getCpus().longValue(); @@ -1006,8 +1045,8 @@ private void createCapacityEntry(StartupCommand startup, HostVO server) { capacityCPU.addAnd("podId", SearchCriteria.Op.EQ, server.getPodId()); capacityCPU.addAnd("capacityType", SearchCriteria.Op.EQ, Capacity.CAPACITY_TYPE_CPU); List capacityVOCpus = _capacityDao.search(capacitySC, null); - Float cpuovercommitratio = Float.parseFloat(_clusterDetailsDao.findDetail(server.getClusterId(), "cpuOvercommitRatio").getValue()); - Float memoryOvercommitRatio = Float.parseFloat(_clusterDetailsDao.findDetail(server.getClusterId(), "memoryOvercommitRatio").getValue()); + Float cpuovercommitratio = Float.parseFloat(_clusterDetailsDao.findDetail(server.getClusterId(), VmDetailConstants.CPU_OVER_COMMIT_RATIO).getValue()); + Float memoryOvercommitRatio = Float.parseFloat(_clusterDetailsDao.findDetail(server.getClusterId(), VmDetailConstants.MEMORY_OVER_COMMIT_RATIO).getValue()); if (capacityVOCpus != null && !capacityVOCpus.isEmpty()) { CapacityVO CapacityVOCpu = capacityVOCpus.get(0); @@ -1064,9 +1103,9 @@ public float getClusterOverProvisioningFactor(Long clusterId, short capacityType String capacityOverProvisioningName = ""; if (capacityType == Capacity.CAPACITY_TYPE_CPU) { - capacityOverProvisioningName = "cpuOvercommitRatio"; + capacityOverProvisioningName = VmDetailConstants.CPU_OVER_COMMIT_RATIO; } else if (capacityType == Capacity.CAPACITY_TYPE_MEMORY) { - capacityOverProvisioningName = "memoryOvercommitRatio"; + capacityOverProvisioningName = VmDetailConstants.MEMORY_OVER_COMMIT_RATIO; } else { throw new CloudRuntimeException("Invalid capacityType - " + capacityType); } @@ -1107,13 +1146,11 @@ public boolean checkIfClusterCrossesThreshold(Long clusterId, Integer cpuRequest public Pair checkIfHostHasCpuCapabilityAndCapacity(Host host, ServiceOffering offering, boolean considerReservedCapacity) { int cpu_requested = offering.getCpu() * offering.getSpeed(); long ram_requested = offering.getRamSize() * 1024L * 1024L; - Cluster cluster = _clusterDao.findById(host.getClusterId()); - ClusterDetailsVO clusterDetailsCpuOvercommit = _clusterDetailsDao.findDetail(cluster.getId(), "cpuOvercommitRatio"); - ClusterDetailsVO clusterDetailsRamOvercommmt = 
_clusterDetailsDao.findDetail(cluster.getId(), "memoryOvercommitRatio"); - Float cpuOvercommitRatio = Float.parseFloat(clusterDetailsCpuOvercommit.getValue()); - Float memoryOvercommitRatio = Float.parseFloat(clusterDetailsRamOvercommmt.getValue()); + Pair clusterDetails = getClusterValues(host.getClusterId()); + Float cpuOvercommitRatio = Float.parseFloat(clusterDetails.first()); + Float memoryOvercommitRatio = Float.parseFloat(clusterDetails.second()); - boolean hostHasCpuCapability = checkIfHostHasCpuCapability(host.getId(), offering.getCpu(), offering.getSpeed()); + boolean hostHasCpuCapability = checkIfHostHasCpuCapability(host, offering.getCpu(), offering.getSpeed()); boolean hostHasCapacity = checkIfHostHasCapacity(host.getId(), cpu_requested, ram_requested, false, cpuOvercommitRatio, memoryOvercommitRatio, considerReservedCapacity); @@ -1254,6 +1291,7 @@ public String getConfigComponentName() { @Override public ConfigKey[] getConfigKeys() { return new ConfigKey[] {CpuOverprovisioningFactor, MemOverprovisioningFactor, StorageCapacityDisableThreshold, StorageOverprovisioningFactor, - StorageAllocatedCapacityDisableThreshold, StorageOperationsExcludeCluster, ImageStoreNFSVersion, SecondaryStorageCapacityThreshold}; + StorageAllocatedCapacityDisableThreshold, StorageOperationsExcludeCluster, ImageStoreNFSVersion, SecondaryStorageCapacityThreshold, + CapacityCalculateWorkers }; } } diff --git a/server/src/main/java/com/cloud/configuration/ConfigurationManagerImpl.java b/server/src/main/java/com/cloud/configuration/ConfigurationManagerImpl.java index 25cbd10de0a4..7825ff9a684e 100644 --- a/server/src/main/java/com/cloud/configuration/ConfigurationManagerImpl.java +++ b/server/src/main/java/com/cloud/configuration/ConfigurationManagerImpl.java @@ -16,6 +16,10 @@ // under the License. package com.cloud.configuration; +import static com.cloud.configuration.Config.SecStorageAllowedInternalDownloadSites; +import static com.cloud.offering.NetworkOffering.RoutingMode.Dynamic; +import static com.cloud.offering.NetworkOffering.RoutingMode.Static; + import java.io.UnsupportedEncodingException; import java.net.URI; import java.net.URISyntaxException; @@ -306,10 +310,6 @@ import com.googlecode.ipv6.IPv6Address; import com.googlecode.ipv6.IPv6Network; -import static com.cloud.configuration.Config.SecStorageAllowedInternalDownloadSites; -import static com.cloud.offering.NetworkOffering.RoutingMode.Dynamic; -import static com.cloud.offering.NetworkOffering.RoutingMode.Static; - public class ConfigurationManagerImpl extends ManagerBase implements ConfigurationManager, ConfigurationService, Configurable { public static final String PERACCOUNT = "peraccount"; public static final String PERZONE = "perzone"; @@ -2428,7 +2428,7 @@ protected void checkIfZoneIsDeletable(final long zoneId) { // Check if there are any non-removed hosts in the zone. 
- if (!_hostDao.listByDataCenterId(zoneId).isEmpty()) { + if (!_hostDao.listEnabledIdsByDataCenterId(zoneId).isEmpty()) { throw new CloudRuntimeException(errorMsg + "there are servers in this zone."); } diff --git a/server/src/main/java/com/cloud/consoleproxy/ConsoleProxyManagerImpl.java b/server/src/main/java/com/cloud/consoleproxy/ConsoleProxyManagerImpl.java index 53f76f8ad420..9311ee9c8705 100644 --- a/server/src/main/java/com/cloud/consoleproxy/ConsoleProxyManagerImpl.java +++ b/server/src/main/java/com/cloud/consoleproxy/ConsoleProxyManagerImpl.java @@ -873,10 +873,10 @@ private void allocCapacity(long dataCenterId) { } public boolean isZoneReady(Map zoneHostInfoMap, long dataCenterId) { - List hosts = hostDao.listByDataCenterId(dataCenterId); - if (CollectionUtils.isEmpty(hosts)) { + Integer totalUpAndEnabledHosts = hostDao.countUpAndEnabledHostsInZone(dataCenterId); + if (totalUpAndEnabledHosts != null && totalUpAndEnabledHosts < 1) { if (logger.isDebugEnabled()) { - logger.debug("Zone " + dataCenterId + " has no host available which is enabled and in Up state"); + logger.debug("Zone {} has no host available which is enabled and in Up state", dataCenterId); } return false; } @@ -898,8 +898,8 @@ public boolean isZoneReady(Map zoneHostInfoMap, long dataCen if (templateHostRef != null) { Boolean useLocalStorage = BooleanUtils.toBoolean(ConfigurationManagerImpl.SystemVMUseLocalStorage.valueIn(dataCenterId)); - List> l = consoleProxyDao.getDatacenterStoragePoolHostInfo(dataCenterId, useLocalStorage); - if (CollectionUtils.isNotEmpty(l) && l.get(0).second() > 0) { + boolean hasDatacenterStoragePoolHostInfo = consoleProxyDao.hasDatacenterStoragePoolHostInfo(dataCenterId, !useLocalStorage); + if (hasDatacenterStoragePoolHostInfo) { return true; } else { if (logger.isDebugEnabled()) { diff --git a/server/src/main/java/com/cloud/deploy/DeploymentPlanningManagerImpl.java b/server/src/main/java/com/cloud/deploy/DeploymentPlanningManagerImpl.java index 19760e6d0251..9d4ebf04fb73 100644 --- a/server/src/main/java/com/cloud/deploy/DeploymentPlanningManagerImpl.java +++ b/server/src/main/java/com/cloud/deploy/DeploymentPlanningManagerImpl.java @@ -36,23 +36,7 @@ import javax.inject.Inject; import javax.naming.ConfigurationException; -import com.cloud.cpu.CPU; -import com.cloud.vm.UserVmManager; import org.apache.cloudstack.affinity.AffinityGroupDomainMapVO; -import com.cloud.storage.VMTemplateVO; -import com.cloud.storage.dao.VMTemplateDao; -import com.cloud.user.AccountVO; -import com.cloud.user.dao.AccountDao; -import com.cloud.exception.StorageUnavailableException; -import com.cloud.utils.db.Filter; -import com.cloud.utils.fsm.StateMachine2; - -import org.apache.cloudstack.framework.config.ConfigKey; -import org.apache.cloudstack.framework.config.Configurable; -import org.apache.cloudstack.utils.reflectiontostringbuilderutils.ReflectionToStringBuilderUtils; -import org.apache.commons.collections.CollectionUtils; -import org.apache.commons.collections.MapUtils; -import org.apache.commons.lang3.StringUtils; import org.apache.cloudstack.affinity.AffinityGroupProcessor; import org.apache.cloudstack.affinity.AffinityGroupService; import org.apache.cloudstack.affinity.AffinityGroupVMMapVO; @@ -65,6 +49,8 @@ import org.apache.cloudstack.engine.subsystem.api.storage.DataStore; import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreManager; import org.apache.cloudstack.engine.subsystem.api.storage.StoragePoolAllocator; +import org.apache.cloudstack.framework.config.ConfigKey; +import 
org.apache.cloudstack.framework.config.Configurable; import org.apache.cloudstack.framework.config.dao.ConfigurationDao; import org.apache.cloudstack.framework.messagebus.MessageBus; import org.apache.cloudstack.framework.messagebus.MessageSubscriber; @@ -72,6 +58,10 @@ import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao; import org.apache.cloudstack.storage.datastore.db.StoragePoolVO; import org.apache.cloudstack.utils.identity.ManagementServerNode; +import org.apache.cloudstack.utils.reflectiontostringbuilderutils.ReflectionToStringBuilderUtils; +import org.apache.commons.collections.CollectionUtils; +import org.apache.commons.collections.MapUtils; +import org.apache.commons.lang3.StringUtils; import com.cloud.agent.AgentManager; import com.cloud.agent.Listener; @@ -86,6 +76,7 @@ import com.cloud.capacity.dao.CapacityDao; import com.cloud.configuration.Config; import com.cloud.configuration.ConfigurationManagerImpl; +import com.cloud.cpu.CPU; import com.cloud.dc.ClusterDetailsDao; import com.cloud.dc.ClusterDetailsVO; import com.cloud.dc.ClusterVO; @@ -103,6 +94,7 @@ import com.cloud.exception.AffinityConflictException; import com.cloud.exception.ConnectionException; import com.cloud.exception.InsufficientServerCapacityException; +import com.cloud.exception.StorageUnavailableException; import com.cloud.gpu.GPU; import com.cloud.host.DetailVO; import com.cloud.host.Host; @@ -123,15 +115,19 @@ import com.cloud.storage.StorageManager; import com.cloud.storage.StoragePool; import com.cloud.storage.StoragePoolHostVO; +import com.cloud.storage.VMTemplateVO; import com.cloud.storage.Volume; import com.cloud.storage.VolumeVO; import com.cloud.storage.dao.DiskOfferingDao; import com.cloud.storage.dao.GuestOSCategoryDao; import com.cloud.storage.dao.GuestOSDao; import com.cloud.storage.dao.StoragePoolHostDao; +import com.cloud.storage.dao.VMTemplateDao; import com.cloud.storage.dao.VolumeDao; import com.cloud.template.VirtualMachineTemplate; import com.cloud.user.AccountManager; +import com.cloud.user.AccountVO; +import com.cloud.user.dao.AccountDao; import com.cloud.utils.DateUtil; import com.cloud.utils.LogUtils; import com.cloud.utils.NumbersUtil; @@ -139,13 +135,16 @@ import com.cloud.utils.component.Manager; import com.cloud.utils.component.ManagerBase; import com.cloud.utils.db.DB; +import com.cloud.utils.db.Filter; import com.cloud.utils.db.SearchCriteria; import com.cloud.utils.db.Transaction; import com.cloud.utils.db.TransactionCallback; import com.cloud.utils.db.TransactionStatus; import com.cloud.utils.exception.CloudRuntimeException; import com.cloud.utils.fsm.StateListener; +import com.cloud.utils.fsm.StateMachine2; import com.cloud.vm.DiskProfile; +import com.cloud.vm.UserVmManager; import com.cloud.vm.VMInstanceVO; import com.cloud.vm.VirtualMachine; import com.cloud.vm.VirtualMachine.Event; @@ -298,7 +297,7 @@ protected void avoidOtherClustersForDeploymentIfMigrationDisabled(VirtualMachine final Long lastHostClusterId = lastHost.getClusterId(); logger.warn(String.format("VM last host ID: %d belongs to zone ID: %s for which config - %s is false and storage migration would be needed for inter-cluster migration, therefore, adding all other clusters except ID: %d from this zone to avoid list", lastHost.getId(), vm.getDataCenterId(), ConfigurationManagerImpl.MIGRATE_VM_ACROSS_CLUSTERS.key(), lastHostClusterId)); - List clusterIds = _clusterDao.listAllClusters(lastHost.getDataCenterId()); + List clusterIds = _clusterDao.listAllClusterIds(lastHost.getDataCenterId()); 
Set existingAvoidedClusters = avoids.getClustersToAvoid(); clusterIds = clusterIds.stream().filter(x -> !Objects.equals(x, lastHostClusterId) && (existingAvoidedClusters == null || !existingAvoidedClusters.contains(x))).collect(Collectors.toList()); avoids.addClusterList(clusterIds); @@ -496,7 +495,7 @@ private DeployDestination deployInVmLastHost(VirtualMachineProfile vmProfile, De float memoryOvercommitRatio = Float.parseFloat(cluster_detail_ram.getValue()); boolean hostHasCpuCapability, hostHasCapacity = false; - hostHasCpuCapability = _capacityMgr.checkIfHostHasCpuCapability(host.getId(), offering.getCpu(), offering.getSpeed()); + hostHasCpuCapability = _capacityMgr.checkIfHostHasCpuCapability(host, offering.getCpu(), offering.getSpeed()); if (hostHasCpuCapability) { // first check from reserved capacity @@ -742,12 +741,11 @@ protected boolean isAdminVmDeployableInDisabledResources() { * Adds disabled Hosts to the ExcludeList in order to avoid them at the deployment planner. */ protected void avoidDisabledHosts(DataCenter dc, ExcludeList avoids) { - List disabledHosts = _hostDao.listDisabledByDataCenterId(dc.getId()); - logger.debug(() -> String.format("Adding hosts [%s] of datacenter [%s] to the avoid set, because these hosts are in the Disabled state.", - disabledHosts.stream().map(HostVO::getUuid).collect(Collectors.joining(", ")), dc.getUuid())); - for (HostVO host : disabledHosts) { - avoids.addHost(host.getId()); - } + + List disabledHostIds = _hostDao.listDisabledIdsByDataCenterId(dc.getId()); + logger.debug("Adding hosts {} of datacenter [{}] to the avoid set, because these hosts are in the Disabled state.", + StringUtils.join(disabledHostIds), dc.getUuid()); + disabledHostIds.forEach(avoids::addHost); } /** @@ -865,7 +863,7 @@ public void checkForNonDedicatedResources(VirtualMachineProfile vmProfile, DataC List allDedicatedPods = _dedicatedDao.listAllPods(); allPodsInDc.retainAll(allDedicatedPods); - List allClustersInDc = _clusterDao.listAllClusters(dc.getId()); + List allClustersInDc = _clusterDao.listAllClusterIds(dc.getId()); List allDedicatedClusters = _dedicatedDao.listAllClusters(); allClustersInDc.retainAll(allDedicatedClusters); @@ -1152,11 +1150,13 @@ protected void runInContext() { private void checkHostReservations() { List reservedHosts = _plannerHostReserveDao.listAllReservedHosts(); - - for (PlannerHostReservationVO hostReservation : reservedHosts) { - HostVO host = _hostDao.findById(hostReservation.getHostId()); + List hosts = _hostDao.listByIds(reservedHosts + .stream() + .map(PlannerHostReservationVO::getHostId) + .collect(Collectors.toList())); + for (HostVO host : hosts) { if (host != null && host.getManagementServerId() != null && host.getManagementServerId() == _nodeId) { - checkHostReservationRelease(hostReservation.getHostId()); + checkHostReservationRelease(host.getId()); } } @@ -1344,7 +1344,7 @@ private DeployDestination checkClustersforDestination(List clusterList, Vi Pair> potentialResources = findPotentialDeploymentResources(suitableHosts, suitableVolumeStoragePools, avoid, resourceUsageRequired, readyAndReusedVolumes, plan.getPreferredHosts(), vmProfile.getVirtualMachine()); if (potentialResources != null) { - Host host = _hostDao.findById(potentialResources.first().getId()); + Host host = potentialResources.first(); Map storageVolMap = potentialResources.second(); // remove the reused vol<->pool from destination, since // we don't have to prepare this volume.
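The DeploymentPlanningManagerImpl changes above swap listAllClusters for listAllClusterIds, and the same id-only DAO style appears throughout this patch (listIdsByClusterId, listDisabledIdsByDataCenterId, listIdsForUpEnabledByZoneAndHypervisor): the callers only need identifiers, so hydrating full VO rows is wasted work. The UserVmJoinDaoImpl hunk earlier shows the underlying idiom, selectFields on the SearchBuilder plus customSearch. The method below is a hypothetical projection query in that style, assumed to live in a GenericDaoBase subclass with the usual com.cloud.utils.db imports; the method name and selected columns are illustrative and the real id-returning DAO methods may be implemented differently.

// Hypothetical projection query mirroring the selectFields(...) + customSearch(...)
// idiom from the UserVmJoinDaoImpl hunk: only id, cpu and ramSize are fetched,
// leaving every other column unloaded.
public List<UserVmJoinVO> listSlimByAccount(long accountId) {
    SearchBuilder<UserVmJoinVO> sb = createSearchBuilder();
    sb.selectFields(sb.entity().getId(), sb.entity().getCpu(), sb.entity().getRamSize());
    sb.and("accountId", sb.entity().getAccountId(), SearchCriteria.Op.EQ);
    sb.done();

    SearchCriteria<UserVmJoinVO> sc = sb.create();
    sc.setParameters("accountId", accountId);
    return customSearch(sc, null);
}

The trade-off is that the returned objects are only partially populated, which is why the diff reserves this pattern for call sites that immediately reduce the result to ids or to a handful of scalar fields.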
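Another pattern repeated across these server-side hunks (checkIfHostHasCpuCapability, updateCapacityForHost, the cluster removal check, avoidDisabledHosts above) is replacing isDebugEnabled() guards and string concatenation with Log4j2 parameterized messages: the message is only formatted when the level is actually enabled, so the guard becomes unnecessary. A small illustrative comparison follows, using a throwaway class name that is not part of CloudStack.

import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;

// Throwaway example: the two logging styles side by side.
public class LoggingStyles {
    private static final Logger logger = LogManager.getLogger(LoggingStyles.class);

    void logOldStyle(long hostId, int cpus) {
        // Old style: concatenation builds the string even when DEBUG is off,
        // so a guard is needed to avoid the wasted work.
        if (logger.isDebugEnabled()) {
            logger.debug("Host: " + hostId + " has " + cpus + " cpus");
        }
    }

    void logNewStyle(long hostId, int cpus) {
        // New style: {} placeholders are only substituted when DEBUG is enabled,
        // so the call is cheap without an explicit guard.
        logger.debug("Host: {} has {} cpus", hostId, cpus);
    }
}

Note that Log4j2's default message factory only substitutes {} placeholders; printf-style %s specifiers are left in the output unchanged, which is why the avoidDisabledHosts message above uses braces.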
diff --git a/server/src/main/java/com/cloud/hypervisor/kvm/discoverer/LibvirtServerDiscoverer.java b/server/src/main/java/com/cloud/hypervisor/kvm/discoverer/LibvirtServerDiscoverer.java index ffd482b711d6..4209b0d23261 100644 --- a/server/src/main/java/com/cloud/hypervisor/kvm/discoverer/LibvirtServerDiscoverer.java +++ b/server/src/main/java/com/cloud/hypervisor/kvm/discoverer/LibvirtServerDiscoverer.java @@ -16,6 +16,29 @@ // under the License. package com.cloud.hypervisor.kvm.discoverer; +import static com.cloud.configuration.ConfigurationManagerImpl.ADD_HOST_ON_SERVICE_RESTART_KVM; + +import java.net.InetAddress; +import java.net.URI; +import java.util.Arrays; +import java.util.Collections; +import java.util.HashMap; +import java.util.HashSet; +import java.util.List; +import java.util.Map; +import java.util.UUID; + +import javax.inject.Inject; +import javax.naming.ConfigurationException; + +import org.apache.cloudstack.agent.lb.IndirectAgentLB; +import org.apache.cloudstack.ca.CAManager; +import org.apache.cloudstack.ca.SetupCertificateCommand; +import org.apache.cloudstack.direct.download.DirectDownloadManager; +import org.apache.cloudstack.framework.ca.Certificate; +import org.apache.cloudstack.utils.cache.LazyCache; +import org.apache.cloudstack.utils.security.KeyStoreUtils; + import com.cloud.agent.AgentManager; import com.cloud.agent.Listener; import com.cloud.agent.api.AgentControlAnswer; @@ -48,26 +71,7 @@ import com.cloud.utils.exception.CloudRuntimeException; import com.cloud.utils.ssh.SSHCmdHelper; import com.trilead.ssh2.Connection; -import org.apache.cloudstack.agent.lb.IndirectAgentLB; -import org.apache.cloudstack.ca.CAManager; -import org.apache.cloudstack.ca.SetupCertificateCommand; -import org.apache.cloudstack.direct.download.DirectDownloadManager; -import org.apache.cloudstack.framework.ca.Certificate; -import org.apache.cloudstack.utils.security.KeyStoreUtils; -import javax.inject.Inject; -import javax.naming.ConfigurationException; -import java.net.InetAddress; -import java.net.URI; -import java.util.Arrays; -import java.util.Collections; -import java.util.HashMap; -import java.util.HashSet; -import java.util.List; -import java.util.Map; -import java.util.UUID; - -import static com.cloud.configuration.ConfigurationManagerImpl.ADD_HOST_ON_SERVICE_RESTART_KVM; public abstract class LibvirtServerDiscoverer extends DiscovererBase implements Discoverer, Listener, ResourceStateAdapter { private final int _waitTime = 5; /* wait for 5 minutes */ @@ -89,6 +93,16 @@ public abstract class LibvirtServerDiscoverer extends DiscovererBase implements @Inject private HostDao hostDao; + private LazyCache clusterExistingHostCache; + + private HostVO getExistingHostForCluster(long clusterId) { + HostVO existingHostInCluster = _hostDao.findAnyStateHypervisorHostInCluster(clusterId); + if (existingHostInCluster != null) { + _hostDao.loadDetails(existingHostInCluster); + } + return existingHostInCluster; + } + @Override public abstract Hypervisor.HypervisorType getHypervisorType(); @@ -425,6 +439,9 @@ public boolean configure(String name, Map params) throws Configu _kvmGuestNic = _kvmPrivateNic; } + clusterExistingHostCache = new LazyCache<>(32, 30, + this::getExistingHostForCluster); + agentMgr.registerForHostEvents(this, true, false, false); _resourceMgr.registerResourceStateAdapter(this.getClass().getSimpleName(), this); return true; @@ -467,11 +484,9 @@ public HostVO createHostVOForConnectedAgent(HostVO host, StartupCommand[] cmd) { throw new IllegalArgumentException("cannot add 
host, due to can't find cluster: " + host.getClusterId()); } - List hostsInCluster = _resourceMgr.listAllHostsInCluster(clusterVO.getId()); - if (!hostsInCluster.isEmpty()) { - HostVO oneHost = hostsInCluster.get(0); - _hostDao.loadDetails(oneHost); - String hostOsInCluster = oneHost.getDetail("Host.OS"); + HostVO existingHostInCluster = clusterExistingHostCache.get(clusterVO.getId()); + if (existingHostInCluster != null) { + String hostOsInCluster = existingHostInCluster.getDetail("Host.OS"); String hostOs = ssCmd.getHostDetails().get("Host.OS"); if (!isHostOsCompatibleWithOtherHost(hostOsInCluster, hostOs)) { String msg = String.format("host: %s with hostOS, \"%s\"into a cluster, in which there are \"%s\" hosts added", firstCmd.getPrivateIpAddress(), hostOs, hostOsInCluster); diff --git a/server/src/main/java/com/cloud/network/NetworkServiceImpl.java b/server/src/main/java/com/cloud/network/NetworkServiceImpl.java index 2ccd5fdd8202..8b3e23572380 100644 --- a/server/src/main/java/com/cloud/network/NetworkServiceImpl.java +++ b/server/src/main/java/com/cloud/network/NetworkServiceImpl.java @@ -40,16 +40,6 @@ import javax.inject.Inject; import javax.naming.ConfigurationException; -import com.cloud.bgp.BGPService; -import com.cloud.dc.VlanDetailsVO; -import com.cloud.dc.dao.VlanDetailsDao; -import com.cloud.network.dao.NsxProviderDao; -import com.cloud.network.dao.PublicIpQuarantineDao; -import com.cloud.network.dao.VirtualRouterProviderDao; -import com.cloud.network.element.NsxProviderVO; -import com.cloud.network.element.VirtualRouterProviderVO; -import com.cloud.offering.ServiceOffering; -import com.cloud.service.dao.ServiceOfferingDao; import org.apache.cloudstack.acl.ControlledEntity.ACLType; import org.apache.cloudstack.acl.SecurityChecker.AccessType; import org.apache.cloudstack.alert.AlertService; @@ -104,6 +94,7 @@ import com.cloud.api.ApiDBUtils; import com.cloud.api.query.dao.DomainRouterJoinDao; import com.cloud.api.query.vo.DomainRouterJoinVO; +import com.cloud.bgp.BGPService; import com.cloud.configuration.Config; import com.cloud.configuration.ConfigurationManager; import com.cloud.configuration.Resource; @@ -114,12 +105,14 @@ import com.cloud.dc.DataCenterVnetVO; import com.cloud.dc.DomainVlanMapVO; import com.cloud.dc.Vlan.VlanType; +import com.cloud.dc.VlanDetailsVO; import com.cloud.dc.VlanVO; import com.cloud.dc.dao.AccountVlanMapDao; import com.cloud.dc.dao.DataCenterDao; import com.cloud.dc.dao.DataCenterVnetDao; import com.cloud.dc.dao.DomainVlanMapDao; import com.cloud.dc.dao.VlanDao; +import com.cloud.dc.dao.VlanDetailsDao; import com.cloud.deploy.DeployDestination; import com.cloud.domain.Domain; import com.cloud.domain.DomainVO; @@ -165,6 +158,7 @@ import com.cloud.network.dao.NetworkDomainVO; import com.cloud.network.dao.NetworkServiceMapDao; import com.cloud.network.dao.NetworkVO; +import com.cloud.network.dao.NsxProviderDao; import com.cloud.network.dao.OvsProviderDao; import com.cloud.network.dao.PhysicalNetworkDao; import com.cloud.network.dao.PhysicalNetworkServiceProviderDao; @@ -172,9 +166,13 @@ import com.cloud.network.dao.PhysicalNetworkTrafficTypeDao; import com.cloud.network.dao.PhysicalNetworkTrafficTypeVO; import com.cloud.network.dao.PhysicalNetworkVO; +import com.cloud.network.dao.PublicIpQuarantineDao; +import com.cloud.network.dao.VirtualRouterProviderDao; import com.cloud.network.element.NetworkElement; +import com.cloud.network.element.NsxProviderVO; import com.cloud.network.element.OvsProviderVO; import 
com.cloud.network.element.VirtualRouterElement; +import com.cloud.network.element.VirtualRouterProviderVO; import com.cloud.network.element.VpcVirtualRouterElement; import com.cloud.network.guru.GuestNetworkGuru; import com.cloud.network.guru.NetworkGuru; @@ -198,6 +196,7 @@ import com.cloud.network.vpc.dao.VpcGatewayDao; import com.cloud.network.vpc.dao.VpcOfferingDao; import com.cloud.offering.NetworkOffering; +import com.cloud.offering.ServiceOffering; import com.cloud.offerings.NetworkOfferingVO; import com.cloud.offerings.dao.NetworkOfferingDao; import com.cloud.offerings.dao.NetworkOfferingServiceMapDao; @@ -207,6 +206,7 @@ import com.cloud.server.ResourceTag; import com.cloud.server.ResourceTag.ResourceObjectType; import com.cloud.service.ServiceOfferingVO; +import com.cloud.service.dao.ServiceOfferingDao; import com.cloud.tags.ResourceTagVO; import com.cloud.tags.dao.ResourceTagDao; import com.cloud.user.Account; diff --git a/server/src/main/java/com/cloud/network/as/AutoScaleManagerImpl.java b/server/src/main/java/com/cloud/network/as/AutoScaleManagerImpl.java index 5e7a4a0c4efc..fd6ff18f4c72 100644 --- a/server/src/main/java/com/cloud/network/as/AutoScaleManagerImpl.java +++ b/server/src/main/java/com/cloud/network/as/AutoScaleManagerImpl.java @@ -2717,7 +2717,7 @@ protected void getVmStatsFromHosts(AutoScaleVmGroupTO groupTO) { return vmStatsById; } try { - vmStatsById = virtualMachineManager.getVirtualMachineStatistics(host.getId(), host.getName(), vmIds); + vmStatsById = virtualMachineManager.getVirtualMachineStatistics(host, vmIds); if (MapUtils.isEmpty(vmStatsById)) { logger.warn("Got empty result for virtual machine statistics from host: " + host); } diff --git a/server/src/main/java/com/cloud/network/security/SecurityGroupListener.java b/server/src/main/java/com/cloud/network/security/SecurityGroupListener.java index b925137c4ce8..3147365e8ab6 100644 --- a/server/src/main/java/com/cloud/network/security/SecurityGroupListener.java +++ b/server/src/main/java/com/cloud/network/security/SecurityGroupListener.java @@ -22,8 +22,8 @@ import java.util.Random; import java.util.concurrent.ConcurrentHashMap; -import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; import com.cloud.agent.AgentManager; import com.cloud.agent.Listener; diff --git a/server/src/main/java/com/cloud/resource/ResourceManagerImpl.java b/server/src/main/java/com/cloud/resource/ResourceManagerImpl.java index 228373896204..8c6123103443 100755 --- a/server/src/main/java/com/cloud/resource/ResourceManagerImpl.java +++ b/server/src/main/java/com/cloud/resource/ResourceManagerImpl.java @@ -32,20 +32,12 @@ import java.util.Locale; import java.util.Map; import java.util.Random; +import java.util.Set; import java.util.stream.Collectors; import javax.inject.Inject; import javax.naming.ConfigurationException; -import com.cloud.alert.AlertManager; -import com.cloud.cpu.CPU; -import com.cloud.exception.StorageConflictException; -import com.cloud.exception.StorageUnavailableException; -import com.cloud.host.HostTagVO; -import com.cloud.storage.Volume; -import com.cloud.storage.VolumeVO; -import com.cloud.storage.dao.VolumeDao; -import com.cloud.hypervisor.HypervisorGuru; import org.apache.cloudstack.alert.AlertService; import org.apache.cloudstack.annotation.AnnotationService; import org.apache.cloudstack.annotation.dao.AnnotationDao; @@ -92,6 +84,7 @@ import com.cloud.agent.api.VgpuTypesInfo; import com.cloud.agent.api.to.GPUDeviceTO; import 
com.cloud.agent.transport.Request; +import com.cloud.alert.AlertManager; import com.cloud.capacity.Capacity; import com.cloud.capacity.CapacityManager; import com.cloud.capacity.CapacityState; @@ -100,6 +93,7 @@ import com.cloud.cluster.ClusterManager; import com.cloud.configuration.Config; import com.cloud.configuration.ConfigurationManager; +import com.cloud.cpu.CPU; import com.cloud.dc.ClusterDetailsDao; import com.cloud.dc.ClusterDetailsVO; import com.cloud.dc.ClusterVO; @@ -133,6 +127,8 @@ import com.cloud.exception.PermissionDeniedException; import com.cloud.exception.ResourceInUseException; import com.cloud.exception.ResourceUnavailableException; +import com.cloud.exception.StorageConflictException; +import com.cloud.exception.StorageUnavailableException; import com.cloud.gpu.GPU; import com.cloud.gpu.HostGpuGroupsVO; import com.cloud.gpu.VGPUTypesVO; @@ -144,6 +140,7 @@ import com.cloud.host.Host; import com.cloud.host.Host.Type; import com.cloud.host.HostStats; +import com.cloud.host.HostTagVO; import com.cloud.host.HostVO; import com.cloud.host.Status; import com.cloud.host.Status.Event; @@ -152,6 +149,7 @@ import com.cloud.host.dao.HostTagsDao; import com.cloud.hypervisor.Hypervisor; import com.cloud.hypervisor.Hypervisor.HypervisorType; +import com.cloud.hypervisor.HypervisorGuru; import com.cloud.hypervisor.kvm.discoverer.KvmDummyResourceBase; import com.cloud.network.dao.IPAddressDao; import com.cloud.network.dao.IPAddressVO; @@ -169,10 +167,13 @@ import com.cloud.storage.StoragePoolStatus; import com.cloud.storage.StorageService; import com.cloud.storage.VMTemplateVO; +import com.cloud.storage.Volume; +import com.cloud.storage.VolumeVO; import com.cloud.storage.dao.DiskOfferingDao; import com.cloud.storage.dao.GuestOSCategoryDao; import com.cloud.storage.dao.StoragePoolHostDao; import com.cloud.storage.dao.VMTemplateDao; +import com.cloud.storage.dao.VolumeDao; import com.cloud.user.Account; import com.cloud.user.AccountManager; import com.cloud.utils.Ternary; @@ -549,8 +550,8 @@ public List discoverCluster(final AddClusterCmd cmd) throws I details.put("ovm3pool", allParams.get("ovm3pool")); details.put("ovm3cluster", allParams.get("ovm3cluster")); } - details.put("cpuOvercommitRatio", CapacityManager.CpuOverprovisioningFactor.value().toString()); - details.put("memoryOvercommitRatio", CapacityManager.MemOverprovisioningFactor.value().toString()); + details.put(VmDetailConstants.CPU_OVER_COMMIT_RATIO, CapacityManager.CpuOverprovisioningFactor.value().toString()); + details.put(VmDetailConstants.MEMORY_OVER_COMMIT_RATIO, CapacityManager.MemOverprovisioningFactor.value().toString()); _clusterDetailsDao.persist(cluster.getId(), details); return result; } @@ -560,8 +561,8 @@ public List discoverCluster(final AddClusterCmd cmd) throws I details.put("url", url); details.put("username", StringUtils.defaultString(username)); details.put("password", StringUtils.defaultString(password)); - details.put("cpuOvercommitRatio", CapacityManager.CpuOverprovisioningFactor.value().toString()); - details.put("memoryOvercommitRatio", CapacityManager.MemOverprovisioningFactor.value().toString()); + details.put(VmDetailConstants.CPU_OVER_COMMIT_RATIO, CapacityManager.CpuOverprovisioningFactor.value().toString()); + details.put(VmDetailConstants.MEMORY_OVER_COMMIT_RATIO, CapacityManager.MemOverprovisioningFactor.value().toString()); _clusterDetailsDao.persist(cluster.getId(), details); boolean success = false; @@ -645,8 +646,8 @@ public List discoverHosts(final AddHostCmd cmd) throws IllegalAr 
throw ex; } else { if (cluster.getGuid() == null) { - final List hosts = listAllHostsInCluster(clusterId); - if (!hosts.isEmpty()) { + final List hostIds = _hostDao.listIdsByClusterId(clusterId); + if (!hostIds.isEmpty()) { final CloudRuntimeException ex = new CloudRuntimeException("Guid is not updated for cluster with specified cluster id; need to wait for hosts in this cluster to come up"); ex.addProxyObject(cluster.getUuid(), "clusterId"); @@ -780,9 +781,9 @@ private List discoverHostsFull(final Long dcId, final Long podId, Long c } } clusterId = cluster.getId(); - if (_clusterDetailsDao.findDetail(clusterId, "cpuOvercommitRatio") == null) { - final ClusterDetailsVO cluster_cpu_detail = new ClusterDetailsVO(clusterId, "cpuOvercommitRatio", "1"); - final ClusterDetailsVO cluster_memory_detail = new ClusterDetailsVO(clusterId, "memoryOvercommitRatio", "1"); + if (_clusterDetailsDao.findDetail(clusterId, VmDetailConstants.CPU_OVER_COMMIT_RATIO) == null) { + final ClusterDetailsVO cluster_cpu_detail = new ClusterDetailsVO(clusterId, VmDetailConstants.CPU_OVER_COMMIT_RATIO, "1"); + final ClusterDetailsVO cluster_memory_detail = new ClusterDetailsVO(clusterId, VmDetailConstants.MEMORY_OVER_COMMIT_RATIO, "1"); _clusterDetailsDao.persist(cluster_cpu_detail); _clusterDetailsDao.persist(cluster_memory_detail); } @@ -964,8 +965,8 @@ public void doInTransactionWithoutResult(final TransactionStatus status) { Host hostRemoved = _hostDao.findById(hostId); _hostDao.remove(hostId); if (clusterId != null) { - final List hosts = listAllHostsInCluster(clusterId); - if (hosts.size() == 0) { + final List hostIds = _hostDao.listIdsByClusterId(clusterId); + if (CollectionUtils.isEmpty(hostIds)) { final ClusterVO cluster = _clusterDao.findById(clusterId); cluster.setGuid(null); _clusterDao.update(clusterId, cluster); @@ -1089,11 +1090,9 @@ public void doInTransactionWithoutResult(final TransactionStatus status) { final Hypervisor.HypervisorType hypervisorType = cluster.getHypervisorType(); - final List hosts = listAllHostsInCluster(cmd.getId()); - if (hosts.size() > 0) { - if (logger.isDebugEnabled()) { - logger.debug("Cluster: " + cmd.getId() + " still has hosts, can't remove"); - } + final List hostIds = _hostDao.listIdsByClusterId(cmd.getId()); + if (!hostIds.isEmpty()) { + logger.debug("Cluster: {} still has hosts, can't remove", cmd.getId()); throw new CloudRuntimeException("Cluster: " + cmd.getId() + " cannot be removed. 
Cluster still has hosts"); } @@ -2433,10 +2432,10 @@ private void updateSupportsClonedVolumes(HostVO host, boolean supportsClonedVolu boolean clusterSupportsResigning = true; - List hostVOs = _hostDao.findByClusterId(host.getClusterId()); + List hostIds = _hostDao.listIdsByClusterId(host.getClusterId()); - for (HostVO hostVO : hostVOs) { - DetailVO hostDetailVO = _hostDetailsDao.findDetail(hostVO.getId(), name); + for (Long hostId : hostIds) { + DetailVO hostDetailVO = _hostDetailsDao.findDetail(hostId, name); if (hostDetailVO == null || Boolean.parseBoolean(hostDetailVO.getValue()) == false) { clusterSupportsResigning = false; @@ -3050,10 +3049,10 @@ private boolean doUpdateHostPassword(final long hostId) { public boolean updateClusterPassword(final UpdateHostPasswordCmd command) { final boolean shouldUpdateHostPasswd = command.getUpdatePasswdOnHost(); // get agents for the cluster - final List hosts = listAllHostsInCluster(command.getClusterId()); - for (final HostVO host : hosts) { + final List hostIds = _hostDao.listIdsByClusterId(command.getClusterId()); + for (final Long hostId : hostIds) { try { - final Boolean result = propagateResourceEvent(host.getId(), ResourceState.Event.UpdatePassword); + final Boolean result = propagateResourceEvent(hostId, ResourceState.Event.UpdatePassword); if (result != null) { return result; } @@ -3062,8 +3061,9 @@ public boolean updateClusterPassword(final UpdateHostPasswordCmd command) { } if (shouldUpdateHostPasswd) { - final boolean isUpdated = doUpdateHostPassword(host.getId()); + final boolean isUpdated = doUpdateHostPassword(hostId); if (!isUpdated) { + HostVO host = _hostDao.findById(hostId); throw new CloudRuntimeException( String.format("CloudStack failed to update the password of %s. Please make sure you are still able to connect to your hosts.", host)); } @@ -3278,26 +3278,13 @@ public List listAllHostsInAllZonesByType(final Type type) { } @Override - public List listAvailHypervisorInZone(final Long hostId, final Long zoneId) { - final SearchCriteria sc = _hypervisorsInDC.create(); - if (zoneId != null) { - sc.setParameters("dataCenter", zoneId); - } - if (hostId != null) { - // exclude the given host, since we want to check what hypervisor is already handled - // in adding this new host - sc.setParameters("id", hostId); + public List listAvailHypervisorInZone(final Long zoneId) { + List systemVMTemplates = _templateDao.listAllReadySystemVMTemplates(zoneId); + final Set hypervisors = new HashSet<>(); + for (final VMTemplateVO systemVMTemplate : systemVMTemplates) { + hypervisors.add(systemVMTemplate.getHypervisorType()); } - sc.setParameters("type", Host.Type.Routing); - - // The search is not able to return list of enums, so getting - // list of hypervisors as strings and then converting them to enum - final List hvs = _hostDao.customSearch(sc, null); - final List hypervisors = new ArrayList(); - for (final String hv : hvs) { - hypervisors.add(HypervisorType.getType(hv)); - } - return hypervisors; + return new ArrayList<>(hypervisors); } @Override @@ -3315,16 +3302,15 @@ public HostVO findHostByName(final String name) { } @Override - public HostStats getHostStatistics(final long hostId) { - final Answer answer = _agentMgr.easySend(hostId, new GetHostStatsCommand(_hostDao.findById(hostId).getGuid(), _hostDao.findById(hostId).getName(), hostId)); + public HostStats getHostStatistics(final Host host) { + final Answer answer = _agentMgr.easySend(host.getId(), new GetHostStatsCommand(host.getGuid(), host.getName(), host.getId())); if (answer != 
null && answer instanceof UnsupportedAnswer) { return null; } if (answer == null || !answer.getResult()) { - final String msg = "Unable to obtain host " + hostId + " statistics. "; - logger.warn(msg); + logger.warn("Unable to obtain host {} statistics.", host.getId()); return null; } else { diff --git a/server/src/main/java/com/cloud/resource/RollingMaintenanceManagerImpl.java b/server/src/main/java/com/cloud/resource/RollingMaintenanceManagerImpl.java index c7bdf9c6f6c9..37dceed901b6 100644 --- a/server/src/main/java/com/cloud/resource/RollingMaintenanceManagerImpl.java +++ b/server/src/main/java/com/cloud/resource/RollingMaintenanceManagerImpl.java @@ -636,7 +636,7 @@ private Pair performCapacityChecksBeforeHostInMaintenance(Host continue; } boolean maxGuestLimit = capacityManager.checkIfHostReachMaxGuestLimit(host); - boolean hostHasCPUCapacity = capacityManager.checkIfHostHasCpuCapability(hostInCluster.getId(), serviceOffering.getCpu(), serviceOffering.getSpeed()); + boolean hostHasCPUCapacity = capacityManager.checkIfHostHasCpuCapability(hostInCluster, serviceOffering.getCpu(), serviceOffering.getSpeed()); int cpuRequested = serviceOffering.getCpu() * serviceOffering.getSpeed(); long ramRequested = serviceOffering.getRamSize() * 1024L * 1024L; ClusterDetailsVO clusterDetailsCpuOvercommit = clusterDetailsDao.findDetail(cluster.getId(), "cpuOvercommitRatio"); diff --git a/server/src/main/java/com/cloud/resourcelimit/ResourceLimitManagerImpl.java b/server/src/main/java/com/cloud/resourcelimit/ResourceLimitManagerImpl.java index b59ddc029ee1..cc649ed21ecb 100644 --- a/server/src/main/java/com/cloud/resourcelimit/ResourceLimitManagerImpl.java +++ b/server/src/main/java/com/cloud/resourcelimit/ResourceLimitManagerImpl.java @@ -93,7 +93,6 @@ import com.cloud.projects.ProjectAccount.Role; import com.cloud.projects.dao.ProjectAccountDao; import com.cloud.projects.dao.ProjectDao; -import com.cloud.service.ServiceOfferingVO; import com.cloud.service.dao.ServiceOfferingDao; import com.cloud.storage.DataStoreRole; import com.cloud.storage.DiskOfferingVO; @@ -105,7 +104,6 @@ import com.cloud.storage.dao.SnapshotDao; import com.cloud.storage.dao.VMTemplateDao; import com.cloud.storage.dao.VolumeDao; -import com.cloud.storage.dao.VolumeDaoImpl.SumCount; import com.cloud.template.VirtualMachineTemplate; import com.cloud.user.Account; import com.cloud.user.AccountManager; @@ -118,6 +116,7 @@ import com.cloud.utils.db.DB; import com.cloud.utils.db.EntityManager; import com.cloud.utils.db.Filter; +import com.cloud.utils.db.GenericDaoBase.SumCount; import com.cloud.utils.db.GenericSearchBuilder; import com.cloud.utils.db.GlobalLock; import com.cloud.utils.db.JoinBuilder; @@ -1291,16 +1290,14 @@ protected List getVmsWithAccountAndTag(long accountId, String tag) if (StringUtils.isEmpty(tag)) { return _userVmJoinDao.listByAccountServiceOfferingTemplateAndNotInState(accountId, states, null, null); } - List offerings = serviceOfferingDao.listByHostTag(tag); - List templates = _vmTemplateDao.listByTemplateTag(tag); + List offerings = serviceOfferingDao.listIdsByHostTag(tag); + List templates = _vmTemplateDao.listIdsByTemplateTag(tag); if (CollectionUtils.isEmpty(offerings) && CollectionUtils.isEmpty(templates)) { return new ArrayList<>(); } return _userVmJoinDao.listByAccountServiceOfferingTemplateAndNotInState(accountId, states, - offerings.stream().map(ServiceOfferingVO::getId).collect(Collectors.toList()), - templates.stream().map(VMTemplateVO::getId).collect(Collectors.toList()) - ); + offerings, 
templates); } protected List getVmsWithAccount(long accountId) { diff --git a/server/src/main/java/com/cloud/server/ManagementServerImpl.java b/server/src/main/java/com/cloud/server/ManagementServerImpl.java index 87b3dd3cbd92..80c2f618ef3b 100644 --- a/server/src/main/java/com/cloud/server/ManagementServerImpl.java +++ b/server/src/main/java/com/cloud/server/ManagementServerImpl.java @@ -44,7 +44,6 @@ import javax.inject.Inject; import javax.naming.ConfigurationException; -import com.cloud.utils.security.CertificateHelper; import org.apache.cloudstack.acl.ControlledEntity; import org.apache.cloudstack.acl.SecurityChecker; import org.apache.cloudstack.affinity.AffinityGroupProcessor; @@ -71,7 +70,6 @@ import org.apache.cloudstack.api.command.admin.bgp.CreateASNRangeCmd; import org.apache.cloudstack.api.command.admin.bgp.DeleteASNRangeCmd; import org.apache.cloudstack.api.command.admin.bgp.ListASNRangesCmd; -import org.apache.cloudstack.api.command.user.bgp.ListASNumbersCmd; import org.apache.cloudstack.api.command.admin.bgp.ReleaseASNumberCmd; import org.apache.cloudstack.api.command.admin.cluster.AddClusterCmd; import org.apache.cloudstack.api.command.admin.cluster.DeleteClusterCmd; @@ -297,6 +295,7 @@ import org.apache.cloudstack.api.command.admin.vm.DestroyVMCmdByAdmin; import org.apache.cloudstack.api.command.admin.vm.ExpungeVMCmd; import org.apache.cloudstack.api.command.admin.vm.GetVMUserDataCmd; +import org.apache.cloudstack.api.command.admin.vm.ListAffectedVmsForStorageScopeChangeCmd; import org.apache.cloudstack.api.command.admin.vm.ListVMsCmdByAdmin; import org.apache.cloudstack.api.command.admin.vm.MigrateVMCmd; import org.apache.cloudstack.api.command.admin.vm.MigrateVirtualMachineWithVolumeCmd; @@ -376,6 +375,7 @@ import org.apache.cloudstack.api.command.user.autoscale.UpdateAutoScaleVmGroupCmd; import org.apache.cloudstack.api.command.user.autoscale.UpdateAutoScaleVmProfileCmd; import org.apache.cloudstack.api.command.user.autoscale.UpdateConditionCmd; +import org.apache.cloudstack.api.command.user.bgp.ListASNumbersCmd; import org.apache.cloudstack.api.command.user.bucket.CreateBucketCmd; import org.apache.cloudstack.api.command.user.bucket.DeleteBucketCmd; import org.apache.cloudstack.api.command.user.bucket.ListBucketsCmd; @@ -532,7 +532,6 @@ import org.apache.cloudstack.api.command.user.vm.AddNicToVMCmd; import org.apache.cloudstack.api.command.user.vm.DeployVMCmd; import org.apache.cloudstack.api.command.user.vm.DestroyVMCmd; -import org.apache.cloudstack.api.command.admin.vm.ListAffectedVmsForStorageScopeChangeCmd; import org.apache.cloudstack.api.command.user.vm.GetVMPasswordCmd; import org.apache.cloudstack.api.command.user.vm.ListNicsCmd; import org.apache.cloudstack.api.command.user.vm.ListVMsCmd; @@ -825,6 +824,7 @@ import com.cloud.utils.fsm.StateMachine2; import com.cloud.utils.net.MacAddress; import com.cloud.utils.net.NetUtils; +import com.cloud.utils.security.CertificateHelper; import com.cloud.utils.ssh.SSHKeysHelper; import com.cloud.vm.ConsoleProxyVO; import com.cloud.vm.DiskProfile; @@ -5048,7 +5048,7 @@ public String getVMPassword(GetVMPasswordCmd cmd) { private boolean updateHostsInCluster(final UpdateHostPasswordCmd command) { // get all the hosts in this cluster - final List hosts = _resourceMgr.listAllHostsInCluster(command.getClusterId()); + final List hostIds = _hostDao.listIdsByClusterId(command.getClusterId()); String userNameWithoutSpaces = StringUtils.deleteWhitespace(command.getUsername()); if (StringUtils.isBlank(userNameWithoutSpaces)) { @@ 
-5058,19 +5058,17 @@ private boolean updateHostsInCluster(final UpdateHostPasswordCmd command) { Transaction.execute(new TransactionCallbackNoReturn() { @Override public void doInTransactionWithoutResult(final TransactionStatus status) { - for (final HostVO h : hosts) { - if (logger.isDebugEnabled()) { - logger.debug("Changing password for host name = " + h.getName()); - } + for (final Long hostId : hostIds) { + logger.debug("Changing password for host ID: {}", hostId); // update password for this host - final DetailVO nv = _detailsDao.findDetail(h.getId(), ApiConstants.USERNAME); + final DetailVO nv = _detailsDao.findDetail(hostId, ApiConstants.USERNAME); if (nv == null) { - final DetailVO nvu = new DetailVO(h.getId(), ApiConstants.USERNAME, userNameWithoutSpaces); + final DetailVO nvu = new DetailVO(hostId, ApiConstants.USERNAME, userNameWithoutSpaces); _detailsDao.persist(nvu); - final DetailVO nvp = new DetailVO(h.getId(), ApiConstants.PASSWORD, DBEncryptionUtil.encrypt(command.getPassword())); + final DetailVO nvp = new DetailVO(hostId, ApiConstants.PASSWORD, DBEncryptionUtil.encrypt(command.getPassword())); _detailsDao.persist(nvp); } else if (nv.getValue().equals(userNameWithoutSpaces)) { - final DetailVO nvp = _detailsDao.findDetail(h.getId(), ApiConstants.PASSWORD); + final DetailVO nvp = _detailsDao.findDetail(hostId, ApiConstants.PASSWORD); nvp.setValue(DBEncryptionUtil.encrypt(command.getPassword())); _detailsDao.persist(nvp); } else { diff --git a/server/src/main/java/com/cloud/server/StatsCollector.java b/server/src/main/java/com/cloud/server/StatsCollector.java index 70959b56cfde..4c88fa24326e 100644 --- a/server/src/main/java/com/cloud/server/StatsCollector.java +++ b/server/src/main/java/com/cloud/server/StatsCollector.java @@ -42,6 +42,7 @@ import java.util.concurrent.Executors; import java.util.concurrent.ScheduledExecutorService; import java.util.concurrent.TimeUnit; +import java.util.stream.Collectors; import javax.inject.Inject; @@ -131,6 +132,7 @@ import com.cloud.user.dao.VmDiskStatisticsDao; import com.cloud.utils.LogUtils; import com.cloud.utils.NumbersUtil; +import com.cloud.utils.Pair; import com.cloud.utils.component.ComponentMethodInterceptable; import com.cloud.utils.component.ManagerBase; import com.cloud.utils.concurrency.NamedThreadFactory; @@ -147,7 +149,6 @@ import com.cloud.utils.script.Script; import com.cloud.vm.NicVO; import com.cloud.vm.UserVmManager; -import com.cloud.vm.UserVmVO; import com.cloud.vm.VMInstanceVO; import com.cloud.vm.VirtualMachine; import com.cloud.vm.VirtualMachineManager; @@ -623,17 +624,21 @@ protected int retrieveExternalStatsPortFromUri(URI uri) throws URISyntaxExceptio externalStatsPrefix, externalStatsHost, externalStatsPort)); } - protected Map getVmMapForStatsForHost(Host host) { + protected Pair, Map> getVmMapForStatsForHost(Host host) { List vms = _vmInstance.listByHostAndState(host.getId(), VirtualMachine.State.Running); boolean collectUserVMStatsOnly = Boolean.TRUE.equals(vmStatsCollectUserVMOnly.value()); - Map vmMap = new HashMap<>(); - for (VMInstanceVO vm : vms) { - if (collectUserVMStatsOnly && !VirtualMachine.Type.User.equals(vm.getType())) { - continue; + if (collectUserVMStatsOnly) { + vms = vms.stream().filter(vm -> VirtualMachine.Type.User.equals(vm.getType())).collect(Collectors.toList()); + } + Map idInstanceMap = new HashMap<>(); + Map instanceNameIdMap = new HashMap<>(); + vms.forEach(vm -> { + if (!collectUserVMStatsOnly || VirtualMachine.Type.User.equals(vm.getType())) { + idInstanceMap.put(vm.getId(), 
vm); + instanceNameIdMap.put(vm.getInstanceName(), vm.getId()); } - vmMap.put(vm.getId(), vm); - } - return vmMap; + }); + return new Pair<>(idInstanceMap, instanceNameIdMap); } class HostCollector extends AbstractStatsCollector { @@ -647,7 +652,7 @@ protected void runInContext() { Map metrics = new HashMap<>(); for (HostVO host : hosts) { - HostStatsEntry hostStatsEntry = (HostStatsEntry) _resourceMgr.getHostStatistics(host.getId()); + HostStatsEntry hostStatsEntry = (HostStatsEntry) _resourceMgr.getHostStatistics(host); if (hostStatsEntry != null) { hostStatsEntry.setHostVo(host); metrics.put(hostStatsEntry.getHostId(), hostStatsEntry); @@ -1195,37 +1200,39 @@ protected void runInContext() { Map metrics = new HashMap<>(); for (HostVO host : hosts) { Date timestamp = new Date(); - Map vmMap = getVmMapForStatsForHost(host); + Pair, Map> vmsAndMap = getVmMapForStatsForHost(host); + Map vmMap = vmsAndMap.first(); try { - Map vmStatsById = virtualMachineManager.getVirtualMachineStatistics(host.getId(), host.getName(), vmMap); - - if (vmStatsById != null) { - Set vmIdSet = vmStatsById.keySet(); - for (Long vmId : vmIdSet) { - VmStatsEntry statsForCurrentIteration = (VmStatsEntry)vmStatsById.get(vmId); - statsForCurrentIteration.setVmId(vmId); - VMInstanceVO vm = vmMap.get(vmId); - statsForCurrentIteration.setVmUuid(vm.getUuid()); - - persistVirtualMachineStats(statsForCurrentIteration, timestamp); - - if (externalStatsType == ExternalStatsProtocol.GRAPHITE) { - prepareVmMetricsForGraphite(metrics, statsForCurrentIteration); - } else { - metrics.put(statsForCurrentIteration.getVmId(), statsForCurrentIteration); - } + Map vmStatsById = virtualMachineManager.getVirtualMachineStatistics( + host, vmsAndMap.second()); + if (MapUtils.isEmpty(vmStatsById)) { + continue; + } + Set vmIdSet = vmStatsById.keySet(); + for (Long vmId : vmIdSet) { + VmStatsEntry statsForCurrentIteration = (VmStatsEntry)vmStatsById.get(vmId); + statsForCurrentIteration.setVmId(vmId); + VMInstanceVO vm = vmMap.get(vmId); + statsForCurrentIteration.setVmUuid(vm.getUuid()); + + persistVirtualMachineStats(statsForCurrentIteration, timestamp); + + if (externalStatsType == ExternalStatsProtocol.GRAPHITE) { + prepareVmMetricsForGraphite(metrics, statsForCurrentIteration); + } else { + metrics.put(statsForCurrentIteration.getVmId(), statsForCurrentIteration); } + } - if (!metrics.isEmpty()) { - if (externalStatsType == ExternalStatsProtocol.GRAPHITE) { - sendVmMetricsToGraphiteHost(metrics, host); - } else if (externalStatsType == ExternalStatsProtocol.INFLUXDB) { - sendMetricsToInfluxdb(metrics); - } + if (!metrics.isEmpty()) { + if (externalStatsType == ExternalStatsProtocol.GRAPHITE) { + sendVmMetricsToGraphiteHost(metrics, host); + } else if (externalStatsType == ExternalStatsProtocol.INFLUXDB) { + sendMetricsToInfluxdb(metrics); } - - metrics.clear(); } + + metrics.clear(); } catch (Exception e) { logger.debug("Failed to get VM stats for host with ID: " + host.getId()); continue; @@ -1413,8 +1420,10 @@ protected void runInContext() { Transaction.execute(new TransactionCallbackNoReturn() { @Override public void doInTransactionWithoutResult(TransactionStatus status) { - Map vmMap = getVmMapForStatsForHost(host); - HashMap> vmDiskStatsById = virtualMachineManager.getVmDiskStatistics(host.getId(), host.getName(), vmMap); + Pair, Map> vmsAndMap = getVmMapForStatsForHost(host); + Map vmMap = vmsAndMap.first(); + HashMap> vmDiskStatsById = + virtualMachineManager.getVmDiskStatistics(host, vmsAndMap.second()); if (vmDiskStatsById == 
null) return; @@ -1521,8 +1530,10 @@ protected void runInContext() { Transaction.execute(new TransactionCallbackNoReturn() { @Override public void doInTransactionWithoutResult(TransactionStatus status) { - Map vmMap = getVmMapForStatsForHost(host); - HashMap> vmNetworkStatsById = virtualMachineManager.getVmNetworkStatistics(host.getId(), host.getName(), vmMap); + Pair, Map> vmsAndMap = getVmMapForStatsForHost(host); + Map vmMap = vmsAndMap.first(); + HashMap> vmNetworkStatsById = + virtualMachineManager.getVmNetworkStatistics(host, vmsAndMap.second()); if (vmNetworkStatsById == null) return; @@ -1531,9 +1542,9 @@ public void doInTransactionWithoutResult(TransactionStatus status) { List vmNetworkStats = vmNetworkStatsById.get(vmId); if (CollectionUtils.isEmpty(vmNetworkStats)) continue; - UserVmVO userVm = _userVmDao.findById(vmId); - if (userVm == null) { - logger.debug("Cannot find uservm with id: " + vmId + " , continue"); + VMInstanceVO userVm = vmMap.get(vmId); + if (!VirtualMachine.Type.User.equals(userVm.getType())) { + logger.debug("Cannot find uservm with id: {} , continue", vmId); continue; } logger.debug("Now we are updating the user_statistics table for VM: " + userVm.getInstanceName() diff --git a/server/src/main/java/com/cloud/storage/StorageManagerImpl.java b/server/src/main/java/com/cloud/storage/StorageManagerImpl.java index 2ed6be39b543..e6ad7176b245 100644 --- a/server/src/main/java/com/cloud/storage/StorageManagerImpl.java +++ b/server/src/main/java/com/cloud/storage/StorageManagerImpl.java @@ -42,10 +42,14 @@ import java.util.Random; import java.util.Set; import java.util.UUID; +import java.util.concurrent.CopyOnWriteArrayList; import java.util.concurrent.ExecutionException; +import java.util.concurrent.ExecutorService; import java.util.concurrent.Executors; +import java.util.concurrent.Future; import java.util.concurrent.ScheduledExecutorService; import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicBoolean; import java.util.stream.Collectors; import javax.inject.Inject; @@ -62,8 +66,8 @@ import org.apache.cloudstack.api.command.admin.storage.DeletePoolCmd; import org.apache.cloudstack.api.command.admin.storage.DeleteSecondaryStagingStoreCmd; import org.apache.cloudstack.api.command.admin.storage.SyncStoragePoolCmd; -import org.apache.cloudstack.api.command.admin.storage.UpdateObjectStoragePoolCmd; import org.apache.cloudstack.api.command.admin.storage.UpdateImageStoreCmd; +import org.apache.cloudstack.api.command.admin.storage.UpdateObjectStoragePoolCmd; import org.apache.cloudstack.api.command.admin.storage.UpdateStoragePoolCmd; import org.apache.cloudstack.api.command.admin.storage.heuristics.CreateSecondaryStorageSelectorCmd; import org.apache.cloudstack.api.command.admin.storage.heuristics.RemoveSecondaryStorageSelectorCmd; @@ -86,6 +90,7 @@ import org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStoreInfo; import org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStoreLifeCycle; import org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStoreProvider; +import org.apache.cloudstack.engine.subsystem.api.storage.Scope; import org.apache.cloudstack.engine.subsystem.api.storage.SnapshotDataFactory; import org.apache.cloudstack.engine.subsystem.api.storage.SnapshotInfo; import org.apache.cloudstack.engine.subsystem.api.storage.SnapshotService; @@ -234,8 +239,8 @@ import com.cloud.utils.DateUtil; import com.cloud.utils.NumbersUtil; import com.cloud.utils.Pair; -import com.cloud.utils.UriUtils; import 
com.cloud.utils.StringUtils; +import com.cloud.utils.UriUtils; import com.cloud.utils.component.ComponentContext; import com.cloud.utils.component.ManagerBase; import com.cloud.utils.concurrency.NamedThreadFactory; @@ -1540,6 +1545,80 @@ protected String getStoragePoolNonDestroyedVolumesLog(long storagePoolId) { return sb.toString(); } + protected void cleanupConnectedHostConnectionForFailedStorage(DataStore primaryStore, List poolHostIds) { + for (Long hostId : poolHostIds) { + try { + disconnectHostFromSharedPool(hostId, primaryStore.getId()); + } catch (StorageUnavailableException | StorageConflictException e) { + logger.error("Error during cleaning up failed storage host connection", e); + } + } + } + + @Override + public void connectHostsToPool(DataStore primaryStore, List hostIds, Scope scope, + boolean handleExceptionsPartially, boolean errorOnNoUpHost) throws CloudRuntimeException { + if (CollectionUtils.isEmpty(hostIds)) { + return; + } + CopyOnWriteArrayList poolHostIds = new CopyOnWriteArrayList<>(); + ExecutorService executorService = Executors.newFixedThreadPool(Math.max(1, Math.min(hostIds.size(), + StoragePoolHostConnectWorkers.value()))); + List> futures = new ArrayList<>(); + AtomicBoolean exceptionOccurred = new AtomicBoolean(false); + for (Long hostId : hostIds) { + futures.add(executorService.submit(() -> { + if (exceptionOccurred.get()) { + return null; + } + try { + connectHostToSharedPool(hostId, primaryStore.getId()); + poolHostIds.add(hostId); + } catch (Exception e) { + if (handleExceptionsPartially && e.getCause() instanceof StorageConflictException) { + exceptionOccurred.set(true); + throw e; + } + HostVO host = _hostDao.findById(hostId); + logger.warn("Unable to establish a connection between {} and {}", host, primaryStore, e); + String reason = getStoragePoolMountFailureReason(e.getMessage()); + if (handleExceptionsPartially && reason != null) { + exceptionOccurred.set(true); + throw new CloudRuntimeException(reason); + } + } + return null; + })); + } + for (Future future : futures) { + try { + future.get(); + } catch (Exception e) { + Throwable cause = e.getCause(); + if (cause instanceof StorageConflictException || cause instanceof CloudRuntimeException) { + executorService.shutdown(); + cleanupConnectedHostConnectionForFailedStorage(primaryStore, poolHostIds); + primaryStoreDao.expunge(primaryStore.getId()); + if (cause instanceof CloudRuntimeException) { + throw (CloudRuntimeException)cause; + } + throw new CloudRuntimeException("Storage has already been added as local storage", e); + } else { + logger.error("Error occurred while connecting host to shared pool", e); + } + } + } + executorService.shutdown(); + if (poolHostIds.isEmpty()) { + logger.warn("No host can access storage pool {} in {}: {}.", + primaryStore, scope.getScopeType(), scope.getScopeId()); + if (errorOnNoUpHost) { + primaryStoreDao.expunge(primaryStore.getId()); + throw new CloudRuntimeException("Failed to access storage pool"); + } + } + } + @Override public boolean connectHostToSharedPool(long hostId, long poolId) throws StorageUnavailableException, StorageConflictException { StoragePool pool = (StoragePool)_dataStoreMgr.getDataStore(poolId, DataStoreRole.Primary); @@ -4050,7 +4129,8 @@ public ConfigKey[] getConfigKeys() { MountDisabledStoragePool, VmwareCreateCloneFull, VmwareAllowParallelExecution, - DataStoreDownloadFollowRedirects + DataStoreDownloadFollowRedirects, + StoragePoolHostConnectWorkers }; } diff --git 
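
A note on the connectHostsToPool hunk above: it fans the per-host connect calls out over a bounded thread pool, remembers which hosts attached successfully, and rolls those attachments back before expunging the pool when one attempt fails in a way the caller treats as fatal. The following is a minimal, self-contained sketch of that shape, not the production method; PoolOps, connect and disconnect are hypothetical stand-ins for the real connectHostToSharedPool/disconnectHostFromSharedPool calls, and the error handling is deliberately simplified.

```java
import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.CopyOnWriteArrayList;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;

public class ParallelConnectSketch {

    /** Hypothetical stand-ins for the real pool attach/detach calls. */
    interface PoolOps {
        void connect(long hostId) throws Exception;
        void disconnect(long hostId);
    }

    static void connectAll(PoolOps ops, List<Long> hostIds, int maxWorkers) {
        ExecutorService executor = Executors.newFixedThreadPool(
                Math.max(1, Math.min(hostIds.size(), maxWorkers)));
        // Undo list: hosts that attached successfully, analogous to poolHostIds in the hunk.
        CopyOnWriteArrayList<Long> connected = new CopyOnWriteArrayList<>();
        List<Future<Object>> futures = new ArrayList<>();
        for (Long hostId : hostIds) {
            futures.add(executor.submit(() -> {
                ops.connect(hostId);          // may throw; the failure surfaces via Future.get()
                connected.add(hostId);
                return null;
            }));
        }
        try {
            for (Future<Object> future : futures) {
                try {
                    future.get();
                } catch (ExecutionException e) {
                    // One host failed: undo the attachments that did succeed, then give up.
                    connected.forEach(ops::disconnect);
                    throw new RuntimeException("Failed to attach the pool to every host", e.getCause());
                } catch (InterruptedException e) {
                    Thread.currentThread().interrupt();
                    throw new RuntimeException(e);
                }
            }
        } finally {
            executor.shutdown();
        }
    }
}
```

The CopyOnWriteArrayList plays the same role as poolHostIds in the hunk: it is the list consulted for cleanup when one of the futures reports a failure.
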
a/server/src/main/java/com/cloud/storage/download/DownloadListener.java b/server/src/main/java/com/cloud/storage/download/DownloadListener.java index bd0c0eff1bce..7d17a69cf7d7 100644 --- a/server/src/main/java/com/cloud/storage/download/DownloadListener.java +++ b/server/src/main/java/com/cloud/storage/download/DownloadListener.java @@ -16,6 +16,7 @@ // under the License. package com.cloud.storage.download; +import java.util.Collections; import java.util.Date; import java.util.HashMap; import java.util.List; @@ -24,10 +25,6 @@ import javax.inject.Inject; -import org.apache.logging.log4j.Level; -import org.apache.logging.log4j.Logger; -import org.apache.logging.log4j.LogManager; - import org.apache.cloudstack.engine.subsystem.api.storage.DataObject; import org.apache.cloudstack.engine.subsystem.api.storage.DataStore; import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreManager; @@ -41,6 +38,10 @@ import org.apache.cloudstack.storage.command.DownloadCommand.ResourceType; import org.apache.cloudstack.storage.command.DownloadProgressCommand; import org.apache.cloudstack.storage.command.DownloadProgressCommand.RequestType; +import org.apache.cloudstack.utils.cache.LazyCache; +import org.apache.logging.log4j.Level; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; import com.cloud.agent.Listener; import com.cloud.agent.api.AgentControlAnswer; @@ -54,7 +55,7 @@ import com.cloud.agent.api.to.DataObjectType; import com.cloud.exception.ConnectionException; import com.cloud.host.Host; -import com.cloud.hypervisor.Hypervisor.HypervisorType; +import com.cloud.hypervisor.Hypervisor; import com.cloud.resource.ResourceManager; import com.cloud.storage.VMTemplateStorageResourceAssoc.Status; import com.cloud.storage.download.DownloadState.DownloadEvent; @@ -136,6 +137,20 @@ protected void runInContext() { @Inject private VolumeService _volumeSrv; + private LazyCache> zoneHypervisorsCache; + + private List listAvailHypervisorInZone(long zoneId) { + if (_resourceMgr == null) { + return Collections.emptyList(); + } + return _resourceMgr.listAvailHypervisorInZone(zoneId); + } + + protected void initZoneHypervisorsCache() { + zoneHypervisorsCache = + new LazyCache<>(32, 30, this::listAvailHypervisorInZone); + } + // TODO: this constructor should be the one used for template only, remove other template constructor later public DownloadListener(EndPoint ssAgent, DataStore store, DataObject object, Timer timer, DownloadMonitorImpl downloadMonitor, DownloadCommand cmd, AsyncCompletionCallback callback) { @@ -151,6 +166,12 @@ public DownloadListener(EndPoint ssAgent, DataStore store, DataObject object, Ti _callback = callback; DownloadAnswer answer = new DownloadAnswer("", Status.NOT_DOWNLOADED); callback(answer); + initZoneHypervisorsCache(); + } + + public DownloadListener(DownloadMonitorImpl monitor) { + _downloadMonitor = monitor; + initZoneHypervisorsCache(); } public AsyncCompletionCallback getCallback() { @@ -210,10 +231,6 @@ public void log(String message, Level level) { logger.log(level, message + ", " + object.getType() + ": " + object.getId() + " at host " + _ssAgent.getId()); } - public DownloadListener(DownloadMonitorImpl monitor) { - _downloadMonitor = monitor; - } - @Override public boolean isRecurring() { return false; @@ -278,14 +295,15 @@ public void processHostAdded(long hostId) { @Override public void processConnect(Host agent, StartupCommand cmd, boolean forRebalance) throws ConnectionException { if (cmd instanceof StartupRoutingCommand) { - List 
hypers = _resourceMgr.listAvailHypervisorInZone(agent.getId(), agent.getDataCenterId()); - HypervisorType hostHyper = agent.getHypervisorType(); - if (hypers.contains(hostHyper)) { + List hypervisors = zoneHypervisorsCache.get(agent.getDataCenterId()); + Hypervisor.HypervisorType hostHyper = agent.getHypervisorType(); + if (hypervisors.contains(hostHyper)) { return; } _imageSrv.handleSysTemplateDownload(hostHyper, agent.getDataCenterId()); // update template_zone_ref for cross-zone templates _imageSrv.associateCrosszoneTemplatesToZone(agent.getDataCenterId()); + } /* This can be removed else if ( cmd instanceof StartupStorageCommand) { diff --git a/server/src/main/java/com/cloud/vm/UserVmManagerImpl.java b/server/src/main/java/com/cloud/vm/UserVmManagerImpl.java index 219feeef5a91..a0e0df1844f7 100644 --- a/server/src/main/java/com/cloud/vm/UserVmManagerImpl.java +++ b/server/src/main/java/com/cloud/vm/UserVmManagerImpl.java @@ -2112,7 +2112,7 @@ private boolean upgradeRunningVirtualMachine(Long vmId, Long newServiceOfferingI // #1 Check existing host has capacity & and the correct tags if (!excludes.shouldAvoid(ApiDBUtils.findHostById(vmInstance.getHostId()))) { - existingHostHasCapacity = _capacityMgr.checkIfHostHasCpuCapability(vmInstance.getHostId(), newCpu, newSpeed) + existingHostHasCapacity = _capacityMgr.checkIfHostHasCpuCapability(host, newCpu, newSpeed) && _capacityMgr.checkIfHostHasCapacity(vmInstance.getHostId(), cpuDiff, ByteScaleUtils.mebibytesToBytes(memoryDiff), false, _capacityMgr.getClusterOverProvisioningFactor(host.getClusterId(), Capacity.CAPACITY_TYPE_CPU), _capacityMgr.getClusterOverProvisioningFactor(host.getClusterId(), Capacity.CAPACITY_TYPE_MEMORY), false) diff --git a/server/src/main/java/org/apache/cloudstack/acl/RoleManagerImpl.java b/server/src/main/java/org/apache/cloudstack/acl/RoleManagerImpl.java index 60e7093c48b3..719c7e13614f 100644 --- a/server/src/main/java/org/apache/cloudstack/acl/RoleManagerImpl.java +++ b/server/src/main/java/org/apache/cloudstack/acl/RoleManagerImpl.java @@ -594,7 +594,7 @@ public String getConfigComponentName() { @Override public ConfigKey[] getConfigKeys() { - return new ConfigKey[] {RoleService.EnableDynamicApiChecker}; + return new ConfigKey[] {EnableDynamicApiChecker, DynamicApiCheckerCachePeriod}; } @Override diff --git a/server/src/main/java/org/apache/cloudstack/agent/lb/IndirectAgentLBServiceImpl.java b/server/src/main/java/org/apache/cloudstack/agent/lb/IndirectAgentLBServiceImpl.java index 8b05a76d0a96..84c3081bfc1b 100644 --- a/server/src/main/java/org/apache/cloudstack/agent/lb/IndirectAgentLBServiceImpl.java +++ b/server/src/main/java/org/apache/cloudstack/agent/lb/IndirectAgentLBServiceImpl.java @@ -18,9 +18,7 @@ import java.util.ArrayList; import java.util.Arrays; -import java.util.Collections; import java.util.Comparator; -import java.util.EnumSet; import java.util.HashMap; import java.util.List; import java.util.Map; @@ -34,17 +32,19 @@ import org.apache.cloudstack.config.ApiServiceConfiguration; import org.apache.cloudstack.framework.config.ConfigKey; import org.apache.cloudstack.framework.config.Configurable; +import org.apache.commons.lang3.StringUtils; import com.cloud.agent.AgentManager; import com.cloud.agent.api.Answer; +import com.cloud.dc.DataCenterVO; +import com.cloud.dc.dao.ClusterDao; +import com.cloud.dc.dao.DataCenterDao; import com.cloud.host.Host; -import com.cloud.host.HostVO; import com.cloud.host.dao.HostDao; import com.cloud.hypervisor.Hypervisor; import com.cloud.resource.ResourceState; 
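
Stepping back to the DownloadListener hunk a little above: listAvailHypervisorInZone used to be queried on every StartupRoutingCommand, and the change wraps it in a per-zone cache so repeated host connects in the same zone reuse one lookup for a while. The sketch below approximates that idea with plain JDK classes rather than CloudStack's LazyCache (whose exact size/expiry semantics are not restated here); the class and method names are invented for illustration only.

```java
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import java.util.function.Function;

/** Rough, framework-free approximation of a lazily populated, time-expiring lookup. */
public class ExpiringLookup<K, V> {

    private static final class Entry<V> {
        final V value;
        final long loadedAtMillis;
        Entry(V value, long loadedAtMillis) {
            this.value = value;
            this.loadedAtMillis = loadedAtMillis;
        }
    }

    private final Map<K, Entry<V>> entries = new ConcurrentHashMap<>();
    private final Function<K, V> loader;
    private final long ttlMillis;

    public ExpiringLookup(long ttlSeconds, Function<K, V> loader) {
        this.ttlMillis = ttlSeconds * 1000L;
        this.loader = loader;
    }

    public V get(K key) {
        long now = System.currentTimeMillis();
        Entry<V> cached = entries.get(key);
        if (cached != null && now - cached.loadedAtMillis < ttlMillis) {
            return cached.value;              // still fresh, skip the expensive lookup
        }
        V loaded = loader.apply(key);         // e.g. the per-zone hypervisor query
        entries.put(key, new Entry<>(loaded, now));
        return loaded;
    }
}
```

Hypothetical usage mirroring the listener would be along the lines of new ExpiringLookup<Long, List<HypervisorType>>(30, this::listAvailHypervisorInZone) at init time, then get(zoneId) inside processConnect.
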
import com.cloud.utils.component.ComponentLifecycleBase; import com.cloud.utils.exception.CloudRuntimeException; -import org.apache.commons.lang3.StringUtils; public class IndirectAgentLBServiceImpl extends ComponentLifecycleBase implements IndirectAgentLB, Configurable { @@ -60,11 +60,23 @@ public class IndirectAgentLBServiceImpl extends ComponentLifecycleBase implement private static Map algorithmMap = new HashMap<>(); + @Inject + private DataCenterDao dataCenterDao; + @Inject + private ClusterDao clusterDao; @Inject private HostDao hostDao; @Inject private AgentManager agentManager; + private static final List agentValidResourceStates = List.of( + ResourceState.Enabled, ResourceState.Maintenance, ResourceState.Disabled, + ResourceState.ErrorInMaintenance, ResourceState.PrepareForMaintenance); + private static final List agentValidHostTypes = List.of(Host.Type.Routing, Host.Type.ConsoleProxy, + Host.Type.SecondaryStorage, Host.Type.SecondaryStorageVM); + private static final List agentValidHypervisorTypes = List.of( + Hypervisor.HypervisorType.KVM, Hypervisor.HypervisorType.LXC); + ////////////////////////////////////////////////////// /////////////// Agent MSLB Methods /////////////////// ////////////////////////////////////////////////////// @@ -76,22 +88,22 @@ public List getManagementServerList(final Long hostId, final Long dcId, throw new CloudRuntimeException(String.format("No management server addresses are defined in '%s' setting", ApiServiceConfiguration.ManagementServerAddresses.key())); } + final List msList = Arrays.asList(msServerAddresses.replace(" ", "").split(",")); + if (msList.size() == 1) { + return msList; + } + final org.apache.cloudstack.agent.lb.IndirectAgentLBAlgorithm algorithm = getAgentMSLBAlgorithm(); List hostIdList = orderedHostIdList; if (hostIdList == null) { - hostIdList = getOrderedHostIdList(dcId); + hostIdList = algorithm.isHostListNeeded() ? getOrderedHostIdList(dcId) : new ArrayList<>(); } // just in case we have a host in creating state make sure it is in the list: if (null != hostId && ! 
hostIdList.contains(hostId)) { - if (logger.isTraceEnabled()) { - logger.trace("adding requested host to host list as it does not seem to be there; " + hostId); - } + logger.trace("adding requested host to host list as it does not seem to be there; {}", hostId); hostIdList.add(hostId); } - - final org.apache.cloudstack.agent.lb.IndirectAgentLBAlgorithm algorithm = getAgentMSLBAlgorithm(); - final List msList = Arrays.asList(msServerAddresses.replace(" ", "").split(",")); return algorithm.sort(msList, hostIdList, hostId); } @@ -119,76 +131,14 @@ public Long getLBPreferredHostCheckInterval(final Long clusterId) { } List getOrderedHostIdList(final Long dcId) { - final List hostIdList = new ArrayList<>(); - for (final Host host : getAllAgentBasedHosts()) { - if (host.getDataCenterId() == dcId) { - hostIdList.add(host.getId()); - } - } - Collections.sort(hostIdList, new Comparator() { - @Override - public int compare(Long x, Long y) { - return Long.compare(x,y); - } - }); + final List hostIdList = getAllAgentBasedHostsFromDB(dcId, null); + hostIdList.sort(Comparator.comparingLong(x -> x)); return hostIdList; } - private List getAllAgentBasedHosts() { - final List allHosts = hostDao.listAll(); - if (allHosts == null) { - return new ArrayList<>(); - } - final List agentBasedHosts = new ArrayList<>(); - for (final Host host : allHosts) { - conditionallyAddHost(agentBasedHosts, host); - } - return agentBasedHosts; - } - - private void conditionallyAddHost(List agentBasedHosts, Host host) { - if (host == null) { - if (logger.isTraceEnabled()) { - logger.trace("trying to add no host to a list"); - } - return; - } - - EnumSet allowedStates = EnumSet.of( - ResourceState.Enabled, - ResourceState.Maintenance, - ResourceState.Disabled, - ResourceState.ErrorInMaintenance, - ResourceState.PrepareForMaintenance); - // so the remaining EnumSet disallowedStates = EnumSet.complementOf(allowedStates) - // would be {ResourceState.Creating, ResourceState.Error}; - if (!allowedStates.contains(host.getResourceState())) { - if (logger.isTraceEnabled()) { - logger.trace(String.format("host is in '%s' state, not adding to the host list, (id = %s)", host.getResourceState(), host.getUuid())); - } - return; - } - - if (host.getType() != Host.Type.Routing - && host.getType() != Host.Type.ConsoleProxy - && host.getType() != Host.Type.SecondaryStorage - && host.getType() != Host.Type.SecondaryStorageVM) { - if (logger.isTraceEnabled()) { - logger.trace(String.format("host is of wrong type, not adding to the host list, (id = %s, type = %s)", host.getUuid(), host.getType())); - } - return; - } - - if (host.getHypervisorType() != null - && ! 
(host.getHypervisorType() == Hypervisor.HypervisorType.KVM || host.getHypervisorType() == Hypervisor.HypervisorType.LXC)) { - - if (logger.isTraceEnabled()) { - logger.trace(String.format("hypervisor is not the right type, not adding to the host list, (id = %s, hypervisortype = %s)", host.getUuid(), host.getHypervisorType())); - } - return; - } - - agentBasedHosts.add(host); + private List getAllAgentBasedHostsFromDB(final Long zoneId, final Long clusterId) { + return hostDao.findHostIdsByZoneClusterResourceStateTypeAndHypervisorType(zoneId, clusterId, + agentValidResourceStates, agentValidHostTypes, agentValidHypervisorTypes); } private org.apache.cloudstack.agent.lb.IndirectAgentLBAlgorithm getAgentMSLBAlgorithm() { @@ -208,18 +158,28 @@ private org.apache.cloudstack.agent.lb.IndirectAgentLBAlgorithm getAgentMSLBAlgo public void propagateMSListToAgents() { logger.debug("Propagating management server list update to agents"); final String lbAlgorithm = getLBAlgorithmName(); - final Map> dcOrderedHostsMap = new HashMap<>(); - for (final Host host : getAllAgentBasedHosts()) { - final Long dcId = host.getDataCenterId(); - if (!dcOrderedHostsMap.containsKey(dcId)) { - dcOrderedHostsMap.put(dcId, getOrderedHostIdList(dcId)); + List zones = dataCenterDao.listAll(); + for (DataCenterVO zone : zones) { + List zoneHostIds = new ArrayList<>(); + Map> clusterHostIdsMap = new HashMap<>(); + List clusterIds = clusterDao.listAllClusterIds(zone.getId()); + for (Long clusterId : clusterIds) { + List hostIds = getAllAgentBasedHostsFromDB(zone.getId(), clusterId); + clusterHostIdsMap.put(clusterId, hostIds); + zoneHostIds.addAll(hostIds); } - final List msList = getManagementServerList(host.getId(), host.getDataCenterId(), dcOrderedHostsMap.get(dcId)); - final Long lbCheckInterval = getLBPreferredHostCheckInterval(host.getClusterId()); - final SetupMSListCommand cmd = new SetupMSListCommand(msList, lbAlgorithm, lbCheckInterval); - final Answer answer = agentManager.easySend(host.getId(), cmd); - if (answer == null || !answer.getResult()) { - logger.warn(String.format("Failed to setup management servers list to the agent of %s", host)); + zoneHostIds.sort(Comparator.comparingLong(x -> x)); + for (Long clusterId : clusterIds) { + final Long lbCheckInterval = getLBPreferredHostCheckInterval(clusterId); + List hostIds = clusterHostIdsMap.get(clusterId); + for (Long hostId : hostIds) { + final List msList = getManagementServerList(hostId, zone.getId(), zoneHostIds); + final SetupMSListCommand cmd = new SetupMSListCommand(msList, lbAlgorithm, lbCheckInterval); + final Answer answer = agentManager.easySend(hostId, cmd); + if (answer == null || !answer.getResult()) { + logger.warn("Failed to setup management servers list to the agent of ID: {}", hostId); + } + } } } } diff --git a/server/src/main/java/org/apache/cloudstack/agent/lb/algorithm/IndirectAgentLBRoundRobinAlgorithm.java b/server/src/main/java/org/apache/cloudstack/agent/lb/algorithm/IndirectAgentLBRoundRobinAlgorithm.java index 63fea5e74e6e..49e6899aaeb1 100644 --- a/server/src/main/java/org/apache/cloudstack/agent/lb/algorithm/IndirectAgentLBRoundRobinAlgorithm.java +++ b/server/src/main/java/org/apache/cloudstack/agent/lb/algorithm/IndirectAgentLBRoundRobinAlgorithm.java @@ -56,4 +56,9 @@ public String getName() { public boolean compare(final List msList, final List receivedMsList) { return msList != null && receivedMsList != null && msList.equals(receivedMsList); } + + @Override + public boolean isHostListNeeded() { + return true; + } } diff --git 
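
For context on the isHostListNeeded() addition just above: a round-robin agent-to-management-server assignment only works if the algorithm knows the agent's position among all agents in the zone, which is why the ordered host id list is now fetched only when the chosen algorithm actually asks for it. The following is a rough sketch of that kind of rotation, not a restatement of IndirectAgentLBRoundRobinAlgorithm itself.

```java
import java.util.ArrayList;
import java.util.List;

public class RoundRobinMsListSketch {

    /**
     * Rotate the management-server list so agents spread evenly: the host at
     * position i in the zone-wide ordered host id list starts at server i % msList.size().
     */
    static List<String> sortForHost(List<String> msList, List<Long> orderedHostIds, Long hostId) {
        int position = orderedHostIds.indexOf(hostId);
        if (position < 0 || msList.size() < 2) {
            return msList;                    // unknown host, or nothing to balance
        }
        int start = position % msList.size();
        List<String> rotated = new ArrayList<>(msList.subList(start, msList.size()));
        rotated.addAll(msList.subList(0, start));
        return rotated;
    }

    public static void main(String[] args) {
        List<String> ms = List.of("10.0.0.1", "10.0.0.2", "10.0.0.3");
        // The host at position 4 starts with 10.0.0.2, since 4 % 3 == 1.
        System.out.println(sortForHost(ms, List.of(11L, 12L, 13L, 14L, 15L), 15L));
    }
}
```

An algorithm that ignores the host list could report isHostListNeeded() as false and skip the zone-wide host query entirely, which is exactly the short-circuit the getManagementServerList hunk adds (alongside the early return when only one management server address is configured).
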
a/server/src/main/java/org/apache/cloudstack/outofbandmanagement/OutOfBandManagementServiceImpl.java b/server/src/main/java/org/apache/cloudstack/outofbandmanagement/OutOfBandManagementServiceImpl.java index 02600b87f290..87c7089c39ff 100644 --- a/server/src/main/java/org/apache/cloudstack/outofbandmanagement/OutOfBandManagementServiceImpl.java +++ b/server/src/main/java/org/apache/cloudstack/outofbandmanagement/OutOfBandManagementServiceImpl.java @@ -283,11 +283,11 @@ && isOutOfBandManagementEnabledForCluster(host.getClusterId()) && isOutOfBandManagementEnabledForHost(host.getId()); } - public boolean transitionPowerStateToDisabled(List hosts) { + public boolean transitionPowerStateToDisabled(List hostIds) { boolean result = true; - for (Host host : hosts) { + for (Long hostId : hostIds) { result = result && transitionPowerState(OutOfBandManagement.PowerState.Event.Disabled, - outOfBandManagementDao.findByHost(host.getId())); + outOfBandManagementDao.findByHost(hostId)); } return result; } @@ -316,7 +316,7 @@ public OutOfBandManagementResponse enableOutOfBandManagement(final DataCenter zo @ActionEvent(eventType = EventTypes.EVENT_HOST_OUTOFBAND_MANAGEMENT_DISABLE, eventDescription = "disabling out-of-band management on a zone") public OutOfBandManagementResponse disableOutOfBandManagement(final DataCenter zone) { dataCenterDetailsDao.persist(zone.getId(), OOBM_ENABLED_DETAIL, String.valueOf(false)); - transitionPowerStateToDisabled(hostDao.findByDataCenterId(zone.getId())); + transitionPowerStateToDisabled(hostDao.listIdsByDataCenterId(zone.getId())); return buildEnableDisableResponse(false); } @@ -332,7 +332,7 @@ public OutOfBandManagementResponse enableOutOfBandManagement(final Cluster clust @ActionEvent(eventType = EventTypes.EVENT_HOST_OUTOFBAND_MANAGEMENT_DISABLE, eventDescription = "disabling out-of-band management on a cluster") public OutOfBandManagementResponse disableOutOfBandManagement(final Cluster cluster) { clusterDetailsDao.persist(cluster.getId(), OOBM_ENABLED_DETAIL, String.valueOf(false)); - transitionPowerStateToDisabled(hostDao.findByClusterId(cluster.getId())); + transitionPowerStateToDisabled(hostDao.listIdsByClusterId(cluster.getId())); return buildEnableDisableResponse(false); } @@ -352,7 +352,7 @@ public OutOfBandManagementResponse enableOutOfBandManagement(final Host host) { outOfBandManagementConfig.setEnabled(true); boolean updateResult = outOfBandManagementDao.update(outOfBandManagementConfig.getId(), (OutOfBandManagementVO) outOfBandManagementConfig); if (updateResult) { - transitionPowerStateToDisabled(Collections.singletonList(host)); + transitionPowerStateToDisabled(Collections.singletonList(host.getId())); } return buildEnableDisableResponse(true); } @@ -365,7 +365,7 @@ public OutOfBandManagementResponse disableOutOfBandManagement(final Host host) { outOfBandManagementConfig.setEnabled(false); boolean updateResult = outOfBandManagementDao.update(outOfBandManagementConfig.getId(), (OutOfBandManagementVO) outOfBandManagementConfig); if (updateResult) { - transitionPowerStateToDisabled(Collections.singletonList(host)); + transitionPowerStateToDisabled(Collections.singletonList(host.getId())); } return buildEnableDisableResponse(false); } @@ -576,10 +576,8 @@ protected void runInContext() { if (isOutOfBandManagementEnabled(host)) { submitBackgroundPowerSyncTask(host); } else if (outOfBandManagementHost.getPowerState() != OutOfBandManagement.PowerState.Disabled) { - if (transitionPowerStateToDisabled(Collections.singletonList(host))) { - if 
(logger.isDebugEnabled()) { - logger.debug(String.format("Out-of-band management was disabled in zone/cluster/host, disabled power state for %s", host)); - } + if (transitionPowerStateToDisabled(Collections.singletonList(host.getId()))) { + logger.debug("Out-of-band management was disabled in zone/cluster/host, disabled power state for {}", host); } } } diff --git a/server/src/test/java/com/cloud/capacity/CapacityManagerImplTest.java b/server/src/test/java/com/cloud/capacity/CapacityManagerImplTest.java new file mode 100644 index 000000000000..1b9e44e799c1 --- /dev/null +++ b/server/src/test/java/com/cloud/capacity/CapacityManagerImplTest.java @@ -0,0 +1,184 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +package com.cloud.capacity; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertTrue; +import static org.mockito.ArgumentMatchers.anyList; +import static org.mockito.ArgumentMatchers.eq; +import static org.mockito.Mockito.any; +import static org.mockito.Mockito.doReturn; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.when; + +import java.util.Collections; +import java.util.HashMap; +import java.util.List; +import java.util.Map; + +import org.apache.cloudstack.utils.bytescale.ByteScaleUtils; +import org.junit.Before; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.mockito.InjectMocks; +import org.mockito.Mock; +import org.mockito.Spy; +import org.mockito.junit.MockitoJUnitRunner; + +import com.cloud.dc.ClusterDetailsDao; +import com.cloud.host.Host; +import com.cloud.offering.ServiceOffering; +import com.cloud.service.ServiceOfferingVO; +import com.cloud.service.dao.ServiceOfferingDao; +import com.cloud.utils.Pair; +import com.cloud.vm.VmDetailConstants; + +@RunWith(MockitoJUnitRunner.class) +public class CapacityManagerImplTest { + @Mock + ClusterDetailsDao clusterDetailsDao; + @Mock + ServiceOfferingDao serviceOfferingDao; + + @Spy + @InjectMocks + CapacityManagerImpl capacityManager = new CapacityManagerImpl(); + + + private Host host; + private ServiceOffering offering; + private static final long CLUSTER_ID = 1L; + private static final long HOST_ID = 100L; + private static final int OFFERING_CPU = 4; + private static final int OFFERING_CPU_SPEED = 2000; + private static final int OFFERING_MEMORY = 4096; + + @Before + public void setUp() { + host = mock(Host.class); + offering = mock(ServiceOffering.class); + when(host.getClusterId()).thenReturn(CLUSTER_ID); + when(host.getId()).thenReturn(HOST_ID); + when(offering.getCpu()).thenReturn(OFFERING_CPU); + when(offering.getSpeed()).thenReturn(OFFERING_CPU_SPEED); + 
when(offering.getRamSize()).thenReturn(OFFERING_MEMORY); + } + + @Test + public void testGetClusterValues() { + long clusterId = 1L; + String cpuOvercommit = "2.0"; + String memoryOvercommit = "1.0"; + Map clusterDetails = new HashMap<>(); + clusterDetails.put(VmDetailConstants.CPU_OVER_COMMIT_RATIO, cpuOvercommit); + clusterDetails.put(VmDetailConstants.MEMORY_OVER_COMMIT_RATIO, memoryOvercommit); + + when(clusterDetailsDao.findDetails(eq(clusterId), anyList())).thenReturn(clusterDetails); + + Pair result = capacityManager.getClusterValues(clusterId); + + assertEquals(cpuOvercommit, result.first()); + assertEquals(memoryOvercommit, result.second()); + verify(clusterDetailsDao).findDetails(eq(clusterId), anyList()); + } + + @Test + public void testGetServiceOfferingsMap() { + Long offering1Id = 1L; + ServiceOfferingVO offering1 = mock(ServiceOfferingVO.class); + when(offering1.getId()).thenReturn(offering1Id); + Long offering2Id = 2L; + ServiceOfferingVO offering2 = mock(ServiceOfferingVO.class); + when(offering2.getId()).thenReturn(offering2Id); + when(serviceOfferingDao.listAllIncludingRemoved()).thenReturn(List.of(offering1, offering2)); + Map result = capacityManager.getServiceOfferingsMap(); + assertEquals(2, result.size()); + assertEquals(offering1, result.get(offering1Id)); + assertEquals(offering2, result.get(offering2Id)); + verify(serviceOfferingDao).listAllIncludingRemoved(); + } + + @Test + public void testGetServiceOfferingsMapEmptyList() { + when(serviceOfferingDao.listAllIncludingRemoved()).thenReturn(Collections.emptyList()); + Map result = capacityManager.getServiceOfferingsMap(); + assertTrue(result.isEmpty()); + verify(serviceOfferingDao).listAllIncludingRemoved(); + } + + @Test + public void testCheckIfHostHasCpuCapabilityAndCapacity() { + Float cpuOvercommit = 2.0f; + Float memoryOvercommit = 1.5f; + Pair clusterDetails = new Pair<>(String.valueOf(cpuOvercommit), String.valueOf(memoryOvercommit)); + doReturn(clusterDetails).when(capacityManager).getClusterValues(CLUSTER_ID); + doReturn(true).when(capacityManager).checkIfHostHasCpuCapability(any(Host.class), eq(OFFERING_CPU), + eq(OFFERING_CPU_SPEED)); + doReturn(true).when(capacityManager).checkIfHostHasCapacity(eq(HOST_ID), eq(OFFERING_CPU * OFFERING_CPU_SPEED), + eq(ByteScaleUtils.mebibytesToBytes(OFFERING_MEMORY)), eq(false), eq(cpuOvercommit), eq(memoryOvercommit), eq(false)); + Pair result = capacityManager.checkIfHostHasCpuCapabilityAndCapacity(host, offering, false); + assertTrue(result.first()); + assertTrue(result.second()); + verify(capacityManager).getClusterValues(CLUSTER_ID); + verify(capacityManager).checkIfHostHasCpuCapability(any(Host.class), eq(OFFERING_CPU), eq(OFFERING_CPU_SPEED)); + verify(capacityManager).checkIfHostHasCapacity(eq(HOST_ID), eq(OFFERING_CPU * OFFERING_CPU_SPEED), + eq(ByteScaleUtils.mebibytesToBytes(OFFERING_MEMORY)), + eq(false), eq(cpuOvercommit), eq(memoryOvercommit), eq(false)); + } + + @Test + public void testCheckIfHostHasNoCpuCapabilityButHasCapacity() { + Float cpuOvercommit = 1.5f; + Float memoryOvercommit = 1.2f; + Pair clusterDetails = new Pair<>(String.valueOf(cpuOvercommit), String.valueOf(memoryOvercommit)); + doReturn(clusterDetails).when(capacityManager).getClusterValues(CLUSTER_ID); + doReturn(false).when(capacityManager).checkIfHostHasCpuCapability(any(Host.class), eq(OFFERING_CPU), + eq(OFFERING_CPU_SPEED)); + doReturn(true).when(capacityManager).checkIfHostHasCapacity(eq(HOST_ID), eq(OFFERING_CPU * OFFERING_CPU_SPEED), + 
eq(ByteScaleUtils.mebibytesToBytes(OFFERING_MEMORY)), eq(false), eq(cpuOvercommit), eq(memoryOvercommit), eq(false)); + Pair result = capacityManager.checkIfHostHasCpuCapabilityAndCapacity(host, offering, false); + assertFalse(result.first()); + assertTrue(result.second()); + verify(capacityManager).getClusterValues(CLUSTER_ID); + verify(capacityManager).checkIfHostHasCpuCapability(any(Host.class), eq(OFFERING_CPU), eq(OFFERING_CPU_SPEED)); + verify(capacityManager).checkIfHostHasCapacity(eq(HOST_ID), eq(OFFERING_CPU * OFFERING_CPU_SPEED), + eq(ByteScaleUtils.mebibytesToBytes(OFFERING_MEMORY)), + eq(false), eq(cpuOvercommit), eq(memoryOvercommit), eq(false)); + } + + @Test + public void testCheckIfHostHasCpuCapabilityButNoCapacity() { + Float cpuOvercommit = 2.0f; + Float memoryOvercommit = 1.5f; + Pair clusterDetails = new Pair<>(String.valueOf(cpuOvercommit), String.valueOf(memoryOvercommit)); + doReturn(clusterDetails).when(capacityManager).getClusterValues(CLUSTER_ID); + doReturn(true).when(capacityManager).checkIfHostHasCpuCapability(any(Host.class), eq(OFFERING_CPU), + eq(OFFERING_CPU_SPEED)); + doReturn(false).when(capacityManager).checkIfHostHasCapacity(eq(HOST_ID), eq(OFFERING_CPU * OFFERING_CPU_SPEED), + eq(ByteScaleUtils.mebibytesToBytes(OFFERING_MEMORY)), eq(false), eq(cpuOvercommit), eq(memoryOvercommit), eq(false)); + Pair result = capacityManager.checkIfHostHasCpuCapabilityAndCapacity(host, offering, false); + assertTrue(result.first()); + assertFalse(result.second()); + verify(capacityManager).getClusterValues(CLUSTER_ID); + verify(capacityManager).checkIfHostHasCpuCapability(any(Host.class), eq(OFFERING_CPU), eq(OFFERING_CPU_SPEED)); + verify(capacityManager).checkIfHostHasCapacity(eq(HOST_ID), eq(OFFERING_CPU * OFFERING_CPU_SPEED), + eq(ByteScaleUtils.mebibytesToBytes(OFFERING_MEMORY)), + eq(false), eq(cpuOvercommit), eq(memoryOvercommit), eq(false)); + } +} diff --git a/server/src/test/java/com/cloud/configuration/ConfigurationManagerTest.java b/server/src/test/java/com/cloud/configuration/ConfigurationManagerTest.java index 4ae871e1ba58..b0035265da2a 100644 --- a/server/src/test/java/com/cloud/configuration/ConfigurationManagerTest.java +++ b/server/src/test/java/com/cloud/configuration/ConfigurationManagerTest.java @@ -826,7 +826,7 @@ public void checkIfPodIsDeletableFailureOnClusterTest() { @Test public void checkIfZoneIsDeletableSuccessTest() { - Mockito.when(_hostDao.listByDataCenterId(anyLong())).thenReturn(new ArrayList()); + Mockito.when(_hostDao.listEnabledIdsByDataCenterId(anyLong())).thenReturn(new ArrayList<>()); Mockito.when(_podDao.listByDataCenterId(anyLong())).thenReturn(new ArrayList()); Mockito.when(_privateIpAddressDao.countIPs(anyLong(), anyBoolean())).thenReturn(0); Mockito.when(_publicIpAddressDao.countIPs(anyLong(), anyBoolean())).thenReturn(0); @@ -840,11 +840,7 @@ public void checkIfZoneIsDeletableSuccessTest() { @Test(expected = CloudRuntimeException.class) public void checkIfZoneIsDeletableFailureOnHostTest() { - HostVO hostVO = Mockito.mock(HostVO.class); - ArrayList arrayList = new ArrayList(); - arrayList.add(hostVO); - - Mockito.when(_hostDao.listByDataCenterId(anyLong())).thenReturn(arrayList); + Mockito.when(_hostDao.listEnabledIdsByDataCenterId(anyLong())).thenReturn(List.of(1L)); Mockito.when(_podDao.listByDataCenterId(anyLong())).thenReturn(new ArrayList()); Mockito.when(_privateIpAddressDao.countIPs(anyLong(), anyBoolean())).thenReturn(0); Mockito.when(_publicIpAddressDao.countIPs(anyLong(), anyBoolean())).thenReturn(0); @@ -862,7 
+858,7 @@ public void checkIfZoneIsDeletableFailureOnPodTest() { ArrayList arrayList = new ArrayList(); arrayList.add(hostPodVO); - Mockito.when(_hostDao.listByDataCenterId(anyLong())).thenReturn(new ArrayList()); + Mockito.when(_hostDao.listEnabledIdsByDataCenterId(anyLong())).thenReturn(new ArrayList<>()); Mockito.when(_podDao.listByDataCenterId(anyLong())).thenReturn(arrayList); Mockito.when(_privateIpAddressDao.countIPs(anyLong(), anyBoolean())).thenReturn(0); Mockito.when(_publicIpAddressDao.countIPs(anyLong(), anyBoolean())).thenReturn(0); @@ -876,7 +872,7 @@ public void checkIfZoneIsDeletableFailureOnPodTest() { @Test(expected = CloudRuntimeException.class) public void checkIfZoneIsDeletableFailureOnPrivateIpAddressTest() { - Mockito.when(_hostDao.listByDataCenterId(anyLong())).thenReturn(new ArrayList()); + Mockito.when(_hostDao.listEnabledIdsByDataCenterId(anyLong())).thenReturn(new ArrayList<>()); Mockito.when(_podDao.listByDataCenterId(anyLong())).thenReturn(new ArrayList()); Mockito.when(_privateIpAddressDao.countIPs(anyLong(), anyBoolean())).thenReturn(1); Mockito.when(_publicIpAddressDao.countIPs(anyLong(), anyBoolean())).thenReturn(0); @@ -890,7 +886,7 @@ public void checkIfZoneIsDeletableFailureOnPrivateIpAddressTest() { @Test(expected = CloudRuntimeException.class) public void checkIfZoneIsDeletableFailureOnPublicIpAddressTest() { - Mockito.when(_hostDao.listByDataCenterId(anyLong())).thenReturn(new ArrayList()); + Mockito.when(_hostDao.listEnabledIdsByDataCenterId(anyLong())).thenReturn(new ArrayList<>()); Mockito.when(_podDao.listByDataCenterId(anyLong())).thenReturn(new ArrayList()); Mockito.when(_privateIpAddressDao.countIPs(anyLong(), anyBoolean())).thenReturn(0); Mockito.when(_publicIpAddressDao.countIPs(anyLong(), anyBoolean())).thenReturn(1); @@ -908,7 +904,7 @@ public void checkIfZoneIsDeletableFailureOnVmInstanceTest() { ArrayList arrayList = new ArrayList(); arrayList.add(vMInstanceVO); - Mockito.when(_hostDao.listByDataCenterId(anyLong())).thenReturn(new ArrayList()); + Mockito.when(_hostDao.listEnabledIdsByDataCenterId(anyLong())).thenReturn(new ArrayList<>()); Mockito.when(_podDao.listByDataCenterId(anyLong())).thenReturn(new ArrayList()); Mockito.when(_privateIpAddressDao.countIPs(anyLong(), anyBoolean())).thenReturn(0); Mockito.when(_publicIpAddressDao.countIPs(anyLong(), anyBoolean())).thenReturn(0); @@ -926,7 +922,7 @@ public void checkIfZoneIsDeletableFailureOnVolumeTest() { ArrayList arrayList = new ArrayList(); arrayList.add(volumeVO); - Mockito.when(_hostDao.listByDataCenterId(anyLong())).thenReturn(new ArrayList()); + Mockito.when(_hostDao.listEnabledIdsByDataCenterId(anyLong())).thenReturn(new ArrayList<>()); Mockito.when(_podDao.listByDataCenterId(anyLong())).thenReturn(new ArrayList()); Mockito.when(_privateIpAddressDao.countIPs(anyLong(), anyBoolean())).thenReturn(0); Mockito.when(_publicIpAddressDao.countIPs(anyLong(), anyBoolean())).thenReturn(0); @@ -944,7 +940,7 @@ public void checkIfZoneIsDeletableFailureOnPhysicalNetworkTest() { ArrayList arrayList = new ArrayList(); arrayList.add(physicalNetworkVO); - Mockito.when(_hostDao.listByDataCenterId(anyLong())).thenReturn(new ArrayList()); + Mockito.when(_hostDao.listEnabledIdsByDataCenterId(anyLong())).thenReturn(new ArrayList<>()); Mockito.when(_podDao.listByDataCenterId(anyLong())).thenReturn(new ArrayList()); Mockito.when(_privateIpAddressDao.countIPs(anyLong(), anyBoolean())).thenReturn(0); Mockito.when(_publicIpAddressDao.countIPs(anyLong(), anyBoolean())).thenReturn(0); diff --git 
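
The new CapacityManagerImplTest earlier in this patch pins down checkIfHostHasCpuCapabilityAndCapacity as returning a Pair whose first element is the CPU-capability check and whose second is the capacity check performed with the cluster's overcommit ratios; the three mixed-outcome tests cover the combinations. A compressed illustration of that contract follows, using hypothetical seams rather than the production CapacityManagerImpl methods or their real signatures.

```java
import com.cloud.utils.Pair;

public class CapabilityAndCapacitySketch {

    // Hypothetical seams standing in for the real capacity-manager checks.
    interface Checks {
        boolean hostHasCpuCapability(long hostId, int cpus, int speedMhz);
        boolean hostHasCapacity(long hostId, long cpuMhz, long ramBytes,
                                float cpuOvercommit, float memOvercommit);
    }

    static Pair<Boolean, Boolean> check(Checks checks, long hostId,
                                        int cpus, int speedMhz, long ramBytes,
                                        float cpuOvercommit, float memOvercommit) {
        boolean capability = checks.hostHasCpuCapability(hostId, cpus, speedMhz);
        boolean capacity = checks.hostHasCapacity(hostId, (long) cpus * speedMhz, ramBytes,
                cpuOvercommit, memOvercommit);
        // The two answers are independent: a host can pass one check and fail the other,
        // which is what the mixed-outcome test cases assert.
        return new Pair<>(capability, capacity);
    }
}
```
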
a/server/src/test/java/com/cloud/deploy/DeploymentPlanningManagerImplTest.java b/server/src/test/java/com/cloud/deploy/DeploymentPlanningManagerImplTest.java index 482d17908f43..c197a57b1095 100644 --- a/server/src/test/java/com/cloud/deploy/DeploymentPlanningManagerImplTest.java +++ b/server/src/test/java/com/cloud/deploy/DeploymentPlanningManagerImplTest.java @@ -889,7 +889,7 @@ private DeploymentClusterPlanner setupMocksForPlanDeploymentHostTests(HostVO hos Pair> potentialResources = new Pair<>(host, suitableVolumeStoragePoolMap); Mockito.when(capacityMgr.checkIfHostReachMaxGuestLimit(host)).thenReturn(false); - Mockito.when(capacityMgr.checkIfHostHasCpuCapability(ArgumentMatchers.anyLong(), ArgumentMatchers.anyInt(), ArgumentMatchers.anyInt())).thenReturn(true); + Mockito.when(capacityMgr.checkIfHostHasCpuCapability(ArgumentMatchers.any(Host.class), ArgumentMatchers.anyInt(), ArgumentMatchers.anyInt())).thenReturn(true); Mockito.when(capacityMgr.checkIfHostHasCapacity( ArgumentMatchers.anyLong(), ArgumentMatchers.anyInt(), @@ -1218,7 +1218,7 @@ private List prepareMockForAvoidOtherClustersForDeploymentIfMigrationDisab throw new RuntimeException(e); } List allClusters = List.of(101L, 102L, 103L, 104L); - Mockito.when(_clusterDao.listAllClusters(Mockito.anyLong())).thenReturn(allClusters); + Mockito.when(_clusterDao.listAllClusterIds(Mockito.anyLong())).thenReturn(allClusters); if (mockVolumes) { VolumeVO vol1 = Mockito.mock(VolumeVO.class); Mockito.when(vol1.getPoolId()).thenReturn(1L); diff --git a/server/src/test/java/com/cloud/network/Ipv6ServiceImplTest.java b/server/src/test/java/com/cloud/network/Ipv6ServiceImplTest.java index 02ddd0c983ef..a75d26f10622 100644 --- a/server/src/test/java/com/cloud/network/Ipv6ServiceImplTest.java +++ b/server/src/test/java/com/cloud/network/Ipv6ServiceImplTest.java @@ -16,6 +16,38 @@ // under the License. 
package com.cloud.network; +import java.net.URI; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Iterator; +import java.util.List; +import java.util.UUID; + +import javax.management.InstanceAlreadyExistsException; +import javax.management.MBeanRegistrationException; +import javax.management.MalformedObjectNameException; +import javax.management.NotCompliantMBeanException; + +import org.apache.cloudstack.api.command.user.ipv6.CreateIpv6FirewallRuleCmd; +import org.apache.cloudstack.api.command.user.ipv6.UpdateIpv6FirewallRuleCmd; +import org.apache.cloudstack.api.response.Ipv6RouteResponse; +import org.apache.cloudstack.api.response.VpcResponse; +import org.apache.cloudstack.context.CallContext; +import org.apache.cloudstack.engine.orchestration.service.NetworkOrchestrationService; +import org.apache.commons.collections.CollectionUtils; +import org.junit.After; +import org.junit.Assert; +import org.junit.Before; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.mockito.InjectMocks; +import org.mockito.Mock; +import org.mockito.MockedStatic; +import org.mockito.Mockito; +import org.mockito.MockitoAnnotations; +import org.mockito.junit.MockitoJUnitRunner; +import org.mockito.stubbing.Answer; + import com.cloud.api.ApiDBUtils; import com.cloud.dc.DataCenter; import com.cloud.dc.DataCenterGuestIpv6PrefixVO; @@ -61,36 +93,6 @@ import com.cloud.vm.dao.NicDao; import com.googlecode.ipv6.IPv6Network; import com.googlecode.ipv6.IPv6NetworkMask; -import org.apache.cloudstack.api.command.user.ipv6.CreateIpv6FirewallRuleCmd; -import org.apache.cloudstack.api.command.user.ipv6.UpdateIpv6FirewallRuleCmd; -import org.apache.cloudstack.api.response.Ipv6RouteResponse; -import org.apache.cloudstack.api.response.VpcResponse; -import org.apache.cloudstack.context.CallContext; -import org.apache.cloudstack.engine.orchestration.service.NetworkOrchestrationService; -import org.apache.commons.collections.CollectionUtils; -import org.junit.After; -import org.junit.Assert; -import org.junit.Before; -import org.junit.Test; -import org.junit.runner.RunWith; -import org.mockito.InjectMocks; -import org.mockito.Mock; -import org.mockito.MockedStatic; -import org.mockito.Mockito; -import org.mockito.MockitoAnnotations; -import org.mockito.junit.MockitoJUnitRunner; -import org.mockito.stubbing.Answer; - -import javax.management.InstanceAlreadyExistsException; -import javax.management.MBeanRegistrationException; -import javax.management.MalformedObjectNameException; -import javax.management.NotCompliantMBeanException; -import java.net.URI; -import java.util.ArrayList; -import java.util.Arrays; -import java.util.Iterator; -import java.util.List; -import java.util.UUID; @RunWith(MockitoJUnitRunner.class) public class Ipv6ServiceImplTest { diff --git a/server/src/test/java/com/cloud/network/NetworkServiceImplTest.java b/server/src/test/java/com/cloud/network/NetworkServiceImplTest.java index 64d813c9ba8b..f968140e1906 100644 --- a/server/src/test/java/com/cloud/network/NetworkServiceImplTest.java +++ b/server/src/test/java/com/cloud/network/NetworkServiceImplTest.java @@ -22,10 +22,10 @@ import static org.mockito.ArgumentMatchers.eq; import static org.mockito.ArgumentMatchers.nullable; import static org.mockito.Mockito.doNothing; +import static org.mockito.Mockito.doReturn; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.times; import static org.mockito.Mockito.when; -import static org.mockito.Mockito.doReturn; import java.lang.reflect.Field; import 
java.util.ArrayList; @@ -36,15 +36,6 @@ import java.util.Map; import java.util.UUID; -import com.cloud.domain.Domain; -import com.cloud.domain.DomainVO; -import com.cloud.domain.dao.DomainDao; -import com.cloud.network.dao.NsxProviderDao; -import com.cloud.network.dao.PublicIpQuarantineDao; -import com.cloud.network.vo.PublicIpQuarantineVO; -import com.cloud.user.dao.AccountDao; -import com.cloud.utils.net.Ip; -import com.cloud.exception.InsufficientAddressCapacityException; import org.apache.cloudstack.alert.AlertService; import org.apache.cloudstack.api.command.user.address.UpdateQuarantinedIpCmd; import org.apache.cloudstack.api.command.user.network.CreateNetworkCmd; @@ -53,6 +44,7 @@ import org.apache.cloudstack.engine.orchestration.service.NetworkOrchestrationService; import org.apache.cloudstack.framework.config.ConfigKey; import org.apache.cloudstack.network.RoutedIpv4Manager; +import org.junit.After; import org.junit.Assert; import org.junit.Before; import org.junit.BeforeClass; @@ -61,8 +53,10 @@ import org.mockito.ArgumentMatchers; import org.mockito.InjectMocks; import org.mockito.Mock; +import org.mockito.MockedStatic; import org.mockito.Mockito; import org.mockito.MockitoAnnotations; +import org.mockito.junit.MockitoJUnitRunner; import org.springframework.test.util.ReflectionTestUtils; import com.cloud.agent.api.to.IpAddressTO; @@ -72,6 +66,10 @@ import com.cloud.dc.DataCenter; import com.cloud.dc.DataCenterVO; import com.cloud.dc.dao.DataCenterDao; +import com.cloud.domain.Domain; +import com.cloud.domain.DomainVO; +import com.cloud.domain.dao.DomainDao; +import com.cloud.exception.InsufficientAddressCapacityException; import com.cloud.exception.InsufficientCapacityException; import com.cloud.exception.InvalidParameterValueException; import com.cloud.exception.ResourceAllocationException; @@ -79,10 +77,13 @@ import com.cloud.network.dao.IPAddressVO; import com.cloud.network.dao.NetworkDao; import com.cloud.network.dao.NetworkVO; +import com.cloud.network.dao.NsxProviderDao; import com.cloud.network.dao.PhysicalNetworkDao; import com.cloud.network.dao.PhysicalNetworkVO; +import com.cloud.network.dao.PublicIpQuarantineDao; import com.cloud.network.router.CommandSetupHelper; import com.cloud.network.router.NetworkHelper; +import com.cloud.network.vo.PublicIpQuarantineVO; import com.cloud.network.vpc.VpcManager; import com.cloud.network.vpc.VpcVO; import com.cloud.network.vpc.dao.VpcDao; @@ -100,18 +101,17 @@ import com.cloud.user.AccountVO; import com.cloud.user.User; import com.cloud.user.UserVO; +import com.cloud.user.dao.AccountDao; import com.cloud.user.dao.UserDao; import com.cloud.utils.Pair; import com.cloud.utils.db.EntityManager; import com.cloud.utils.exception.CloudRuntimeException; +import com.cloud.utils.net.Ip; import com.cloud.vm.DomainRouterVO; import com.cloud.vm.NicVO; import com.cloud.vm.VirtualMachine; import com.cloud.vm.dao.DomainRouterDao; import com.cloud.vm.dao.NicDao; -import org.junit.After; -import org.mockito.MockedStatic; -import org.mockito.junit.MockitoJUnitRunner; @RunWith(MockitoJUnitRunner.class) public class NetworkServiceImplTest { diff --git a/server/src/test/java/com/cloud/network/as/AutoScaleManagerImplTest.java b/server/src/test/java/com/cloud/network/as/AutoScaleManagerImplTest.java index b391aeb9f076..bf333507f1bb 100644 --- a/server/src/test/java/com/cloud/network/as/AutoScaleManagerImplTest.java +++ b/server/src/test/java/com/cloud/network/as/AutoScaleManagerImplTest.java @@ -16,6 +16,66 @@ // under the License. 
package com.cloud.network.as; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.ArgumentMatchers.anyList; +import static org.mockito.ArgumentMatchers.anyLong; +import static org.mockito.ArgumentMatchers.anyString; +import static org.mockito.ArgumentMatchers.eq; +import static org.mockito.ArgumentMatchers.matches; +import static org.mockito.ArgumentMatchers.nullable; +import static org.mockito.Mockito.never; +import static org.mockito.Mockito.times; +import static org.mockito.Mockito.when; + +import java.lang.reflect.Field; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collections; +import java.util.Date; +import java.util.HashMap; +import java.util.LinkedHashMap; +import java.util.List; +import java.util.Map; +import java.util.UUID; +import java.util.concurrent.CompletionService; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.Executors; +import java.util.concurrent.Future; +import java.util.concurrent.ScheduledExecutorService; + +import org.apache.cloudstack.affinity.AffinityGroupVO; +import org.apache.cloudstack.affinity.dao.AffinityGroupDao; +import org.apache.cloudstack.annotation.AnnotationService; +import org.apache.cloudstack.annotation.dao.AnnotationDao; +import org.apache.cloudstack.api.ApiConstants; +import org.apache.cloudstack.api.BaseCmd; +import org.apache.cloudstack.api.command.admin.autoscale.CreateCounterCmd; +import org.apache.cloudstack.api.command.user.autoscale.CreateAutoScalePolicyCmd; +import org.apache.cloudstack.api.command.user.autoscale.CreateAutoScaleVmGroupCmd; +import org.apache.cloudstack.api.command.user.autoscale.CreateAutoScaleVmProfileCmd; +import org.apache.cloudstack.api.command.user.autoscale.CreateConditionCmd; +import org.apache.cloudstack.api.command.user.autoscale.ListCountersCmd; +import org.apache.cloudstack.api.command.user.autoscale.UpdateAutoScaleVmGroupCmd; +import org.apache.cloudstack.api.command.user.autoscale.UpdateAutoScaleVmProfileCmd; +import org.apache.cloudstack.api.command.user.autoscale.UpdateConditionCmd; +import org.apache.cloudstack.api.command.user.vm.DeployVMCmd; +import org.apache.cloudstack.config.ApiServiceConfiguration; +import org.apache.cloudstack.context.CallContext; +import org.apache.cloudstack.framework.config.ConfigKey; +import org.apache.cloudstack.userdata.UserDataManager; +import org.junit.After; +import org.junit.Assert; +import org.junit.Before; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.mockito.InjectMocks; +import org.mockito.Mock; +import org.mockito.MockedStatic; +import org.mockito.Mockito; +import org.mockito.Spy; +import org.mockito.junit.MockitoJUnitRunner; +import org.springframework.test.util.ReflectionTestUtils; + import com.cloud.agent.AgentManager; import com.cloud.agent.api.PerformanceMonitorAnswer; import com.cloud.agent.api.PerformanceMonitorCommand; @@ -39,6 +99,7 @@ import com.cloud.exception.ResourceAllocationException; import com.cloud.exception.ResourceInUseException; import com.cloud.exception.ResourceUnavailableException; +import com.cloud.host.Host; import com.cloud.host.HostVO; import com.cloud.host.dao.HostDao; import com.cloud.network.Network; @@ -104,65 +165,6 @@ import com.cloud.vm.dao.DomainRouterDao; import com.cloud.vm.dao.UserVmDao; import com.cloud.vm.dao.VMInstanceDao; -import org.apache.cloudstack.affinity.AffinityGroupVO; -import org.apache.cloudstack.affinity.dao.AffinityGroupDao; -import org.apache.cloudstack.annotation.AnnotationService; -import 
org.apache.cloudstack.annotation.dao.AnnotationDao; -import org.apache.cloudstack.api.ApiConstants; -import org.apache.cloudstack.api.BaseCmd; -import org.apache.cloudstack.api.command.admin.autoscale.CreateCounterCmd; -import org.apache.cloudstack.api.command.user.autoscale.CreateAutoScalePolicyCmd; -import org.apache.cloudstack.api.command.user.autoscale.CreateAutoScaleVmGroupCmd; -import org.apache.cloudstack.api.command.user.autoscale.CreateAutoScaleVmProfileCmd; -import org.apache.cloudstack.api.command.user.autoscale.CreateConditionCmd; -import org.apache.cloudstack.api.command.user.autoscale.ListCountersCmd; -import org.apache.cloudstack.api.command.user.autoscale.UpdateAutoScaleVmGroupCmd; -import org.apache.cloudstack.api.command.user.autoscale.UpdateAutoScaleVmProfileCmd; -import org.apache.cloudstack.api.command.user.autoscale.UpdateConditionCmd; -import org.apache.cloudstack.api.command.user.vm.DeployVMCmd; -import org.apache.cloudstack.config.ApiServiceConfiguration; -import org.apache.cloudstack.context.CallContext; -import org.apache.cloudstack.framework.config.ConfigKey; -import org.apache.cloudstack.userdata.UserDataManager; -import org.junit.After; -import org.junit.Assert; -import org.junit.Before; -import org.junit.Test; -import org.junit.runner.RunWith; -import org.mockito.InjectMocks; -import org.mockito.Mock; -import org.mockito.MockedStatic; -import org.mockito.Mockito; -import org.mockito.Spy; -import org.mockito.junit.MockitoJUnitRunner; -import org.springframework.test.util.ReflectionTestUtils; - -import java.lang.reflect.Field; -import java.util.ArrayList; -import java.util.Arrays; -import java.util.Collections; -import java.util.Date; -import java.util.HashMap; -import java.util.LinkedHashMap; -import java.util.List; -import java.util.Map; -import java.util.UUID; -import java.util.concurrent.CompletionService; -import java.util.concurrent.ExecutionException; -import java.util.concurrent.Executors; -import java.util.concurrent.Future; -import java.util.concurrent.ScheduledExecutorService; - -import static org.mockito.ArgumentMatchers.any; -import static org.mockito.ArgumentMatchers.anyList; -import static org.mockito.ArgumentMatchers.anyLong; -import static org.mockito.ArgumentMatchers.anyString; -import static org.mockito.ArgumentMatchers.eq; -import static org.mockito.ArgumentMatchers.matches; -import static org.mockito.ArgumentMatchers.nullable; -import static org.mockito.Mockito.never; -import static org.mockito.Mockito.times; -import static org.mockito.Mockito.when; @RunWith(MockitoJUnitRunner.class) public class AutoScaleManagerImplTest { @@ -2244,32 +2246,31 @@ public void getVmStatsFromHosts() { } @Test - public void getVmStatsByIdFromHost() { - List vmIds = Mockito.mock(ArrayList.class); - - Map result = autoScaleManagerImplSpy.getVmStatsByIdFromHost(-1L, vmIds); - + public void getVmStatsByIdFromHostNullHost() { + Map result = autoScaleManagerImplSpy.getVmStatsByIdFromHost(-1L, List.of(1L, 2L)); Assert.assertEquals(0, result.size()); + Mockito.verify(virtualMachineManager, never()).getVirtualMachineStatistics(Mockito.any(Host.class), anyList()); + } - Mockito.verify(virtualMachineManager, never()).getVirtualMachineStatistics(anyLong(), anyString(), anyList()); + @Test + public void getVmStatsByIdFromHostEmptyVmList() { + Mockito.when(hostDao.findById(1L)).thenReturn(hostMock); + Map result = autoScaleManagerImplSpy.getVmStatsByIdFromHost(1L, Collections.emptyList()); + Mockito.verify(virtualMachineManager).getVirtualMachineStatistics(hostMock, 
Collections.emptyList()); + Assert.assertEquals(0, result.size()); } @Test public void getVmStatsByIdFromHost2() { - List vmIds = Mockito.mock(ArrayList.class); - VmStatsEntry vmStats = new VmStatsEntry(2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, "vm"); + List vmIds = List.of(virtualMachineId); + VmStatsEntry vmStats = new VmStatsEntry(virtualMachineId, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, "vm"); HashMap vmStatsById = new HashMap<>(); vmStatsById.put(virtualMachineId, vmStats); when(hostDao.findById(hostId)).thenReturn(hostMock); - when(hostMock.getId()).thenReturn(hostId); - when(hostMock.getName()).thenReturn(hostName); - Mockito.doReturn(vmStatsById).when(virtualMachineManager).getVirtualMachineStatistics(anyLong(), anyString(), anyList()); - + Mockito.doReturn(vmStatsById).when(virtualMachineManager).getVirtualMachineStatistics(hostMock, vmIds); Map result = autoScaleManagerImplSpy.getVmStatsByIdFromHost(hostId, vmIds); - Assert.assertEquals(vmStatsById, result); - - Mockito.verify(virtualMachineManager).getVirtualMachineStatistics(anyLong(), anyString(), anyList()); + Mockito.verify(virtualMachineManager).getVirtualMachineStatistics(Mockito.any(Host.class), anyList()); } @Test diff --git a/server/src/test/java/com/cloud/resource/MockResourceManagerImpl.java b/server/src/test/java/com/cloud/resource/MockResourceManagerImpl.java index 6aae7a091d3f..3de39fa21e6b 100755 --- a/server/src/test/java/com/cloud/resource/MockResourceManagerImpl.java +++ b/server/src/test/java/com/cloud/resource/MockResourceManagerImpl.java @@ -446,7 +446,7 @@ public List listAllHostsInOneZoneNotInClusterByHypervisors(List listAvailHypervisorInZone(final Long hostId, final Long zoneId) { + public List listAvailHypervisorInZone(final Long zoneId) { // TODO Auto-generated method stub return null; } @@ -473,7 +473,7 @@ public HostVO findHostByName(final String name) { * @see com.cloud.resource.ResourceManager#getHostStatistics(long) */ @Override - public HostStats getHostStatistics(final long hostId) { + public HostStats getHostStatistics(final Host host) { // TODO Auto-generated method stub return null; } diff --git a/server/src/test/java/com/cloud/resourcelimit/ResourceLimitManagerImplTest.java b/server/src/test/java/com/cloud/resourcelimit/ResourceLimitManagerImplTest.java index defcd09b1744..34030626d22e 100644 --- a/server/src/test/java/com/cloud/resourcelimit/ResourceLimitManagerImplTest.java +++ b/server/src/test/java/com/cloud/resourcelimit/ResourceLimitManagerImplTest.java @@ -59,10 +59,8 @@ import com.cloud.offering.ServiceOffering; import com.cloud.projects.ProjectVO; import com.cloud.projects.dao.ProjectDao; -import com.cloud.service.ServiceOfferingVO; import com.cloud.service.dao.ServiceOfferingDao; import com.cloud.storage.DiskOfferingVO; -import com.cloud.storage.VMTemplateVO; import com.cloud.storage.VolumeVO; import com.cloud.storage.dao.DiskOfferingDao; import com.cloud.storage.dao.VMTemplateDao; @@ -715,8 +713,8 @@ public void testGetVmsWithAccountAndTagNoTag() throws NoSuchFieldException, Ille @Test public void testGetVmsWithAccountAndTagNegative() { String tag = hostTags.get(0); - Mockito.when(serviceOfferingDao.listByHostTag(tag)).thenReturn(null); - Mockito.when(vmTemplateDao.listByTemplateTag(tag)).thenReturn(null); + Mockito.when(serviceOfferingDao.listIdsByHostTag(tag)).thenReturn(null); + Mockito.when(vmTemplateDao.listIdsByTemplateTag(tag)).thenReturn(null); List result = resourceLimitManager.getVmsWithAccountAndTag(1L, hostTags.get(0)); Assert.assertTrue(CollectionUtils.isEmpty(result)); } 
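The getVmStatsByIdFromHost tests above now resolve a Host entity up front and hand it, together with the raw VM id list, to getVirtualMachineStatistics(Host, List) instead of passing a host id and name. Below is a minimal caller-side sketch of the flow those tests describe; the HostDao and VirtualMachineManager calls are the ones exercised by the tests, while the return-type generics are an assumption since the diff shows only raw types.

// Illustrative sketch only, mirroring what the updated tests assert:
// a null host short-circuits to an empty map, while a found host is passed
// to the statistics API even when the VM id list is empty.
Map<Long, ? extends VmStats> getVmStatsByIdFromHost(long hostId, List<Long> vmIds) {
    HostVO host = hostDao.findById(hostId);   // returns null for the -1L host id case
    if (host == null) {
        return new HashMap<>();               // the manager must not be called (verified with never())
    }
    Map<Long, ? extends VmStats> stats = virtualMachineManager.getVirtualMachineStatistics(host, vmIds);
    if (stats == null) {
        return new HashMap<>();
    }
    return stats;
}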
@@ -725,12 +723,8 @@ public void testGetVmsWithAccountAndTagNegative() { public void testGetVmsWithAccountAndTag() throws NoSuchFieldException, IllegalAccessException { overrideDefaultConfigValue(VirtualMachineManager.ResourceCountRunningVMsonly, "_defaultValue", "true"); String tag = hostTags.get(0); - ServiceOfferingVO serviceOfferingVO = Mockito.mock(ServiceOfferingVO.class); - Mockito.when(serviceOfferingVO.getId()).thenReturn(1L); - VMTemplateVO templateVO = Mockito.mock(VMTemplateVO.class); - Mockito.when(templateVO.getId()).thenReturn(1L); - Mockito.when(serviceOfferingDao.listByHostTag(tag)).thenReturn(List.of(serviceOfferingVO)); - Mockito.when(vmTemplateDao.listByTemplateTag(tag)).thenReturn(List.of(templateVO)); + Mockito.when(serviceOfferingDao.listIdsByHostTag(tag)).thenReturn(List.of(1L)); + Mockito.when(vmTemplateDao.listIdsByTemplateTag(tag)).thenReturn(List.of(1L)); List vmList = List.of(Mockito.mock(UserVmJoinVO.class)); Mockito.when(userVmJoinDao.listByAccountServiceOfferingTemplateAndNotInState(Mockito.anyLong(), Mockito.anyList(), Mockito.anyList(), Mockito.anyList())).thenReturn(vmList); List result = resourceLimitManager.getVmsWithAccountAndTag(1L, tag); diff --git a/server/src/test/java/com/cloud/user/DomainManagerImplTest.java b/server/src/test/java/com/cloud/user/DomainManagerImplTest.java index 39155986941e..2c2fb5d5f93b 100644 --- a/server/src/test/java/com/cloud/user/DomainManagerImplTest.java +++ b/server/src/test/java/com/cloud/user/DomainManagerImplTest.java @@ -17,6 +17,30 @@ package com.cloud.user; +import java.util.ArrayList; +import java.util.Collections; +import java.util.List; +import java.util.UUID; + +import org.apache.cloudstack.annotation.dao.AnnotationDao; +import org.apache.cloudstack.context.CallContext; +import org.apache.cloudstack.engine.orchestration.service.NetworkOrchestrationService; +import org.apache.cloudstack.framework.messagebus.MessageBus; +import org.apache.cloudstack.framework.messagebus.PublishScope; +import org.apache.cloudstack.network.RoutedIpv4Manager; +import org.apache.cloudstack.region.RegionManager; +import org.junit.Assert; +import org.junit.Before; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.mockito.ArgumentMatchers; +import org.mockito.InjectMocks; +import org.mockito.Mock; +import org.mockito.MockedStatic; +import org.mockito.Mockito; +import org.mockito.Spy; +import org.mockito.junit.MockitoJUnitRunner; + import com.cloud.api.query.dao.DiskOfferingJoinDao; import com.cloud.api.query.dao.NetworkOfferingJoinDao; import com.cloud.api.query.dao.ServiceOfferingJoinDao; @@ -44,29 +68,6 @@ import com.cloud.utils.db.SearchCriteria; import com.cloud.utils.exception.CloudRuntimeException; import com.cloud.utils.net.NetUtils; -import org.apache.cloudstack.annotation.dao.AnnotationDao; -import org.apache.cloudstack.context.CallContext; -import org.apache.cloudstack.engine.orchestration.service.NetworkOrchestrationService; -import org.apache.cloudstack.framework.messagebus.MessageBus; -import org.apache.cloudstack.framework.messagebus.PublishScope; -import org.apache.cloudstack.network.RoutedIpv4Manager; -import org.apache.cloudstack.region.RegionManager; -import org.junit.Assert; -import org.junit.Before; -import org.junit.Test; -import org.junit.runner.RunWith; -import org.mockito.InjectMocks; -import org.mockito.ArgumentMatchers; -import org.mockito.Mock; -import org.mockito.MockedStatic; -import org.mockito.Mockito; -import org.mockito.Spy; -import org.mockito.junit.MockitoJUnitRunner; - -import 
java.util.ArrayList; -import java.util.Collections; -import java.util.List; -import java.util.UUID; @RunWith(MockitoJUnitRunner.class) diff --git a/server/src/test/java/com/cloud/user/MockUsageEventDao.java b/server/src/test/java/com/cloud/user/MockUsageEventDao.java index 7e15a4f0aaf3..f73203ce5641 100644 --- a/server/src/test/java/com/cloud/user/MockUsageEventDao.java +++ b/server/src/test/java/com/cloud/user/MockUsageEventDao.java @@ -27,6 +27,7 @@ import javax.naming.ConfigurationException; import java.util.ArrayList; +import java.util.Collections; import java.util.Date; import java.util.List; import java.util.Map; @@ -142,6 +143,11 @@ public List listAll(Filter filter) { return null; } + @Override + public List listAllIds() { + return Collections.emptyList(); + } + @Override public List search(SearchCriteria sc, Filter filter) { diff --git a/server/src/test/java/org/apache/cloudstack/agent/lb/IndirectAgentLBServiceImplTest.java b/server/src/test/java/org/apache/cloudstack/agent/lb/IndirectAgentLBServiceImplTest.java index 2f0c1c3185cd..0c0097393cae 100644 --- a/server/src/test/java/org/apache/cloudstack/agent/lb/IndirectAgentLBServiceImplTest.java +++ b/server/src/test/java/org/apache/cloudstack/agent/lb/IndirectAgentLBServiceImplTest.java @@ -16,13 +16,17 @@ // under the License. package org.apache.cloudstack.agent.lb; -import com.cloud.agent.AgentManager; -import com.cloud.host.Host; -import com.cloud.host.HostVO; -import com.cloud.host.dao.HostDao; -import com.cloud.hypervisor.Hypervisor; -import com.cloud.resource.ResourceState; -import com.cloud.utils.exception.CloudRuntimeException; +import static org.mockito.Mockito.doReturn; +import static org.mockito.Mockito.when; + +import java.lang.reflect.Field; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collections; +import java.util.HashSet; +import java.util.List; +import java.util.stream.Collectors; + import org.apache.cloudstack.config.ApiServiceConfiguration; import org.apache.cloudstack.framework.config.ConfigKey; import org.apache.cloudstack.framework.messagebus.MessageBus; @@ -32,16 +36,17 @@ import org.junit.Test; import org.mockito.InjectMocks; import org.mockito.Mock; +import org.mockito.Mockito; import org.mockito.MockitoAnnotations; import org.mockito.Spy; -import java.lang.reflect.Field; -import java.util.ArrayList; -import java.util.Arrays; -import java.util.HashSet; -import java.util.List; - -import static org.mockito.Mockito.when; +import com.cloud.agent.AgentManager; +import com.cloud.host.Host; +import com.cloud.host.HostVO; +import com.cloud.host.dao.HostDao; +import com.cloud.hypervisor.Hypervisor; +import com.cloud.resource.ResourceState; +import com.cloud.utils.exception.CloudRuntimeException; public class IndirectAgentLBServiceImplTest { @@ -85,6 +90,7 @@ private void addField(final IndirectAgentLBServiceImpl provider, final String na } private void configureMocks() throws NoSuchFieldException, IllegalAccessException { + List hosts = Arrays.asList(host1, host2, host3, host4); long id = 1; for (HostVO h : Arrays.asList(host1, host2, host3, host4)) { when(h.getId()).thenReturn(id); @@ -98,7 +104,9 @@ private void configureMocks() throws NoSuchFieldException, IllegalAccessExceptio addField(agentMSLB, "hostDao", hostDao); addField(agentMSLB, "agentManager", agentManager); - when(hostDao.listAll()).thenReturn(Arrays.asList(host4, host2, host1, host3)); + List hostIds = hosts.stream().map(HostVO::getId).collect(Collectors.toList()); + 
doReturn(hostIds).when(hostDao).findHostIdsByZoneClusterResourceStateTypeAndHypervisorType(Mockito.anyLong(), + Mockito.eq(null), Mockito.anyList(), Mockito.anyList(), Mockito.anyList()); } @Before @@ -193,21 +201,18 @@ public void testExceptionOnEmptyHostSetting() throws NoSuchFieldException, Illeg } @Test - public void testGetOrderedRunningHostIdsNullList() { - when(hostDao.listAll()).thenReturn(null); - Assert.assertTrue(agentMSLB.getOrderedHostIdList(DC_1_ID).size() == 0); + public void testGetOrderedRunningHostIdsEmptyList() { + doReturn(Collections.emptyList()).when(hostDao).findHostIdsByZoneClusterResourceStateTypeAndHypervisorType( + Mockito.eq(DC_1_ID), Mockito.eq(null), Mockito.anyList(), Mockito.anyList(), Mockito.anyList()); + Assert.assertTrue(agentMSLB.getOrderedHostIdList(DC_1_ID).isEmpty()); } @Test public void testGetOrderedRunningHostIdsOrderList() { - when(hostDao.listAll()).thenReturn(Arrays.asList(host4, host2, host1, host3)); + doReturn(Arrays.asList(host4.getId(), host2.getId(), host1.getId(), host3.getId())).when(hostDao) + .findHostIdsByZoneClusterResourceStateTypeAndHypervisorType(Mockito.eq(DC_1_ID), Mockito.eq(null), + Mockito.anyList(), Mockito.anyList(), Mockito.anyList()); Assert.assertEquals(Arrays.asList(host1.getId(), host2.getId(), host3.getId(), host4.getId()), agentMSLB.getOrderedHostIdList(DC_1_ID)); } - - @Test - public void testGetHostsPerZoneNullHosts() { - when(hostDao.listAll()).thenReturn(null); - Assert.assertTrue(agentMSLB.getOrderedHostIdList(DC_2_ID).size() == 0); - } } diff --git a/server/src/test/java/org/apache/cloudstack/networkoffering/CreateNetworkOfferingTest.java b/server/src/test/java/org/apache/cloudstack/networkoffering/CreateNetworkOfferingTest.java index 25b4bdda45f1..403b14965b14 100644 --- a/server/src/test/java/org/apache/cloudstack/networkoffering/CreateNetworkOfferingTest.java +++ b/server/src/test/java/org/apache/cloudstack/networkoffering/CreateNetworkOfferingTest.java @@ -18,7 +18,6 @@ package org.apache.cloudstack.networkoffering; import static org.mockito.ArgumentMatchers.any; -import static org.mockito.ArgumentMatchers.anyString; import static org.mockito.ArgumentMatchers.nullable; import java.util.HashMap; @@ -28,21 +27,21 @@ import javax.inject.Inject; -import com.cloud.network.dao.PublicIpQuarantineDao; import org.apache.cloudstack.annotation.dao.AnnotationDao; import org.apache.cloudstack.context.CallContext; import org.apache.cloudstack.framework.config.dao.ConfigurationDao; -import org.apache.cloudstack.framework.config.impl.ConfigurationVO; import org.apache.cloudstack.resourcedetail.dao.UserIpAddressDetailsDao; import org.junit.After; import org.junit.Before; import org.junit.Test; import org.junit.runner.RunWith; +import org.mockito.InjectMocks; +import org.mockito.Mock; import org.mockito.Mockito; -import org.springframework.test.context.ContextConfiguration; -import org.springframework.test.context.junit4.SpringJUnit4ClassRunner; +import org.mockito.junit.MockitoJUnitRunner; import com.cloud.configuration.ConfigurationManager; +import com.cloud.configuration.ConfigurationManagerImpl; import com.cloud.dc.dao.VlanDetailsDao; import com.cloud.event.dao.UsageEventDao; import com.cloud.event.dao.UsageEventDetailsDao; @@ -52,6 +51,7 @@ import com.cloud.network.Network.Service; import com.cloud.network.Networks.TrafficType; import com.cloud.network.dao.LoadBalancerVMMapDao; +import com.cloud.network.dao.PublicIpQuarantineDao; import com.cloud.network.vpc.VpcManager; import 
com.cloud.offering.NetworkOffering.Availability; import com.cloud.offerings.NetworkOfferingServiceMapVO; @@ -61,48 +61,44 @@ import com.cloud.user.AccountManager; import com.cloud.user.AccountVO; import com.cloud.user.UserVO; -import com.cloud.utils.component.ComponentContext; import com.cloud.vm.dao.UserVmDetailsDao; + import junit.framework.TestCase; -@RunWith(SpringJUnit4ClassRunner.class) -@ContextConfiguration(locations = "classpath:/createNetworkOffering.xml") +@RunWith(MockitoJUnitRunner.class) public class CreateNetworkOfferingTest extends TestCase { - @Inject - ConfigurationManager configMgr; - - @Inject + @Mock ConfigurationDao configDao; - @Inject + @Mock NetworkOfferingDao offDao; - @Inject + @Mock NetworkOfferingServiceMapDao mapDao; - @Inject + @Mock AccountManager accountMgr; - @Inject + @Mock VpcManager vpcMgr; - @Inject + @Mock UserVmDetailsDao userVmDetailsDao; - @Inject + @Mock UsageEventDao UsageEventDao; - @Inject + @Mock UsageEventDetailsDao usageEventDetailsDao; - @Inject + @Mock UserIpAddressDetailsDao userIpAddressDetailsDao; - @Inject + @Mock LoadBalancerVMMapDao _loadBalancerVMMapDao; - @Inject + @Mock AnnotationDao annotationDao; @Inject VlanDetailsDao vlanDetailsDao; @@ -110,15 +106,12 @@ public class CreateNetworkOfferingTest extends TestCase { @Inject PublicIpQuarantineDao publicIpQuarantineDao; + @InjectMocks + ConfigurationManager configMgr = new ConfigurationManagerImpl(); + @Override @Before public void setUp() { - ComponentContext.initComponentsLifeCycle(); - - ConfigurationVO configVO = new ConfigurationVO("200", "200", "200", "200", "200", "200"); - Mockito.when(configDao.findByName(anyString())).thenReturn(configVO); - - Mockito.when(offDao.persist(any(NetworkOfferingVO.class))).thenReturn(new NetworkOfferingVO()); Mockito.when(offDao.persist(any(NetworkOfferingVO.class), nullable(Map.class))).thenReturn(new NetworkOfferingVO()); Mockito.when(mapDao.persist(any(NetworkOfferingServiceMapVO.class))).thenReturn(new NetworkOfferingServiceMapVO()); Mockito.when(accountMgr.getSystemUser()).thenReturn(new UserVO(1)); diff --git a/services/secondary-storage/controller/src/main/java/org/apache/cloudstack/secondarystorage/SecondaryStorageManagerImpl.java b/services/secondary-storage/controller/src/main/java/org/apache/cloudstack/secondarystorage/SecondaryStorageManagerImpl.java index ec1e8a81979e..8a6c981e5693 100644 --- a/services/secondary-storage/controller/src/main/java/org/apache/cloudstack/secondarystorage/SecondaryStorageManagerImpl.java +++ b/services/secondary-storage/controller/src/main/java/org/apache/cloudstack/secondarystorage/SecondaryStorageManagerImpl.java @@ -16,6 +16,8 @@ // under the License. package org.apache.cloudstack.secondarystorage; +import static com.cloud.configuration.Config.SecStorageAllowedInternalDownloadSites; + import java.net.URI; import java.net.URISyntaxException; import java.util.ArrayList; @@ -157,8 +159,6 @@ import com.cloud.vm.dao.UserVmDetailsDao; import com.cloud.vm.dao.VMInstanceDao; -import static com.cloud.configuration.Config.SecStorageAllowedInternalDownloadSites; - /** * Class to manage secondary storages.

* Possible secondary storage VM state transition cases:
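The isZoneReady() hunk that follows applies the same idea to the secondary-storage manager: rather than loading every host and every storage-pool/host pair for the zone just to test for emptiness, it asks the DAOs for a count and a boolean. A condensed sketch of the resulting check is below; only the two new DAO calls are taken from the hunk, the surrounding fields and logging are existing context, and the meaning of the boolean flag is inferred from the old getDatacenterStoragePoolHostInfo call it replaces.

// Condensed sketch of the zone-readiness check after the change; not the full method.
Integer upAndEnabledHosts = _hostDao.countUpAndEnabledHostsInZone(dataCenterId);
if (upAndEnabledHosts != null && upAndEnabledHosts < 1) {
    logger.debug("Zone {} has no host available which is enabled and in Up state", dataCenterId);
    return false;   // nothing to deploy the SSVM on
}
boolean useLocalStorage = BooleanUtils.toBoolean(ConfigurationManagerImpl.SystemVMUseLocalStorage.valueIn(dataCenterId));
// true as soon as one matching pool/host pairing exists; the flag mirrors the hunk
// and appears to restrict the check to shared pools unless local storage is enabled
return _storagePoolHostDao.hasDatacenterStoragePoolHostInfo(dataCenterId, !useLocalStorage);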
@@ -810,11 +810,9 @@ public void allocCapacity(long dataCenterId, SecondaryStorageVm.Role role) { } public boolean isZoneReady(Map zoneHostInfoMap, long dataCenterId) { - List hosts = _hostDao.listByDataCenterId(dataCenterId); - if (CollectionUtils.isEmpty(hosts)) { - if (logger.isDebugEnabled()) { - logger.debug("Zone " + dataCenterId + " has no host available which is enabled and in Up state"); - } + Integer totalUpAndEnabledHosts = _hostDao.countUpAndEnabledHostsInZone(dataCenterId); + if (totalUpAndEnabledHosts != null && totalUpAndEnabledHosts < 1) { + logger.debug("Zone {} has no host available which is enabled and in Up state", dataCenterId); return false; } ZoneHostInfo zoneHostInfo = zoneHostInfoMap.get(dataCenterId); @@ -841,8 +839,8 @@ public boolean isZoneReady(Map zoneHostInfoMap, long dataCen } boolean useLocalStorage = BooleanUtils.toBoolean(ConfigurationManagerImpl.SystemVMUseLocalStorage.valueIn(dataCenterId)); - List> storagePoolHostInfos = _storagePoolHostDao.getDatacenterStoragePoolHostInfo(dataCenterId, !useLocalStorage); - if (CollectionUtils.isNotEmpty(storagePoolHostInfos) && storagePoolHostInfos.get(0).second() > 0) { + boolean hasStoragePoolHostInfo = _storagePoolHostDao.hasDatacenterStoragePoolHostInfo(dataCenterId, !useLocalStorage); + if (hasStoragePoolHostInfo) { return true; } else { if (logger.isDebugEnabled()) { diff --git a/setup/db/create-schema-simulator.sql b/setup/db/create-schema-simulator.sql index 73896af1f12b..f52faa043d8f 100644 --- a/setup/db/create-schema-simulator.sql +++ b/setup/db/create-schema-simulator.sql @@ -64,7 +64,8 @@ CREATE TABLE `simulator`.`mockstoragepool` ( `capacity` bigint, `pool_type` varchar(40), `hostguid` varchar(255) UNIQUE, - PRIMARY KEY (`id`) + PRIMARY KEY (`id`), + INDEX `i_mockstoragepool__guid`(`guid`) ) ENGINE=InnoDB DEFAULT CHARSET=utf8; diff --git a/test/integration/smoke/test_dynamicroles.py b/test/integration/smoke/test_dynamicroles.py index b91ba9c2eba8..e404835fbb8a 100644 --- a/test/integration/smoke/test_dynamicroles.py +++ b/test/integration/smoke/test_dynamicroles.py @@ -18,7 +18,7 @@ from marvin.cloudstackAPI import * from marvin.cloudstackTestCase import cloudstackTestCase from marvin.cloudstackException import CloudstackAPIException -from marvin.lib.base import Account, Role, RolePermission +from marvin.lib.base import Account, Role, RolePermission, Configurations from marvin.lib.utils import cleanup_resources from nose.plugins.attrib import attr from random import shuffle @@ -26,6 +26,7 @@ import copy import random import re +import time class TestData(object): @@ -109,6 +110,14 @@ def setUp(self): self.testdata["account"], roleid=self.role.id ) + + cache_period_config = Configurations.list( + self.apiclient, + name='dynamic.apichecker.cache.period' + )[0] + + self.cache_period = int(cache_period_config.value) + self.cleanup = [ self.account, self.rolepermission, @@ -623,6 +632,8 @@ def test_role_account_acls(self): testdata ) + time.sleep(self.cache_period + 5) + userApiClient = self.getUserApiClient(self.account.name, domain=self.account.domain, role_type=self.account.roletype) # Perform listApis check @@ -645,6 +656,8 @@ def test_role_account_acls_multiple_mgmt_servers(self): self.dbclient.execute("insert into role_permissions (uuid, role_id, rule, permission, sort_order) values (UUID(), %d, '%s', '%s', %d)" % (roleId, rule, perm.upper(), sortOrder)) sortOrder += 1 + time.sleep(self.cache_period + 5) + userApiClient = self.getUserApiClient(self.account.name, domain=self.account.domain, 
role_type=self.account.roletype) # Perform listApis check diff --git a/ui/src/config/section/infra/hosts.js b/ui/src/config/section/infra/hosts.js index 727da7242d7d..5def7f3b7fc1 100644 --- a/ui/src/config/section/infra/hosts.js +++ b/ui/src/config/section/infra/hosts.js @@ -27,7 +27,7 @@ export default { searchFilters: ['name', 'zoneid', 'podid', 'clusterid', 'hypervisor'], resourceType: 'Host', filters: () => { - const filters = ['enabled', 'disabled', 'maintenance', 'up', 'down', 'alert'] + const filters = ['enabled', 'disabled', 'maintenance', 'up', 'down', 'disconnected', 'alert'] return filters }, params: { type: 'routing' }, diff --git a/ui/src/views/AutogenView.vue b/ui/src/views/AutogenView.vue index bfdde77191ee..0209e1440d4d 100644 --- a/ui/src/views/AutogenView.vue +++ b/ui/src/views/AutogenView.vue @@ -1752,7 +1752,7 @@ export default { if (filter === 'all') { delete query.resourcestate delete query.state - } else if (['up', 'down', 'alert'].includes(filter)) { + } else if (['up', 'down', 'disconnected', 'alert'].includes(filter)) { delete query.resourcestate query.state = filter } else { diff --git a/ui/src/views/dashboard/CapacityDashboard.vue b/ui/src/views/dashboard/CapacityDashboard.vue index dae53cf0015b..76a72243ac9d 100644 --- a/ui/src/views/dashboard/CapacityDashboard.vue +++ b/ui/src/views/dashboard/CapacityDashboard.vue @@ -41,7 +41,7 @@
+ @click="() => { updateData(zoneSelected, true); listAlerts(); listEvents(); }"> {{ $t('label.fetch.latest') }} @@ -170,7 +170,7 @@ - +
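Across the server-side hunks in this section the recurring change is the same: DAO calls that returned full entity lists (listAll, listAllClusters, listByHostTag, listByTemplateTag) are replaced by id- or count-returning variants (listAllIds, listAllClusterIds, listIdsByHostTag, listIdsByTemplateTag, findHostIdsByZoneClusterResourceStateTypeAndHypervisorType, countUpAndEnabledHostsInZone), and the tests are restubbed accordingly. As a caller-side illustration only, the sketch below shows roughly how the indirect-agent LB ordering exercised by testGetOrderedRunningHostIdsOrderList could consume the id query; the argument meanings and the ascending sort are inferred from the method name and the test's assertion, not taken from this PR's source.

// Hypothetical sketch, not code from this PR: order candidate hosts for agent LB
// using only the ids returned by the DAO query stubbed in the test above.
List<Long> getOrderedHostIdList(long zoneId) {
    List<Long> hostIds = hostDao.findHostIdsByZoneClusterResourceStateTypeAndHypervisorType(
            zoneId, null,                                // zone-wide: no cluster filter
            List.of(ResourceState.Enabled),              // assumed resource-state filter
            List.of(Host.Type.Routing),                  // assumed host-type filter
            List.of(Hypervisor.HypervisorType.KVM));     // assumed hypervisor filter
    if (CollectionUtils.isEmpty(hostIds)) {
        return Collections.emptyList();
    }
    List<Long> ordered = new ArrayList<>(hostIds);
    Collections.sort(ordered);                           // the test asserts ascending id order
    return ordered;
}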