Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
3 changes: 3 additions & 0 deletions conf/serviceConfig/vmInstance.xml
Original file line number Diff line number Diff line change
Expand Up @@ -267,6 +267,9 @@
<message>
<name>org.zstack.header.vm.APICleanupVmInstanceMetadataMsg</name>
</message>
<message>
<name>org.zstack.header.vm.APICleanupAllVmInstanceMetadataMsg</name>
</message>
<message>
<name>org.zstack.header.vm.APIRegisterVmInstanceFromMetadataMsg</name>
</message>
Expand Down
Original file line number Diff line number Diff line change
@@ -0,0 +1,25 @@
package org.zstack.header.storage.primary;

import org.zstack.header.message.NeedReplyMessage;

/**
 * Internal message asking a primary storage to remove every VM metadata file
 * stored under {@link #getMetadataDir()}.
 *
 * Routed by {@link #getPrimaryStorageUuid()} to the owning primary storage
 * instance; the receiver answers with a CleanupAllVmMetadataOnPrimaryStorageReply.
 */
public class CleanupAllVmMetadataOnPrimaryStorageMsg extends NeedReplyMessage implements PrimaryStorageMessage {
    private String primaryStorageUuid;
    private String metadataDir;

    /** Directory on the storage/host holding the VM metadata files to wipe. */
    public String getMetadataDir() {
        return metadataDir;
    }

    public void setMetadataDir(String metadataDir) {
        this.metadataDir = metadataDir;
    }

    /** UUID of the primary storage this message is routed to. */
    @Override
    public String getPrimaryStorageUuid() {
        return primaryStorageUuid;
    }

    public void setPrimaryStorageUuid(String primaryStorageUuid) {
        this.primaryStorageUuid = primaryStorageUuid;
    }
}
Original file line number Diff line number Diff line change
@@ -0,0 +1,6 @@
package org.zstack.header.storage.primary;

import org.zstack.header.message.MessageReply;

/**
 * Reply to {@link CleanupAllVmMetadataOnPrimaryStorageMsg}.
 *
 * Besides the inherited success/error flags it carries simple statistics so
 * upper layers can aggregate cleanup results per primary storage; an empty
 * reply would make cleaned/failed counts impossible to propagate upstream.
 */
public class CleanupAllVmMetadataOnPrimaryStorageReply extends MessageReply {
    // number of VM metadata entries removed successfully (0 when nothing was found)
    private int cleanedCount;
    // number of entries that could not be removed; per-entry causes are in the logs
    private int failedCount;

    public int getCleanedCount() {
        return cleanedCount;
    }

    public void setCleanedCount(int cleanedCount) {
        this.cleanedCount = cleanedCount;
    }

    public int getFailedCount() {
        return failedCount;
    }

    public void setFailedCount(int failedCount) {
        this.failedCount = failedCount;
    }
}
Original file line number Diff line number Diff line change
@@ -0,0 +1,33 @@
package org.zstack.header.vm;

import org.zstack.header.message.APIEvent;
import org.zstack.header.rest.RestResponse;

import java.util.List;

/**
 * Event returned for APICleanupAllVmInstanceMetadataMsg.
 *
 * Lists the UUIDs of primary storage whose metadata cleanup failed; the list
 * is empty (or null) on full success. The aggregated failure cause is carried
 * in the inherited {@code error} field.
 */
@RestResponse(fieldsTo = {"all"})
public class APICleanupAllVmInstanceMetadataEvent extends APIEvent {
    private List<String> failedPrimaryStorageUuids;

    public APICleanupAllVmInstanceMetadataEvent() {
        super(null);
    }

    public APICleanupAllVmInstanceMetadataEvent(String apiId) {
        super(apiId);
    }

    /** UUIDs of primary storage on which cleanup failed. */
    public List<String> getFailedPrimaryStorageUuids() {
        return failedPrimaryStorageUuids;
    }

    public void setFailedPrimaryStorageUuids(List<String> failedPrimaryStorageUuids) {
        this.failedPrimaryStorageUuids = failedPrimaryStorageUuids;
    }

    /** Example-generator hook used by the API documentation framework. */
    public static APICleanupAllVmInstanceMetadataEvent __example__() {
        APICleanupAllVmInstanceMetadataEvent event = new APICleanupAllVmInstanceMetadataEvent();
        event.setFailedPrimaryStorageUuids(java.util.Collections.emptyList());
        return event;
    }
}
Original file line number Diff line number Diff line change
@@ -0,0 +1,29 @@
package org.zstack.header.vm

import org.zstack.header.errorcode.ErrorCode

// API-doc DSL: response document for APICleanupAllVmInstanceMetadataEvent.
// The field/ref entries below are rendered into the generated API reference;
// desc strings are user-facing documentation data, not comments.
doc {

    title "清理全部云主机元数据返回"

    field {
        name "failedPrimaryStorageUuids"
        desc "清理失败的主存储UUID列表;具体失败原因汇总见 error 字段,逐条详情见 mn / agent 日志"
        type "List"
        since "5.0.0"
    }
    field {
        name "success"
        desc "操作是否成功;任一主存储清理失败则为 false"
        type "boolean"
        since "5.0.0"
    }
    // 'error' refers to the inherited ErrorCode field on the event class
    ref {
        name "error"
        path "org.zstack.header.vm.APICleanupAllVmInstanceMetadataEvent.error"
        desc "错误码;success=false 时聚合所有失败主存储的失败原因"
        type "ErrorCode"
        since "5.0.0"
        clz ErrorCode.class
    }
}
Original file line number Diff line number Diff line change
@@ -0,0 +1,34 @@
package org.zstack.header.vm;

import org.springframework.http.HttpMethod;
import org.zstack.header.message.APIMessage;
import org.zstack.header.message.APIParam;
import org.zstack.header.rest.RestRequest;
import org.zstack.header.storage.primary.PrimaryStorageVO;

import java.util.List;

/**
 * API request to clean up all VM metadata files on one or more primary storage.
 *
 * When {@link #getPrimaryStorageUuids()} is null or empty, the handler decides
 * which primary storage to clean; otherwise only the listed ones are targeted.
 * Answered by {@link APICleanupAllVmInstanceMetadataEvent}.
 */
@RestRequest(
        path = "/vm-instances/metadata",
        method = HttpMethod.DELETE,
        responseClass = APICleanupAllVmInstanceMetadataEvent.class,
        isAction = true
)
public class APICleanupAllVmInstanceMetadataMsg extends APIMessage {
    // optional: limit the cleanup to these primary storage; each UUID is
    // validated against PrimaryStorageVO by the @APIParam resourceType check
    @APIParam(resourceType = PrimaryStorageVO.class, required = false)
    private List<String> primaryStorageUuids;

    public List<String> getPrimaryStorageUuids() {
        return primaryStorageUuids;
    }

    public void setPrimaryStorageUuids(List<String> primaryStorageUuids) {
        this.primaryStorageUuids = primaryStorageUuids;
    }

    /** Example-generator hook used by the API documentation framework. */
    public static APICleanupAllVmInstanceMetadataMsg __example__() {
        APICleanupAllVmInstanceMetadataMsg message = new APICleanupAllVmInstanceMetadataMsg();
        message.setPrimaryStorageUuids(java.util.Arrays.asList(uuid(), uuid()));
        return message;
    }
}
Original file line number Diff line number Diff line change
@@ -0,0 +1,58 @@
package org.zstack.header.vm

import org.zstack.header.vm.APICleanupAllVmInstanceMetadataEvent

// API-doc DSL: request document for APICleanupAllVmInstanceMetadataMsg.
// url/params/response entries are rendered into the generated API reference;
// the desc strings are user-facing documentation data, not comments.
doc {
    title "清理全部云主机元数据"

    category "云主机"

    desc """清理一个或多个主存储上保存的全部云主机元数据文件,仅管理员可调用。当 primaryStorageUuids 为空(未传或传空列表)时,将清理系统中所有 Enabled+Connected 且支持云主机元数据的主存储;否则仅清理列表中指定的主存储。"""

    rest {
        request {
            url "DELETE /v1/vm-instances/metadata"

            header (Authorization: 'OAuth the-session-uuid')

            clz APICleanupAllVmInstanceMetadataMsg.class

            desc """"""

            params {

                column {
                    name "primaryStorageUuids"
                    enclosedIn "cleanupAllVmInstanceMetadata"
                    desc "需要清理云主机元数据的主存储UUID列表;为空时清理所有 Enabled+Connected 主存储上的元数据"
                    location "body"
                    type "List"
                    optional true
                    since "5.0.0"
                }
                column {
                    name "systemTags"
                    enclosedIn ""
                    desc "系统标签"
                    location "query"
                    type "List"
                    optional true
                    since "5.0.0"
                }
                column {
                    name "userTags"
                    enclosedIn ""
                    desc "用户标签"
                    location "query"
                    type "List"
                    optional true
                    since "5.0.0"
                }
            }
        }

        response {
            clz APICleanupAllVmInstanceMetadataEvent.class
        }
    }
}
Original file line number Diff line number Diff line change
Expand Up @@ -72,6 +72,8 @@
import static org.zstack.utils.CollectionUtils.toMap;
import static org.zstack.utils.CollectionUtils.transformAndRemoveNull;

import java.util.concurrent.atomic.AtomicInteger;

/**
* Created by frank on 6/30/2015.
*/
Expand Down Expand Up @@ -907,6 +909,8 @@ public void handleLocalMessage(Message msg) {
handle((CommitVolumeSnapshotOnPrimaryStorageMsg) msg);
} else if (msg instanceof PullVolumeSnapshotOnPrimaryStorageMsg) {
handle((PullVolumeSnapshotOnPrimaryStorageMsg) msg);
} else if (msg instanceof CleanupAllVmMetadataOnPrimaryStorageMsg) {
handle((CleanupAllVmMetadataOnPrimaryStorageMsg) msg);
} else {
super.handleLocalMessage(msg);
}
Expand Down Expand Up @@ -3625,4 +3629,64 @@ public String getName() {
}
});
}

@Override
protected void handle(final CleanupAllVmMetadataOnPrimaryStorageMsg msg) {
CleanupAllVmMetadataOnPrimaryStorageReply reply = new CleanupAllVmMetadataOnPrimaryStorageReply();

List<String> connectedHostUuids = getConnectedLocalStorageHostUuids();
if (connectedHostUuids.isEmpty()) {
logger.warn(String.format("[MetadataCleanup] cleanAll: no connected host found for local ps[uuid:%s]", self.getUuid()));
bus.reply(msg, reply);
return;
}
Comment on lines +3637 to +3642
Copy link
Copy Markdown

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

⚠️ Potential issue | 🟠 Major

cleanup-all 未覆盖断连主机,失败统计会被低估。

当前仅以 Connected 主机作为清理集合;当存在 Disconnected 主机时,failedCountfailedHostUuids 会遗漏,Line 3638 在“无连接主机”场景也会返回 0 失败,和“全量清理”语义不一致。

建议修复(示例)
-        List<String> connectedHostUuids = getConnectedLocalStorageHostUuids();
+        List<String> allHostUuids = getAllLocalStorageHostUuids();
+        List<String> connectedHostUuids = getConnectedLocalStorageHostUuids();
+        List<String> disconnectedHostUuids = new ArrayList<>(allHostUuids);
+        disconnectedHostUuids.removeAll(connectedHostUuids);

         if (connectedHostUuids.isEmpty()) {
             logger.warn(String.format("[MetadataCleanup] cleanAll: no connected host found for local ps[uuid:%s]", self.getUuid()));
             reply.setCleanedCount(0);
-            reply.setFailedCount(0);
+            reply.setFailedCount(disconnectedHostUuids.size());
+            reply.setFailedHostUuids(disconnectedHostUuids);
             bus.reply(msg, reply);
             return;
         }

         AtomicInteger totalCleaned = new AtomicInteger(0);
-        AtomicInteger totalFailed = new AtomicInteger(0);
-        List<String> failedHostUuids = Collections.synchronizedList(new ArrayList<>());
+        AtomicInteger totalFailed = new AtomicInteger(disconnectedHostUuids.size());
+        List<String> failedHostUuids = Collections.synchronizedList(new ArrayList<>(disconnectedHostUuids));
+    private List<String> getAllLocalStorageHostUuids() {
+        return SQL.New(
+                        "select h.hostUuid from LocalStorageHostRefVO h where h.primaryStorageUuid = :psUuid", String.class)
+                .param("psUuid", self.getUuid())
+                .list();
+    }

Also applies to: 3699-3708

🤖 Prompt for AI Agents
Verify each finding against the current code and only fix it if needed.

In
`@plugin/localstorage/src/main/java/org/zstack/storage/primary/local/LocalStorageBase.java`
around lines 3637 - 3644, The cleanup-all flow in LocalStorageBase only iterates
connected hosts (via getConnectedLocalStorageHostUuids) and treats "no connected
host" as zero failures, which undercounts failures and misses disconnected hosts
in failedCount/failedHostUuids; update the cleanup-all handling in
LocalStorageBase so it builds the host list from all known local-storage hosts
(or union of connected + disconnected), attempt/record cleanup per-host, and
when there are zero connected hosts still populate reply.failedCount and
reply.failedHostUuids with the disconnected hosts (and include self.getUuid in
the log message), ensuring reply.setCleanedCount, reply.setFailedCount and
reply.setFailedHostUuids are set before bus.reply(msg, reply); modify the code
paths around getConnectedLocalStorageHostUuids, reply, msg, and variables
failedCount/failedHostUuids to aggregate results for both connected and
disconnected hosts.


new While<>(connectedHostUuids).all((hostUuid, com) -> {
final LocalStorageHypervisorBackend bkd;
try {
LocalStorageHypervisorFactory f = getHypervisorBackendFactoryByHostUuid(hostUuid);
bkd = f.getHypervisorBackend(self);
} catch (Exception e) {
logger.warn(String.format("[MetadataCleanup] cleanAll: failed to prepare backend for host[uuid:%s] on ps[uuid:%s]: %s",
hostUuid, self.getUuid(), e.getMessage()));
com.addError(operr("host[uuid:%s] backend prepare failed: %s", hostUuid, e.getMessage()));
com.done();
return;
}
bkd.handle(msg, hostUuid, new ReturnValueCompletion<CleanupAllVmMetadataOnPrimaryStorageReply>(com) {
@Override
public void success(CleanupAllVmMetadataOnPrimaryStorageReply returnValue) {
com.done();
}

@Override
public void fail(ErrorCode errorCode) {
logger.warn(String.format("[MetadataCleanup] cleanAll: failed on host[uuid:%s] on ps[uuid:%s]: %s",
hostUuid, self.getUuid(), errorCode));
com.addError(operr("host[uuid:%s]: %s", hostUuid, errorCode.getDescription()));
com.done();
}
});
}).run(new WhileDoneCompletion(msg) {
@Override
public void done(ErrorCodeList errorCodeList) {
if (!errorCodeList.getCauses().isEmpty()) {
Comment thread
coderabbitai[bot] marked this conversation as resolved.
reply.setError(operr("local primary storage[uuid:%s] cleanAll failed on %d/%d host(s): %s",
self.getUuid(), errorCodeList.getCauses().size(), connectedHostUuids.size(), errorCodeList));
}
bus.reply(msg, reply);
}
});
}

// Hosts attached to this local primary storage whose HostVO status is Connected.
private List<String> getConnectedLocalStorageHostUuids() {
    String query = "select h.hostUuid" +
            " from LocalStorageHostRefVO h, HostVO host" +
            " where h.primaryStorageUuid = :psUuid" +
            " and h.hostUuid = host.uuid" +
            " and host.status = :hstatus";
    return SQL.New(query, String.class)
            .param("psUuid", self.getUuid())
            .param("hstatus", HostStatus.Connected)
            .list();
}
}
Original file line number Diff line number Diff line change
Expand Up @@ -132,5 +132,7 @@ public LocalStorageHypervisorBackend(PrimaryStorageVO self) {

abstract void handle(CleanupVmInstanceMetadataOnPrimaryStorageMsg msg, String hostUuid, ReturnValueCompletion<CleanupVmInstanceMetadataOnPrimaryStorageReply> completion);

abstract void handle(CleanupAllVmMetadataOnPrimaryStorageMsg msg, String hostUuid, ReturnValueCompletion<CleanupAllVmMetadataOnPrimaryStorageReply> completion);

abstract void handle(RebaseVolumeBackingFileOnPrimaryStorageMsg msg, String hostUuid, ReturnValueCompletion<RebaseVolumeBackingFileOnPrimaryStorageReply> completion);
}
Original file line number Diff line number Diff line change
Expand Up @@ -950,6 +950,13 @@ public static class CleanupVmMetadataCmd extends AgentCommand {
public static class CleanupVmMetadataRsp extends AgentResponse {
}

/** Agent command: wipe the whole VM metadata directory of a local primary storage. */
public static class CleanupAllVmMetadataCmd extends AgentCommand {
    // absolute directory on the host holding the VM metadata files to remove
    public String metadataDir;
}

/**
 * Agent response for {@link CleanupAllVmMetadataCmd}. Carries cleanup statistics
 * so callers can observe the result; agents that do not report them yet leave
 * both counts at their JSON-deserialization default of 0.
 */
public static class CleanupAllVmMetadataRsp extends AgentResponse {
    // number of metadata entries the agent removed successfully
    public int cleanedCount;
    // number of entries that could not be removed; details in the agent log
    public int failedCount;
}
Comment on lines +953 to +958
Copy link
Copy Markdown

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

⚠️ Potential issue | 🟠 Major

cleanup-all 响应未承载并回传统计字段,结果可观测性不足。

CleanupAllVmMetadataRsp 为空且成功分支返回空 reply,cleanedCount/failedCount 无法透传到上层聚合。

🔧 建议修复
     public static class CleanupAllVmMetadataRsp extends AgentResponse {
+        public int cleanedCount;
+        public int failedCount;
     }
             public void success(CleanupAllVmMetadataRsp rsp) {
                 CleanupAllVmMetadataOnPrimaryStorageReply reply = new CleanupAllVmMetadataOnPrimaryStorageReply();
+                reply.setCleanedCount(rsp.cleanedCount);
+                reply.setFailedCount(rsp.failedCount);
                 completion.success(reply);
             }

Also applies to: 3966-3983

🤖 Prompt for AI Agents
Verify each finding against the current code and only fix it if needed.

In
`@plugin/localstorage/src/main/java/org/zstack/storage/primary/local/LocalStorageKvmBackend.java`
around lines 953 - 958, The response class CleanupAllVmMetadataRsp should carry
the cleanup metrics so callers can observe results: add integer fields
cleanedCount and failedCount (with getters/setters or public fields as your
style) to CleanupAllVmMetadataRsp, populate those fields when constructing the
reply in the cleanup handler that processes CleanupAllVmMetadataCmd (ensure both
success and partial-failure paths set the counts), and return that populated
CleanupAllVmMetadataRsp instead of an empty reply; apply the same change to the
other identical spot referenced (the duplicate block at 3966-3983) so both reply
types convey cleanedCount/failedCount upstream.


public static class PrefixRebaseBackingFilesCmd extends LocalStorageKvmBackend.AgentCommand {
public List<String> filePaths;
public String oldPrefix;
Expand Down Expand Up @@ -996,6 +1003,7 @@ public static class PrefixRebaseBackingFilesRsp extends LocalStorageKvmBackend.A
public static final String GET_VM_INSTANCE_METADATA_PATH = "/localstorage/vm/metadata/get";
public static final String SCAN_VM_METADATA_PATH = "/localstorage/vm/metadata/scan";
public static final String CLEANUP_VM_METADATA_PATH = "/localstorage/vm/metadata/cleanup";
public static final String CLEANUP_ALL_VM_METADATA_PATH = "/localstorage/vm/metadata/cleanup-all";
public static final String PREFIX_REBASE_BACKING_FILES_PATH = "/localstorage/snapshot/prefixrebasebackingfiles";

public LocalStorageKvmBackend() {
Expand Down Expand Up @@ -3955,6 +3963,25 @@ public void fail(ErrorCode errorCode) {
});
}

@Override
void handle(CleanupAllVmMetadataOnPrimaryStorageMsg msg, String hostUuid, ReturnValueCompletion<CleanupAllVmMetadataOnPrimaryStorageReply> completion) {
    // Ask the agent on the given host to wipe the VM metadata directory carried
    // by the message (CLEANUP_ALL_VM_METADATA_PATH endpoint).
    CleanupAllVmMetadataCmd cmd = new CleanupAllVmMetadataCmd();
    cmd.metadataDir = msg.getMetadataDir();

    httpCall(CLEANUP_ALL_VM_METADATA_PATH, hostUuid, cmd, CleanupAllVmMetadataRsp.class, new ReturnValueCompletion<CleanupAllVmMetadataRsp>(completion) {
        @Override
        public void success(CleanupAllVmMetadataRsp rsp) {
            // NOTE(review): rsp's contents are discarded here, so no cleanup
            // statistics reach the caller — consider propagating cleaned/failed
            // counts once the reply/response types carry them. TODO confirm.
            CleanupAllVmMetadataOnPrimaryStorageReply reply = new CleanupAllVmMetadataOnPrimaryStorageReply();
            completion.success(reply);
        }

        @Override
        public void fail(ErrorCode errorCode) {
            // agent call failed (connection or operation error) — propagate as-is
            completion.fail(errorCode);
        }
    });
}

@Override
void handle(RebaseVolumeBackingFileOnPrimaryStorageMsg msg, String hostUuid, ReturnValueCompletion<RebaseVolumeBackingFileOnPrimaryStorageReply> completion) {
PrefixRebaseBackingFilesCmd cmd = new PrefixRebaseBackingFilesCmd();
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -136,6 +136,8 @@ protected void handleLocalMessage(Message msg) {
handle((PullVolumeSnapshotOnPrimaryStorageMsg) msg);
} else if (msg instanceof RebaseVolumeBackingFileOnPrimaryStorageMsg) {
handle((RebaseVolumeBackingFileOnPrimaryStorageMsg) msg);
} else if (msg instanceof CleanupAllVmMetadataOnPrimaryStorageMsg) {
handle((CleanupAllVmMetadataOnPrimaryStorageMsg) msg);
} else {
super.handleLocalMessage(msg);
}
Expand Down Expand Up @@ -2089,4 +2091,42 @@ public void fail(ErrorCode errorCode) {
}
});
}

@Override
protected void handle(CleanupAllVmMetadataOnPrimaryStorageMsg msg) {
    // Cleans all VM metadata on this NFS primary storage. Since the share is
    // mounted on every attached host, the work only needs to run on one
    // connected host; fallback tries the next host if an attempt fails.
    CleanupAllVmMetadataOnPrimaryStorageReply reply = new CleanupAllVmMetadataOnPrimaryStorageReply();
    List<HostInventory> connectedHosts = factory.getConnectedHostForOperation(getSelfInventory());
    if (connectedHosts.isEmpty()) {
        // NOTE(review): failing hard here blocks cleanup whenever all hosts are
        // briefly offline; a best-effort success (nothing cleaned, warning
        // logged) may match caller expectations better — confirm intended
        // semantics before changing.
        reply.setError(operr("no connected host found for NFS primary storage[uuid:%s]", self.getUuid()));
        bus.reply(msg, reply);
        return;
    }
    cleanupAllOnHostWithFallback(msg, reply, connectedHosts, 0);
}

private void cleanupAllOnHostWithFallback(CleanupAllVmMetadataOnPrimaryStorageMsg msg,
CleanupAllVmMetadataOnPrimaryStorageReply reply,
List<HostInventory> connectedHosts, int idx) {
if (idx >= connectedHosts.size()) {
reply.setError(operr("failed to cleanup all vm metadata on NFS primary storage[uuid:%s] after trying %d connected host(s)",
self.getUuid(), connectedHosts.size()));
bus.reply(msg, reply);
Comment on lines +2110 to +2113
Copy link
Copy Markdown

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

⚠️ Potential issue | 🟡 Minor

最终失败错误缺少根因信息,接口可观测性偏弱。

当前重试全部失败后仅返回通用错误,调用方拿不到最后一次失败的具体原因。建议在最终 reply.setError(...) 中带上最近一次 ErrorCode 细节。

🔧 建议修改
-        cleanupAllOnHostWithFallback(msg, reply, connectedHosts, 0);
+        cleanupAllOnHostWithFallback(msg, reply, connectedHosts, 0, null);
     }
 
     private void cleanupAllOnHostWithFallback(CleanupAllVmMetadataOnPrimaryStorageMsg msg,
                                               CleanupAllVmMetadataOnPrimaryStorageReply reply,
-                                              List<HostInventory> connectedHosts, int idx) {
+                                              List<HostInventory> connectedHosts, int idx,
+                                              ErrorCode lastError) {
         if (idx >= connectedHosts.size()) {
-            reply.setError(operr("failed to cleanup all vm metadata on NFS primary storage[uuid:%s] after trying %d connected host(s)",
-                    self.getUuid(), connectedHosts.size()));
+            reply.setError(lastError == null
+                    ? operr("failed to cleanup all vm metadata on NFS primary storage[uuid:%s] after trying %d connected host(s)",
+                        self.getUuid(), connectedHosts.size())
+                    : operr("failed to cleanup all vm metadata on NFS primary storage[uuid:%s] after trying %d connected host(s), last error: %s",
+                        self.getUuid(), connectedHosts.size(), lastError.getDetails()));
             bus.reply(msg, reply);
             return;
         }
@@
             public void fail(ErrorCode errorCode) {
                 logger.warn(String.format("[MetadataCleanup] cleanAll: NFS ps[uuid:%s] failed on host[uuid:%s]: %s; trying next connected host",
                         self.getUuid(), hostUuid, errorCode));
-                cleanupAllOnHostWithFallback(msg, reply, connectedHosts, idx + 1);
+                cleanupAllOnHostWithFallback(msg, reply, connectedHosts, idx + 1, errorCode);
             }
         });
     }

Also applies to: 2125-2129

🤖 Prompt for AI Agents
Verify each finding against the current code and only fix it if needed.

In
`@plugin/nfsPrimaryStorage/src/main/java/org/zstack/storage/primary/nfs/NfsPrimaryStorage.java`
around lines 2110 - 2113, The final failure reply in NfsPrimaryStorage (the
block that checks if idx >= connectedHosts.size() and calls
reply.setError(operr(...))) omits the root cause; capture the last
ErrorCode/Exception from the per-host retry loop (e.g., store it in a variable
like lastError or lastErr when individual attempts fail) and include its details
in the final reply.setError message (augment operr(...) with the
lastError.getDetails()/toString()). Apply the same change to the analogous block
around 2125-2129 so the reply contains the most recent failure information for
observability.

return;
}
String hostUuid = connectedHosts.get(idx).getUuid();
final NfsPrimaryStorageBackend backend = getBackendByHostUuid(hostUuid);
backend.handle(msg, hostUuid, new ReturnValueCompletion<CleanupAllVmMetadataOnPrimaryStorageReply>(msg) {
@Override
public void success(CleanupAllVmMetadataOnPrimaryStorageReply r) {
bus.reply(msg, r);
}

@Override
public void fail(ErrorCode errorCode) {
logger.warn(String.format("[MetadataCleanup] cleanAll: NFS ps[uuid:%s] failed on host[uuid:%s]: %s; trying next connected host",
self.getUuid(), hostUuid, errorCode));
cleanupAllOnHostWithFallback(msg, reply, connectedHosts, idx + 1);
}
});
Comment on lines +2095 to +2130
Copy link
Copy Markdown

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

⚠️ Potential issue | 🟠 Major

不要把批量清理硬性绑定到在线 host。

当前实现要求 connectedHosts 非空,并且只取第一个 host 作为后端路由;这会让主存储在所有主机短暂离线时无法执行批量元数据清理,也让后端选择依赖列表顺序。既然这个 reply 已经有计数字段,更适合返回 0 计数并记录告警,或者改为从主存储/集群元信息推导后端后再执行。
Based on learnings, VmInstanceResourceMetadataManager.deleteVmResourceMetadata(...) failures are commonly treated as best-effort and do not block the primary operation.

🛠️ 建议的调整
         List<HostInventory> connectedHosts = factory.getConnectedHostForOperation(getSelfInventory());
         if (connectedHosts.isEmpty()) {
-            reply.setError(operr("no connected host found for NFS primary storage[uuid:%s]", self.getUuid()));
+            logger.warn(String.format(
+                    "no connected host found for NFS primary storage[uuid:%s] when cleaning VM metadata",
+                    self.getUuid()));
+            reply.setCleanedCount(0);
+            reply.setFailedCount(0);
             bus.reply(msg, reply);
             return;
         }
         String hostUuid = connectedHosts.get(0).getUuid();
🤖 Prompt for AI Agents
Verify each finding against the current code and only fix it if needed.

In
`@plugin/nfsPrimaryStorage/src/main/java/org/zstack/storage/primary/nfs/NfsPrimaryStorage.java`
around lines 2095 - 2117, The current
handle(CleanupAllVmMetadataOnPrimaryStorageMsg) binds cleanup to a single online
host (connectedHosts.get(0)) and fails when no hosts are connected; change it to
be best-effort: when connectedHosts.isEmpty() do not set reply.error — set
reply.setCount(0), log a warning, and bus.reply(msg, reply) instead of failing;
otherwise avoid hard-binding to the first host by either deriving the backend
from primary/cluster metadata or iterating available connectedHosts and calling
getBackendByHostUuid(hostUuid) for each until one succeeds; ensure cleanup logic
(and calls to VmInstanceResourceMetadataManager.deleteVmResourceMetadata(...))
treats failures as non-fatal and aggregates success/failure counts into
CleanupAllVmMetadataOnPrimaryStorageReply rather than returning an error.

}
}
Loading